# -*- coding:utf-8 -*-
# !python3
import json
import os
import subprocess
import time
from xml.dom import minidom

import requests
from requests.adapters import HTTPAdapter
from tmdbv3api import TMDb, Movie
# https://developers.themoviedb.org/3/getting-started/introduction
# https://developers.themoviedb.org/3/search/search-people
from unipath import Path, FILES

# thdbAPI_KEY
from md_help import add_item, item_exist

# TMDB v3 API key (hard-coded; NOTE(review): consider moving to config/env).
api_key = 'c24de12a959c4ce4b8adf7fa7f302ef9'
# Base URL used to build API requests.
# NOTE(review): this is a local address (Emby's default port 8096) while the
# request paths below match the TMDB v3 API — presumably a local proxy; confirm.
domain = 'http://localhost:8096'
# SOCKS5 proxy for outbound requests (requires: pip install pysocks)
proxies = {
    'http': 'socks5://127.0.0.1:7890',
    'https': 'socks5://127.0.0.1:7890'
}


# ########################## Actor information
# Fetch actor details by TMDB person id.
def get_person_info_id(person_id: int):
    """
        Fetch a person's details (plus videos/images) from the API.
        https://developers.themoviedb.org/3/people/get-person-details
        https://api.themoviedb.org/3/person/{person_id}?api_key=<<api_key>>&language=en-US
    :param person_id: TMDB person (actor) id
    :return: parsed JSON dict on HTTP 200, '' on any other status
    """
    url = '{0}/person/{1}'.format(domain, person_id)
    headers = {
        'accept': 'application/json',
        'Connection': 'close'
    }
    # Query parameters
    params = {
        'api_key': api_key,
        'append_to_response': 'videos,images',
        'language': 'zh'
    }
    # Session with up to 3 connection retries per scheme.
    sess = requests.Session()
    sess.mount('http://', HTTPAdapter(max_retries=3))
    sess.mount('https://', HTTPAdapter(max_retries=3))
    sess.keep_alive = False  # do not keep idle connections open
    requests.adapters.DEFAULT_RETRIES = 5
    # BUG FIX: the original called requests.get(), which bypasses the session
    # and its retry adapters entirely; use the configured session instead.
    response_info = sess.get(url=url, headers=headers, params=params, proxies=proxies)
    if response_info.status_code != 200:
        return ''
    return response_info.json()


# Search for an actor by name.
def get_person_info_name(person_name: str):
    """
        Search the people endpoint for *person_name*.
        https://developers.themoviedb.org/3/search/search-people
        https://api.themoviedb.org/3/search/person?api_key=<<api_key>>&language=en-US&page=1&include_adult=false
    :param person_name: actor name to search for
    :return: parsed JSON dict with the search results
    """
    url = '{0}/search/person'.format(domain)
    headers = {
        'accept': 'application/json'
    }
    # Query parameters
    params = {
        'api_key': api_key,
        'page': '1',
        'query': person_name,
        'include_adult': 'false',
        'language': 'zh'
    }
    # Request the data
    response = requests.get(headers=headers, url=url, params=params, proxies=proxies).json()
    print(response)
    # BUG FIX: a "get_" function returned None; return the payload so callers
    # can actually use the result (printing is kept for compatibility).
    return response


# ########################## Video information
def get_movie_info(imdb_id: int):
    """
        Look up a movie on TMDB by its IMDb id.
    :param imdb_id: external IMDb id
    :return: the TMDB "movie_results" list for the id
    """
    tmdb = TMDb()
    tmdb.api_key = 'ba60bc155ae44f212ce6f70ea6948bcb'
    tmdb.language = 'zh'
    tmdb.debug = True
    movie = Movie()
    res = movie.external(external_id=imdb_id, external_source="imdb_id")
    results = res.get("movie_results")
    print(results)
    # BUG FIX: return the lookup result instead of only printing it.
    return results


# -------------------------------- Download images
# Parse .nfo files and download actor info / headshot images.
def download_image_nfo(nfo_dir_path: str, actor_info: bool, actor_image: bool, actor_dir: str):
    """
        Walk each sub-directory of *nfo_dir_path*, read the .nfo files inside and
        download actor metadata and/or headshot images.
    :param nfo_dir_path: root directory holding one sub-directory per video
    :param actor_info: True to download actor detail JSON via the API
    :param actor_image: True to download the actor headshot image
    :param actor_dir: output root; pictures go to <actor_dir>/pic/<YYYYMM>, JSON to <actor_dir>/info
    :return: None
    """

    def _node_text(parent, tag):
        # Text of the first <tag> child element, '' when that element is empty.
        # Raises IndexError when <tag> is absent (handled by the caller's except).
        children = parent.getElementsByTagName(tag)[0].childNodes
        return children[0].data if len(children) > 0 else ''

    nfo_dir_path = Path(nfo_dir_path)
    # Create the per-month picture directory when missing.
    actor_pic_dir = Path(actor_dir, 'pic', time.strftime("%Y%m", time.localtime()))
    if not actor_pic_dir.exists():
        actor_pic_dir.mkdir(True)

    actor_info_dir = Path(actor_dir, 'info')
    if not os.path.exists(actor_info_dir):
        # BUG FIX: os.mkdir(path, True) passed True as the *mode* argument (0o1),
        # creating a directory with nonsense permissions on POSIX systems.
        os.makedirs(actor_info_dir, exist_ok=True)

    count = 0
    count_all = str(len(nfo_dir_path.listdir()))
    print('[+] 找到' + count_all + ' 文件夹')
    # .nfo files that failed to parse
    fail_nfo_list = []
    for item_dir_path in nfo_dir_path.listdir():
        count = count + 1
        percentage = str(count / int(count_all) * 100)[:4] + '%'
        print('[+] ---------------------------------------------- - ' + percentage + ' [' + str(count) + '/' + count_all + '] - ' + item_dir_path)

        download_actor_list = []
        try:
            for item_nfo_path in [nfo_file_info for nfo_file_info in item_dir_path.walk(filter=FILES) if nfo_file_info.ext in ['.nfo']]:
                xml_dom = read_nfo(item_nfo_path)
                if not xml_dom:
                    fail_nfo_list.append(item_nfo_path)
                    continue
                # Document root; one <actor> element per cast member.
                root = xml_dom.documentElement
                for actor_node in root.getElementsByTagName('actor'):
                    download_actor_list.append({
                        'name': _node_text(actor_node, 'name'),
                        'profile': _node_text(actor_node, 'profile'),
                        'thumb': _node_text(actor_node, 'thumb')
                    })
        except Exception:  # BUG FIX: bare except also swallowed SystemExit/KeyboardInterrupt
            print("[#] 异常 {0}".format(item_dir_path))
        current_index = 0
        for item_actor in download_actor_list:
            current_index = current_index + 1
            # BUG FIX: was `>= 16`, contradicting both the message below and the
            # identical `> 11` check in download_image_emby.
            if current_index > 11:
                print("[!] 只下载前11个演员")
                break
            print("[!] ----------------------")
            name = item_actor['name']
            thumb = item_actor['thumb']
            profile = item_actor['profile']

            print('[+] name:' + name)
            print('[+] thumb:' + thumb)
            print('[+] profile:' + profile)

            if item_exist(name) > 0:
                print("[!] {0} 已经存在".format(name))
                continue
            # ----------------------------------
            # Actor info requested but no profile id available: skip.
            if actor_info and len(profile) == 0:
                print('[!] 演员ID获取失败==>' + name)
                continue
            if actor_info:
                # The trailing segment of the profile URL is the TMDB person id.
                person_info = get_person_info_id(int(profile.split("/")[-1]))
                if len(person_info) != 0:
                    with open(Path(actor_info_dir, name + '.json'), 'w') as f:
                        f.write(json.dumps(person_info))

            # ----------------------------------
            # Headshot requested but no thumb URL available: skip.
            if actor_image and len(thumb) == 0:
                print('[!] 获取头像地址失败==>' + name)
                continue
            if actor_image:
                file_name = name + '.' + thumb.split('.')[-1]
                if not download_image(thumb, actor_pic_dir, file_name):
                    print("[*] 下载失败停止5秒 " + name)
                    continue
                add_item((thumb, name, profile, Path(actor_pic_dir, file_name).replace("E:\私人资料\\", "")))
            time.sleep(1)

    print('***************************************')
    print('*********** 解析失败nfo')
    print('***************************************')
    # Report every .nfo that could not be parsed.
    for item_fail_nfo_path in fail_nfo_list:
        print('[!] 解析失败:' + item_fail_nfo_path)


def download_image_emby(nfo_dir_path: str, actor_info: bool, actor_image: bool, actor_dir: str):
    """
        Walk each sub-directory of *nfo_dir_path*, read the .nfo files inside and
        download actor metadata and/or headshot images (Emby library layout).
    :param nfo_dir_path: root directory holding one sub-directory per video
    :param actor_info: True to download actor detail JSON via the API
    :param actor_image: True to download the actor headshot image
    :param actor_dir: output root; pictures go to <actor_dir>/pic/<YYYYMM>, JSON to <actor_dir>/info
    :return: None
    """

    def _node_text(parent, tag):
        # Text of the first <tag> child element, '' when that element is empty.
        # Raises IndexError when <tag> is absent (handled by the caller's except).
        children = parent.getElementsByTagName(tag)[0].childNodes
        return children[0].data if len(children) > 0 else ''

    nfo_dir_path = Path(nfo_dir_path)
    # Create the per-month picture directory when missing.
    actor_pic_dir = Path(actor_dir, 'pic', time.strftime("%Y%m", time.localtime()))
    if not actor_pic_dir.exists():
        actor_pic_dir.mkdir(True)

    actor_info_dir = Path(actor_dir, 'info')
    if not os.path.exists(actor_info_dir):
        # BUG FIX: os.mkdir(path, True) passed True as the *mode* argument (0o1),
        # creating a directory with nonsense permissions on POSIX systems.
        os.makedirs(actor_info_dir, exist_ok=True)

    count = 0
    count_all = str(len(nfo_dir_path.listdir()))
    print('[+] 找到' + count_all + ' 文件夹')
    # .nfo files that failed to parse
    fail_nfo_list = []
    for item_dir_path in nfo_dir_path.listdir():
        count = count + 1
        percentage = str(count / int(count_all) * 100)[:4] + '%'
        print('[+] ---------------------------------------------- - ' + percentage + ' [' + str(count) + '/' + count_all + '] - ' + item_dir_path)

        download_actor_list = []
        try:
            for item_nfo_path in [nfo_file_info for nfo_file_info in item_dir_path.walk(filter=FILES) if nfo_file_info.ext in ['.nfo']]:
                xml_dom = read_nfo(item_nfo_path)
                if not xml_dom:
                    fail_nfo_list.append(item_nfo_path)
                    continue
                # Document root; one <actor> element per cast member.
                root = xml_dom.documentElement
                for actor_node in root.getElementsByTagName('actor'):
                    download_actor_list.append({
                        'name': _node_text(actor_node, 'name'),
                        'profile': _node_text(actor_node, 'profile'),
                        'thumb': _node_text(actor_node, 'thumb')
                    })
        except Exception:  # BUG FIX: bare except also swallowed SystemExit/KeyboardInterrupt
            print("[#] 异常 {0}".format(item_dir_path))
        current_index = 0
        for item_actor in download_actor_list:
            current_index = current_index + 1
            # Only the first 11 actors of a video are processed.
            if current_index > 11:
                print("[!] 只下载前11个演员")
                break
            print("[!] ----------------------")
            name = item_actor['name']
            thumb = item_actor['thumb']
            profile = item_actor['profile']

            print('[+] name:' + name)
            print('[+] thumb:' + thumb)
            print('[+] profile:' + profile)

            if item_exist(name) > 0:
                print("[!] {0} 已经存在".format(name))
                continue
            # ----------------------------------
            # Actor info requested but no profile id available: skip.
            if actor_info and len(profile) == 0:
                print('[!] 演员ID获取失败==>' + name)
                continue
            if actor_info:
                # The trailing segment of the profile URL is the TMDB person id.
                person_info = get_person_info_id(int(profile.split("/")[-1]))
                if len(person_info) != 0:
                    with open(Path(actor_info_dir, name + '.json'), 'w') as f:
                        f.write(json.dumps(person_info))

            # ----------------------------------
            # Headshot requested but no thumb URL available: skip.
            if actor_image and len(thumb) == 0:
                print('[!] 获取头像地址失败==>' + name)
                continue
            if actor_image:
                file_name = name + '.' + thumb.split('.')[-1]
                if not download_image(thumb, actor_pic_dir, file_name):
                    print("[*] 下载失败停止5秒 " + name)
                    continue
                add_item((thumb, name, profile, Path(actor_pic_dir, file_name).replace("E:\私人资料\\", "")))
            time.sleep(1)

    print('***************************************')
    print('*********** 解析失败nfo')
    print('***************************************')
    # Report every .nfo that could not be parsed.
    for item_fail_nfo_path in fail_nfo_list:
        print('[!] 解析失败:' + item_fail_nfo_path)


# Download a single image file.
def download_image(image_url: str, dir_path: str, file_name: str):
    """
        Stream *image_url* into *dir_path*/*file_name* in 1 KiB chunks,
        printing a simple progress bar.
    :param image_url: image URL (tmdb.org image host)
    :param dir_path: destination directory
    :param file_name: destination file name (must include the extension)
    :return: True on success, False on connection error or non-200 response
    """
    # Double quotes are illegal in Windows file names; swap for single quotes.
    filepath = Path(dir_path, file_name.replace('"', '\''))
    headers = {
        'authority': 'image.tmdb.org',
        'method': 'GET',
        'path': image_url.replace("http://image.tmdb.org", ""),
        'accept': 'image/avif,image/webp,image/apng,image/*,*/*;q=0.8',
        'scheme': 'https',
        'accept-encoding': 'gzip, deflate, br',
        'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
        'referer': 'https://www.themoviedb.org/',
        'sec-fetch-dest': 'image',
        'sec-fetch-mode': 'no-cors',
        'sec-fetch-site': 'cross-site',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36'
    }
    start = time.time()  # download start time
    size = 0  # bytes downloaded so far
    chunk_size = 1024  # stream chunk size

    try:
        s = requests.session()
        s.keep_alive = False  # close the connection when done
        # stream=True is required for iter_content() below
        response = s.get(image_url, stream=True, headers=headers)
    except requests.exceptions.ConnectionError as ex:
        print('[#] {0}'.format(ex))
        time.sleep(5)
        return False

    # BUG FIX: the original fell through and returned True on ANY non-200
    # status, so callers recorded failed downloads as successes.
    if response.status_code != 200:
        return False

    # Total size; 0 when the server omits Content-Length (original raised KeyError).
    content_size = int(response.headers.get('content-length', 0))
    with open(filepath, 'wb') as file:
        for data in response.iter_content(chunk_size=chunk_size):
            file.write(data)
            size += len(data)
            # Progress bar only when the total size is known (avoids division by zero).
            if content_size:
                print('\r' + '[+] :%s%.2f%%' % ('>' * int(size * 50 / content_size), float(size / content_size * 100)), end=' ')
    end = time.time()  # download end time
    print('下载完成,时间: %.2f秒' % (end - start))
    return True


def download_image1(image_url: str, dir_path: str, file_name: str):
    """Queue *image_url* for download via Internet Download Manager (fire-and-forget).

    /a adds to queue, /n silent mode, /p target dir, /f file name, /d URL.
    Always returns True; IDM's own result is not checked.
    """
    idm_exe = '"D:\\Program Files (x86)\\Internet Download Manager\\IDMan"'
    cmd = '{} /a /n /p "{}" /f "{}" /d "{}"'.format(idm_exe, dir_path, file_name, image_url)
    subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    return True


# ########################## Scraping helpers
# Parse an NFO (XML) file into a DOM document.
def read_nfo(nfo_file_path):
    """Parse *nfo_file_path* as XML; return the minidom Document, or None on any failure."""
    try:
        with open(nfo_file_path, 'r', encoding='utf8') as handle:
            return minidom.parse(handle)
    except Exception:
        # Unreadable or malformed file: signal failure with None.
        return None


#  Register local pictures already on disk.
def process_pic(pic_dir):
    """Walk *pic_dir* recursively and add every file to the item store.

    Each entry is (thumb='', name=file stem, profile='', local path).
    """
    for pic_path in Path(pic_dir).walk(filter=FILES):
        print(pic_path)
        add_item(('', str(pic_path.stem), '', str(pic_path)))


# ------------------------------------------------ Scrape poster/fanart page (TV)
def get_poster_fanart(tmid):
    """
        Fetch the HTML of the TMDB "seasons" page for a TV show.
    :param tmid: TMDB TV show id
    :return: page HTML on HTTP 200, '' on any other status
    """
    # BUG FIX: the template was 'tv/{}0/seasons', silently appending a literal
    # '0' to every id (e.g. 1429 -> 14290) and fetching the wrong show.
    url = 'https://www.themoviedb.org/tv/{}/seasons'.format(tmid)
    # NOTE(review): the original header dict was copy-pasted *response* headers
    # (set-cookie, status, etag, via, ...), which servers ignore in a request;
    # only the request-relevant header is kept.
    headers = {
        'Connection': 'close'
    }
    # Session with up to 3 connection retries per scheme.
    sess = requests.Session()
    sess.mount('http://', HTTPAdapter(max_retries=3))
    sess.mount('https://', HTTPAdapter(max_retries=3))
    sess.keep_alive = False  # do not keep idle connections open
    requests.adapters.DEFAULT_RETRIES = 5
    # BUG FIX: the original called requests.get(), which bypasses the session
    # and its retry adapters; use the configured session instead.
    response_info = sess.get(url=url, headers=headers, proxies=proxies)
    if response_info.status_code != 200:
        return ''
    return response_info.text


# Usage: python .\tmdb-tools.py
# Script entry point. The commented lines below are ad-hoc one-off invocations
# kept for reference; only process_pic() runs by default.
if __name__ == '__main__':
    # check_for_database()
    # get_person_info_id(2963)
    # get_movie_info(529203)
    # print(get_poster_fanart(1429))
    # get_person_info_name('Tom Cruise')
    # download_image('http://image.tmdb.org/t/p/h632/lQtE0gsFf6T7QXFnq3InuIG9WE9.jpg', r'E:\临时文件\数据工厂', 'Jan Pavel Filipenský.jpg')
    process_pic(r'E:\emby\video_actor_pic\pic')
    # download_image_nfo(nfo_dir_path=r'E:\emby\高清影视', actor_info=False, actor_image=True, actor_dir=r'E:\emby\video_actor_pic')
