# -*- coding: utf-8 -*-
# standard
import os
import sys
import json
import tempfile
import ctypes
from platform import system
from threading import Thread
from threading import RLock
from concurrent.futures import ThreadPoolExecutor
from queue import Queue
from io import BytesIO
from collections import deque

# third
import httpx
from tqdm import tqdm
# import ffmpeg  # pip install ffmpeg-python

# local
_P_PATH =  os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
if _P_PATH not in sys.path:
    sys.path.append(_P_PATH)
from models import *


"""
@Title:  与视频下载有关的函数 
@File: download_funcs.py
@Author: walle 2023年12日08日 12时17分41秒
@Version: 1.0.0
@Desc: 
"""


COUNT = 0  # number of completed download tasks (incremented via `global` in download_func)
SCP_TASKS = Queue()  # queue of pending scp copy tasks
TIMEOUT_THREAD_IDS = Queue()  # queue of thread ids whose download timed out
# NOTE(review): `urllib3` is not imported in this file — presumably re-exported by
# `from models import *`; confirm, otherwise this line raises NameError at import time.
urllib3.disable_warnings()
DEFAULT_BROWSER_HEADERS = {  # default browser-like request headers
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:94.0) Gecko/20100101 Firefox/94.0",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
}
# R_P_PATH is expected to come from the `models` wildcard import above — TODO confirm.
TEMP_DIR = os.path.join(R_P_PATH, "temp")
if not os.path.exists(TEMP_DIR):
    os.makedirs(TEMP_DIR)
# Sample clips: presumably YouTube video ids with clip boundaries in seconds — verify against callers.
MEDIAS = [
    {'video_id': 'CJoOwXcjhds', 'start_time': '233.266000', 'end_time': '239.367000'}, 
    {'video_id': 'AvWWVOgaMlk', 'start_time': '90.000000', 'end_time': '93.566667'}, 
    {'video_id': 'Y8HMIm8mdns', 'start_time': '171.607767', 'end_time': '174.607767'}, 
    {'video_id': 'akwvpAiLFk0', 'start_time': '144.680000', 'end_time': '150.000000'}
    ]
# NOTE(review): hardcoded remote credentials — move to environment variables or a secrets store.
SERVER_HOST = "gpu1.welltop.tech"
SERVER_DEST_DIR = '/mnt/aiface/data/Wav2Lip/raw/AVSpeech/'
SERVER_USER = "root"
SERVER_PW = "Default01!"



def download_clip_by_ffmpeg(url: str, start_time: str, end_time: str, save_path: str = None):
    """
    Download the clip [start_time, end_time] of a remote video with ffmpeg,
    re-encoded to 25fps h264/aac mp4.

    :param url: direct media url
    :param start_time: clip start in seconds (ffmpeg `-ss`)
    :param end_time: clip end in seconds (ffmpeg `-to`)
    :param save_path: output file path; defaults to "ddd.mp4" next to this file
                      (the original hardcoded destination)
    """
    # Local import: the module-level `import ffmpeg` is commented out, so the
    # original body raised NameError. pip install ffmpeg-python
    import ffmpeg
    if save_path is None:
        save_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "ddd.mp4")
    (
        ffmpeg
        # reuse the module-wide browser headers instead of a duplicated literal
        .input(url, headers=DEFAULT_BROWSER_HEADERS, ss=start_time, to=end_time)
        .output(save_path, format='mp4', r=25, vcodec='libx264',
                crf=18, preset='fast', pix_fmt='yuv420p', acodec='aac', audio_bitrate=128000,
                strict='experimental')
        .global_args('-y')  # overwrite existing output file
        .global_args('-loglevel', 'error')
        .run()
    )


def get_gpu1_work_work_dir_files():
    """
    Return the file listing of the work_data directory on gpu1.

    The remote listing is cached through FileCacheManager for 24 hours to
    avoid repeated remote scans.
    """
    cache_key = "gup1_work_data_files"
    listing = FileCacheManager.get(cache_key)
    if listing is not None:
        return listing
    listing, _bad = ScpRecordOrm.view_remote_files(to_k=False, host=SERVER_HOST,
                                                   username=SERVER_USER, password=SERVER_PW,
                                                   dest_dir=SERVER_DEST_DIR)
    FileCacheManager.set(key=cache_key, value=listing, timeout=86400)
    return listing


def mark_gpu_exist_files():
    """
    Record in the database the remote storage location of every media file
    that already exists on gpu1.
    """
    remote_files = get_gpu1_work_work_dir_files()
    if remote_files is None:
        return
    prefix = f"{SERVER_HOST}:{SERVER_DEST_DIR}"
    session = new_db_session()
    count = 0
    for record in session.query(MediaFileOrm):
        file_name = f"{record.key}.mp4"
        if file_name not in remote_files:
            continue
        record.remote_storage = prefix + file_name
        count += 1
    session.commit()
    session.close()
    logger.debug(f"gpu1 上有 {count} 个已存在的文件")


def get_hk2_work_work_dir_files():
    """
    Return the file listing of the work_data directory on hk2.

    The remote listing is cached through FileCacheManager for 24 hours.
    """
    cache_key = "hk02_video_backup"
    listing = FileCacheManager.get(cache_key)
    if listing is not None:
        return listing
    listing, _bad = ScpRecordOrm.view_remote_files(to_k=False, host="47.243.81.213",
                                                   username="spider", password="jingying",
                                                   dest_dir="/mnt/projects/work_data/")
    FileCacheManager.set(key=cache_key, value=listing, timeout=86400)
    return listing


def print_hk2_exist_files():
    """
    Print every media file already present on hk2, to spot duplicates by eye.

    Read-only: unlike mark_gpu_exist_files, nothing is written to the DB.
    """
    good_files = get_hk2_work_work_dir_files()
    if good_files is None:
        return
    db_session = new_db_session()
    count = 0
    for db_file in db_session.query(MediaFileOrm):
        key = f"{db_file.key}.mp4"
        if key in good_files:
            print(f"{key} 已下载")
            count += 1
    db_session.close()
    # bug fix: the message claimed "gpu1" but this function inspects hk2
    logger.debug(f"hk2 上有 {count} 个已存在的文件")
    
    
def download_by_requests(resource_url: str) -> Resp:
    """
    Download a resource with requests and return its body in a Resp.

    :param resource_url: direct url of the resource to fetch
    :return: Resp with the raw bytes in `data` on success, or a failure
             description in `message`.
    """
    resp = Resp()
    try:
        response = requests.get(url=resource_url, headers=DEFAULT_BROWSER_HEADERS, verify=False, timeout=60)
    except Exception as e:
        logger.exception(e)
        resp.message = f"下载出错： {e}"
    else:
        # bug fix: the original `return` inside `finally` silently swallowed any
        # in-flight exception (including KeyboardInterrupt/SystemExit); using
        # `else` keeps the success path separate without that hazard, and avoids
        # touching `response` when the request itself raised.
        if response.status_code != 200:
            resp.message = f"下载拒绝 {response.status_code}"
        else:
            resp.data = response.content
    return resp
    

def download_by_requests_has_progress_bar(url: str = "https://download.oracle.com/java/17/latest/jdk-17_windows-x64_bin.msi", 
                                          timeouts: Queue = TIMEOUT_THREAD_IDS):
    """
    Download with a tqdm progress bar via requests.

    Not suitable for very large files: the whole body is buffered in memory
    (switch to a tempfile-backed buffer for large downloads).

    :param url: direct url of the resource
    :param timeouts: queue of timed-out thread ids (reserved for the disabled
                     manual timeout-detection hook)
    :return: Resp with the downloaded bytes in `data` on success
    """
    resp = Resp()
    # todo: when debugging manual timeout detection, register the current thread
    # id with `add_timeout_thread_queue` via a threading.Timer here.
    temp_file = BytesIO()
    total = 0
    with requests.get(url, stream=True, headers=DEFAULT_BROWSER_HEADERS, verify=False, timeout=60) as response:
        if response.status_code != 200:
            resp.message = f"{url} 下载失败, 状态码： {response.status_code}"
        else:
            # bug fix: servers may omit Content-Length; indexing raised KeyError.
            # total == 0 now disables the size check below instead of crashing.
            total = int(response.headers.get("Content-Length", 0))
            with tqdm(total=total, unit_scale=True, unit_divisor=1024, unit="B") as progress:
                try:
                    for chunk in response.iter_content(chunk_size=1024):  # chunk_size must be set or the speed display is wrong
                        temp_file.write(chunk)
                        progress.update(len(chunk))
                except Exception as e:
                    resp.error(e)
                else:
                    # tell() is the number of bytes written so far
                    if resp and total and total != temp_file.tell():
                        resp.message = f"{url} 下载失败, 文件大小不匹配,期待：{total}, 实际：{temp_file.tell()}"
    if resp:
        resp.data = temp_file.getvalue()  # whole buffer, no seek needed
    delay = randint(3, 10)  # random jitter between downloads to avoid hammering the server
    logger.debug(f"随机等待 {delay} 秒")
    sleep(delay)
    return resp


def download_by_httpx_has_progress_bar(url: str = "https://download.oracle.com/java/17/latest/jdk-17_windows-x64_bin.msi", timeouts: Queue = TIMEOUT_THREAD_IDS) -> Resp:
    """
    Download with a tqdm progress bar via httpx, spooling to a temp file.

    Binary bodies only; note the rich progress bar cannot be activated twice.

    :param url: direct url of the resource
    :param timeouts: queue of timed-out thread ids (reserved for the disabled
                     manual timeout-detection hook)
    :return: Resp with the downloaded bytes in `data` on success
    """
    resp = Resp()
    # todo: when debugging manual timeout detection, register the current thread
    # id with `add_timeout_thread_queue` via a threading.Timer here.
    with tempfile.TemporaryFile(dir=TEMP_DIR) as temp_file:
        with httpx.stream(method="GET", url=url, headers=DEFAULT_BROWSER_HEADERS) as response:
            if response.status_code != 200:
                resp.message = f"{url} 下载失败, 状态码： {response.status_code}"
            else:
                # bug fix: servers may omit Content-Length; indexing raised
                # KeyError. Fall back to 0 (tqdm then shows no fixed total).
                total = int(response.headers.get("Content-Length", 0))
                with tqdm(total=total, unit_scale=True, unit_divisor=1024, unit="B") as progress:
                    num_bytes_downloaded = response.num_bytes_downloaded
                    for chunk in response.iter_bytes():
                        temp_file.write(chunk)
                        # advance by the delta httpx actually consumed
                        progress.update(response.num_bytes_downloaded - num_bytes_downloaded)
                        num_bytes_downloaded = response.num_bytes_downloaded
        if resp:
            temp_file.seek(0)
            resp.data = temp_file.read()
    return resp
    

def add_timeout_thread_queue(thread_id: int, timeouts: Queue):
    """
    Report a timed-out download thread by enqueueing its ident.

    :param thread_id: ident of the thread that exceeded its time budget
    :param timeouts: destination queue consumed by kill_timeout_threads
    """
    timeouts.put(thread_id)


def kill_timeout_threads(timeouts: Queue):
    """
    Forever consume timed-out thread ids from `timeouts` and asynchronously
    raise SystemExit inside those threads to abort their downloads.

    NOTE(review): the original body called PyObject_IsInstance with a raw int
    and discarded the result — a no-op that killed nothing. Replaced with the
    standard ctypes PyThreadState_SetAsyncExc recipe.

    :param timeouts: queue of thread idents (as produced by Thread.ident)
    """
    while True:
        timeout_thread_id = timeouts.get()  # blocks until a timeout is reported
        affected = ctypes.pythonapi.PyThreadState_SetAsyncExc(
            ctypes.c_long(timeout_thread_id), ctypes.py_object(SystemExit))
        if affected == 0:
            logger.debug(f"thread {timeout_thread_id} not found (already finished?)")
        elif affected > 1:
            # More than one thread state was touched: revoke to avoid killing bystanders.
            ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(timeout_thread_id), None)
            logger.error(f"PyThreadState_SetAsyncExc affected {affected} threads for id {timeout_thread_id}")


def download_func(key: str, page_url: str, save_path: str, audio_path: str, need_audio: int, tasks: Queue = None, timeouts: Queue = None) -> None:
    """One download task: resolve the media urls for `key`, download the video
    and/or audio that are still missing, persist the outcome to the DB and
    optionally enqueue scp copy tasks.

    :param key: media key, also used to build local file names
    :param page_url: page url to resolve into direct resource urls
    :param save_path: non-empty means the video is already downloaded
    :param audio_path: non-empty means the audio is already downloaded
    :param need_audio: 1 means the audio track must be downloaded too
    :param tasks: scp task queue; when None no copy task is enqueued
    :param timeouts: queue of timed-out thread ids, passed through to the downloader
    """
    resp = MediaFileOrm.analysis_media_url(page_url=page_url, need_audio=need_audio)
    if not resp:  # url resolution failed: record the error for this key and stop
        DB_THREAD_LOCK.acquire()
        db_session = new_db_session()
        db_session.query(MediaFileOrm).filter(MediaFileOrm.key == key).update({"error": resp.message})
        db_session.commit()
        db_session.close()
        DB_THREAD_LOCK.release()
    else:
        resource = resp.data
        caption = resp.data['caption']
        download_begin = datetime.now()
        video_url, audio_url = resp.data['resource_url']
        quality = resp.data['current_quality']
        logger.debug(f"{key} 开始下载")
        # `update` accumulates the DB column values written at the end
        update = {"download_begin": None}
        # Video step: empty save_path means the video has not been downloaded yet
        # todo: original note — "temporarily only downloading audio"
        if save_path == "":  
            
            resp = download_by_requests_has_progress_bar(url=video_url, timeouts=timeouts)
            if resp:
                logger.debug(f"{key}  视频下载成功")
                save_path = MediaFileOrm.generate_save_file_path(key)
                with open(save_path, mode="bw") as f:
                    f.write(resp.data)
                update['save_path'] = save_path
            else:
                logger.error(f"{key} 视频下载失败")
                update['error'] = resp.message
            update["caption"] = caption
            update["resource"] = resource
            update["quality"] = quality
            update["download_begin"] = download_begin
            update["download_end"] = datetime.now()
        else:
            logger.debug(f"{key} 视频下无需下载")
        
        # Audio step: only when the video step succeeded and audio is required but missing
        if resp and need_audio == 1 and audio_path == "":
            if audio_url is None:
                error = f"{key} 缺少对应的音频文件"
                logger.error(error)
                update['error'] = error
            else:
                resp = download_by_requests_has_progress_bar(url=audio_url, timeouts=timeouts)
                if resp:
                    logger.debug(f"{key}  音频下载成功")
                    audio_path = MediaFileOrm.generate_save_file_path(key, ".m4a")
                    with open(audio_path, mode="bw") as f:
                        f.write(resp.data)
                    update['audio_path'] = audio_path
                    update['audio_size'] = os.path.getsize(audio_path)
                else:
                    logger.error(f"{key} 音频下载失败")
                    update['error'] = resp.message
                update["resource"] = resource
                # keep the earliest begin timestamp if the video step already set one
                if update["download_begin"] is None:
                    update["download_begin"] = download_begin
                update["download_end"] = datetime.now()
        else:
            logger.debug(f"{key} 音频下无需下载")
        
        DB_THREAD_LOCK.acquire()
        db_session = new_db_session()
        db_session.query(MediaFileOrm).filter(MediaFileOrm.key == key).update(update)
        db_session.commit()
        db_session.close()
        if resp:
            # COUNT is a module-level counter; mutation happens under DB_THREAD_LOCK
            global COUNT
            COUNT += 1
            if tasks is None:
                logger.debug(f"第{COUNT} 号下载任务({key})已完成")
            else:
                if save_path:
                    task = ScpTask(num=COUNT, key=key, sources=[save_path])
                    tasks.put(task)
                    logger.debug(f"第{COUNT} 号视频拷贝任务已装入{key}")
                if audio_path:
                    task = ScpTask(num=COUNT, key=key, sources=[audio_path])
                    tasks.put(task)
                    logger.debug(f"第{COUNT} 号音频拷贝任务已装入{key}")
        DB_THREAD_LOCK.release()
        
    
def scp_func(tasks: Queue):
    """
    Load every already-downloaded file and copy it to the remote host via scp.

    :param tasks: work queue the copy loop consumes from
    """
    # Validate sizes of downloaded files; deletion stays enabled as before.
    ScpRecordOrm.check_downloaded_folder(delete_file=True)
    pending = ScpRecordOrm.get_need_copy_records()  # downloaded files awaiting copy
    pending.sort(key=lambda record: record['create_time'])  # oldest first
    num = 0
    for num, record in enumerate(pending, start=1):
        tasks.put(ScpTask(num=num, key=record['key'], sources=record['sources']))
    logger.debug(f"从{DOWNLOAD_DIR}目录加载了 {num} 个待等待scp的视频放入工作队列")
    ScpRecordOrm.copy_to_remote_forever(tasks=tasks)


def start_download_by_thread_pool(download: bool = True, scp: bool = True) -> Resp:
    """
    Run download and scp-copy tasks on one shared thread pool.

    :param download: whether to start download tasks, defaults to True
    :param scp: whether to start the scp copy worker, defaults to True
    :return: NOTE(review): annotated as Resp but no value is ever returned — confirm intent.
    """
    logger.debug("正在从数据库查询待下载的文件....")
    db_session = new_db_session()
    # Pending work: either the video was never fetched (no remote copy, no error,
    # no local file), or audio is required but still missing.
    cond = or_(
        and_(
        MediaFileOrm.remote_storage == "",
        MediaFileOrm.error == "",
        MediaFileOrm.save_path == ""
    ),and_(
        MediaFileOrm.need_audio == 1,
        MediaFileOrm.audio_path == "",
    ))
    # 
    
    files = [{
        "key": x.key,
        "page_url": x.page_url,
        "save_path": x.save_path,  # non-empty means the video is already downloaded
        "audio_path": x.audio_path,  # non-empty means the audio is already downloaded
        "need_audio": x.need_audio,  # 1 means download audio; temporary flag to backfill audio on finished videos
    } for x in tqdm(db_session.query(MediaFileOrm).filter(cond).order_by(MediaFileOrm.key.desc()).all())]
    db_session.close()
    logger.debug(f"从数据库加载了 {len(files)} 个待下载的视频")
    if system().lower().startswith("win"):
        max_workers = 3
    else:
        max_workers = 60
    logger.info(f"当前线程数设定: {max_workers}")
    with ThreadPoolExecutor(max_workers=max_workers) as executor:  # max_workers >= 2 required: scp takes one worker, downloads need at least one
        # NOTE(review): kill_timeout_threads loops forever, so it permanently occupies one worker.
        executor.submit(kill_timeout_threads, timeouts=TIMEOUT_THREAD_IDS)
        futures: List[Future] = []
        # todo: temporarily disabled while testing download timeouts; restore afterwards
        if scp:
            future = executor.submit(scp_func, tasks=SCP_TASKS)  
            futures.append(future)
        if download:
            for file in files:
                kw = {
                    "key": file["key"], 
                    "page_url": file["page_url"], 
                    "save_path": file['save_path'],
                    "audio_path": file['audio_path'],
                    "need_audio": file['need_audio'],
                    "tasks": SCP_TASKS, "timeouts": 
                    TIMEOUT_THREAD_IDS}
                executor.submit(download_func, **kw)
        # NOTE(review): only the scp future is collected here; download futures are
        # untracked — the `with` block's implicit shutdown() is what waits for them.
        for future in as_completed(futures):
            result = future.result()
            logger.debug(f"future result: {result}")
    executor.shutdown()  # NOTE(review): redundant — the `with` block already shut the pool down
        
        


if __name__ == '__main__':
    # Manual-testing entry points (kept commented out as usage examples):
    # download_func(key="kFX2LrNE3VI", 
    #               page_url="https://www.youtube.com/watch?v=kFX2LrNE3VI",
    #               save_path="",
    #               audio_path="",
    #               need_audio=1, )
    # download_by_requests_has_progress_bar()
    # download_by_httpx_has_progress_bar()
    # scp_func(tasks=SCP_TASKS)
    # print_hk2_exist_files()
    # Default run: download only, scp worker disabled.
    start_download_by_thread_pool(download=True, scp=False)
    pass