import os
import time
import requests
import openpyxl
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from typing import List, Dict
from moviepy import VideoFileClip
from requests.adapters import HTTPAdapter
from urllib3.util import Retry
from tqdm import tqdm
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
import csv
from datetime import datetime
import multiprocessing

# Constants
URLS_TXT_PATH = 'D:/douyin_download/urls.txt'  # captured request parameters, written by an external capture tool
USER_LINKS_PATH = 'D:/douyin_download/user.xlsx'  # Excel sheet of (url, username) rows
download_excle='D:/已下载.xlsx'  # NOTE(review): appears unused in this file — confirm before removing
DOWNLOAD_DIR = 'D:/抖音/video'  # downloaded videos go into per-user subfolders here
AUDIO_DIR = 'D:/抖音/video'  # directory for extracted MP3 files (same as DOWNLOAD_DIR)
CHROME_USER_DATA_DIR = "C:/Users/姚望/AppData/Local/Google/Chrome/User Data"  # reuse local Chrome profile (login cookies)
SCROLL_PAUSE_TIME = 10  # seconds to wait between scroll steps
proxy = {
    'http': 'http://127.0.0.1:8888',
    'https': 'http://127.0.0.1:8888',
}


def initialize_driver() -> webdriver.Chrome:
    """Create and return a Chrome WebDriver that reuses the local user profile.

    Reusing CHROME_USER_DATA_DIR keeps existing login cookies, so the
    scraper does not have to re-authenticate on every run.
    """
    print("初始化 Selenium WebDriver")  # was a pointless f-string with no placeholders
    options = webdriver.ChromeOptions()
    # options.add_argument('--headless')  # headless mode (left disabled)
    options.add_argument('--disable-gpu')  # optional: disable GPU acceleration
    options.add_argument(f"user-data-dir={CHROME_USER_DATA_DIR}")

    # A Service('path/to/chromedriver') could be passed here if needed.
    return webdriver.Chrome(options=options)


def read_urls_from_excel(file_path: str = USER_LINKS_PATH) -> List[Dict[str, str]]:
    """Read (url, username) pairs from the first sheet of an Excel workbook.

    The first row is treated as a header and skipped.  Only the first two
    cells of each row are used; the original ``url, username = row`` unpack
    raised on rows with extra/missing columns, which the broad ``except``
    then turned into an aborted (partial) read.

    Returns:
        A list of {'url': ..., 'username': ...} dicts (empty on error).
    """
    urls: List[Dict[str, str]] = []
    try:
        workbook = openpyxl.load_workbook(file_path)
        sheet = workbook.active
        for row in sheet.iter_rows(min_row=2, values_only=True):  # skip header row
            # Pad short rows so malformed rows are skipped, not fatal.
            url, username = (tuple(row) + (None, None))[:2]
            if url and username:
                urls.append({'url': url, 'username': username})
        print(f"从 Excel 读取 {len(urls)} 个 URL 和用户名.")
    except FileNotFoundError:
        print(f"文件未找到: {file_path}")
    except Exception as e:
        print(f"读取 Excel 时发生错误: {e}")
    return urls

def get_urls_from_webdriver(url_info: Dict[str, str]) -> None:
    """Open a user's page in Selenium and scroll it to the bottom.

    Scrolling triggers the lazy-loaded video list; the actual request
    capture is external (traffic goes through the local proxy).

    Args:
        url_info: {'url': ..., 'username': ...} entry from the Excel sheet.
    """
    driver = initialize_driver()
    print("浏览器已打开。")
    try:
        url = url_info['url']
        driver.get(url)
        print(f"访问 URL: {url}")
        # Wait until the video-list container exists before scrolling.
        # Only the wait matters — the original bound the element to an
        # unused `container` variable.
        WebDriverWait(driver, 30).until(
            EC.presence_of_element_located((By.XPATH, '//*[@id="user-tabbar-2"]/div[2]/div/div'))
        )
        print("找到容器元素。")
        scroll_container(driver)
    except Exception as e:
        print(f"发生异常: {e}")
    finally:
        time.sleep(10)  # give in-flight requests time to finish before closing
        driver.quit()
        print("浏览器已关闭。")






def scroll_container(driver: webdriver.Chrome) -> None:
    """Keep scrolling the feed container until the end-of-list marker appears."""
    try:
        feed = WebDriverWait(driver, 30).until(
            EC.presence_of_element_located((By.XPATH, '//*[@id="douyin-right-container"]/div[2]'))
        )
        initial_height = driver.execute_script("return arguments[0].scrollHeight", feed)
        print(f"初始容器高度: {initial_height}")

        while True:
            time.sleep(SCROLL_PAUSE_TIME)
            driver.execute_script("arguments[0].scrollTo(0, arguments[0].scrollHeight);", feed)
            time.sleep(SCROLL_PAUSE_TIME)
            # Probe for the "no more content" marker element.
            try:
                marker_text = driver.find_element(
                    By.XPATH,
                    '//*[@id="douyin-right-container"]/div[2]/div/div/div/div[3]/div/div/div[2]/div[2]/div[2]/div/div'
                ).text
            except Exception:
                print("指定元素未出现，继续滚动。")
                continue
            if marker_text == '暂时没有更多了':
                print("指定元素已出现，停止滚动。" + marker_text)
                break
            print("指定元素未出现，继续滚动。")
    except Exception as e:
        print(f"滚动过程中发生错误: {e}")

def read_url_parameters(file_path: str = URLS_TXT_PATH) -> List[Dict[str, str]]:
    """Parse captured request parameters from a text file.

    Paragraphs (separated by a blank line) each describe one request:

        requestURL: https://www.douyin.com/aweme/1
        Cookie: ttwid=1%7C--54IaQ...
        User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64)...
        Referer: https://www.douyin.com/user/MS4wLjAB...

    Paragraphs may be visually separated by '---' lines; such separator
    paragraphs contain no 'key: value' lines and are now skipped silently
    (the original emitted a misleading "missing required fields" warning
    for them).

    Returns:
        A list of dicts, each containing at least requestURL, Cookie,
        User-Agent and Referer.  Empty on error.
    """
    parameters_list: List[Dict[str, str]] = []
    required = {'requestURL', 'Cookie', 'User-Agent', 'Referer'}
    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            content = f.read()
        for para in content.strip().split('\n\n'):
            param: Dict[str, str] = {}
            for line in para.strip().split('\n'):
                if ':' in line:
                    # split only on the first ':' — values (URLs) contain colons
                    key, value = line.split(':', 1)
                    param[key.strip()] = value.strip()
            if not param:
                continue  # decorative '---' separator or empty paragraph
            if required.issubset(param):
                parameters_list.append(param)
            else:
                print("警告: 某个段落缺少必要的字段。")
        print(f"从文件读取 {len(parameters_list)} 组参数。")
    except FileNotFoundError:
        print(f"文件未找到: {file_path}")
    except Exception as e:
        print(f"读取文件时发生错误: {e}")
    return parameters_list

def sanitize_filename(filename: str) -> str:
    """Strip characters Windows forbids in file names, plus trailing whitespace.

    Deletes ``< > : " / \\ | ? *`` in a single C-level pass via
    ``str.translate`` instead of nine chained ``str.replace`` calls.
    """
    # Mapping each forbidden character to None deletes it.
    invalid_chars = '<>:"/\\|?*'
    return filename.translate(str.maketrans('', '', invalid_chars)).rstrip()

def process_download_batch(task_batch, num_threads: int):
    """Run one batch of download tasks on a private thread pool.

    Each task tuple is splatted into download_video_content; a failed
    task is reported but does not stop the remaining ones.
    """
    with ThreadPoolExecutor(max_workers=num_threads) as pool:
        pending = [pool.submit(download_video_content, *task) for task in task_batch]
        for fut in pending:
            try:
                fut.result()
            except Exception as e:
                print(f"下载任务失败: {str(e)}")

def download_video(url_info: Dict[str, str]):
    """Resolve captured requests to video URLs and download them concurrently.

    Reads captured request parameters (read_url_parameters), fetches each
    aweme list, skips request URLs already present in download_records.csv,
    then fans the downloads out over a process pool whose workers each run
    a small thread pool (process_download_batch).

    Args:
        url_info: one {'url': ..., 'username': ...} entry from the Excel
            sheet (the original annotation ``List[Dict[str, str]]`` was
            wrong — the value is indexed as a dict).  Only 'username' is
            used here, as the target folder name.
    """
    download_record_file = 'download_records.csv'
    failed_download_record_file = 'failed_download_records.csv'

    # Create the success-record file with a header on first run.
    if not os.path.exists(download_record_file):
        with open(download_record_file, 'w', newline='', encoding='utf-8') as f:
            csv.writer(f).writerow(['URL', 'Title', 'Download Time'])
    # Same for the failure-record file.
    if not os.path.exists(failed_download_record_file):
        with open(failed_download_record_file, 'w', newline='', encoding='utf-8') as f:
            csv.writer(f).writerow(['URL', 'Title', 'Error Message'])

    # Load the set of request URLs that were already downloaded.
    downloaded_urls = set()
    with open(download_record_file, 'r', encoding='utf-8') as f:
        reader = csv.reader(f)
        next(reader)  # skip header
        for row in reader:
            downloaded_urls.add(row[0])

    parameters_list = read_url_parameters()
    download_tasks = []
    username = url_info['username']

    for params in parameters_list:
        headers = {
            'Cookie': params['Cookie'],
            'User-Agent': params['User-Agent'],
            'Referer': params['Referer']
        }
        requestURL = params['requestURL']

        if requestURL in downloaded_urls:
            print(f"视频已下载过，跳过: {requestURL}")
            continue

        try:
            # verify=False because traffic is routed through the local
            # capturing proxy (self-signed certificate).
            with requests.Session() as session:  # original leaked the session
                res = session.get(requestURL, headers=headers, proxies=proxy, verify=False, timeout=10)
                res.raise_for_status()
                aweme_list = res.json().get("aweme_list", [])

            for aweme in aweme_list:
                title = aweme.get("desc", "").replace('\n', '').strip() or time.strftime("%Y%m%d_%H%M%S")
                video_url = aweme.get("video", {}).get("play_addr", {}).get("url_list", [-1])[-1]

                if video_url == -1:
                    print(f"未找到视频 URL: {requestURL}")
                    # Record the request URL and real title — the original
                    # wrote the -1 sentinel and a placeholder 'aaaa' here.
                    with open(failed_download_record_file, 'a', newline='', encoding='utf-8') as f:
                        csv.writer(f).writerow([requestURL, title, "未找到视频 URL"])
                    continue

                user_folder = os.path.join(DOWNLOAD_DIR, username)
                os.makedirs(user_folder, exist_ok=True)
                download_tasks.append((video_url, title, headers, user_folder, requestURL))

        except Exception as e:
            print(f"获取视频信息失败: {str(e)}")
            # Use requestURL here: video_url/title may not be bound yet when
            # the metadata request itself fails (the original could raise
            # NameError inside this handler).
            with open(failed_download_record_file, 'a', newline='', encoding='utf-8') as f:
                csv.writer(f).writerow([requestURL, '', str(e)])

    # 8 processes x 2 threads each = 16 concurrent downloads (8C/16T box).
    num_processes = 8
    num_threads = 2

    # Split the task list into roughly process-sized batches.
    batch_size = max(1, len(download_tasks) // num_processes)
    task_batches = [download_tasks[i:i + batch_size] for i in range(0, len(download_tasks), batch_size)]

    # Overall progress bar, advanced one batch at a time.
    with tqdm(total=len(download_tasks), desc="总体下载进度") as total_pbar:
        with ProcessPoolExecutor(max_workers=num_processes) as process_executor:
            futures = [
                (process_executor.submit(process_download_batch, batch, num_threads), len(batch))
                for batch in task_batches
            ]
            # `batch_len` no longer shadows `batch_size` as the original did.
            for future, batch_len in futures:
                try:
                    future.result()
                    total_pbar.update(batch_len)
                except Exception as e:
                    print(f"进程执行失败: {str(e)}")


def download_video_content(video_url: str, title: str, headers: Dict[str, str], user_folder: str, request_url: str) -> None:
    """Download one video to user_folder, with retries and resume support.

    Streams to ``<name>.mp4.tmp`` and renames to ``<name>.mp4`` only after
    the byte count matches Content-Length; a pre-existing .tmp file is
    resumed with an HTTP Range request.

    Args:
        video_url: direct media URL.
        title: raw video title (sanitized for the file name).
        headers: request headers; copied locally so adding 'Range' cannot
            leak into sibling downloads sharing the same dict.
        user_folder: destination directory (must already exist).
        request_url: originating aweme request URL, written to the success
            record so download_video's de-duplication check can match it.
    """
    # Defensive copy: the caller shares one headers dict across many tasks
    # running on several threads, and the original mutated it ('Range').
    headers = dict(headers)

    session = requests.Session()
    retries = Retry(total=5, backoff_factor=0.5, status_forcelist=[500, 502, 503, 504, 429])
    session.mount('http://', HTTPAdapter(max_retries=retries))
    session.mount('https://', HTTPAdapter(max_retries=retries))

    file_path = None
    try:
        safe_title = sanitize_filename(title)
        file_path = os.path.join(user_folder, f"{safe_title}.mp4")
        temp_path = file_path + '.tmp'

        # Never overwrite a finished file: fall back to a timestamped name.
        if os.path.exists(file_path):
            timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
            file_path = os.path.join(user_folder, f"{safe_title}_{timestamp}.mp4")
            print(f"文件已存在，使用新文件名进行下载: {file_path}")

        # Initial request: provides the total size (and the stream, when
        # downloading from byte 0).  verify=False: local capturing proxy.
        response = session.get(
            video_url,
            stream=True,
            headers=headers,
            proxies=proxy,
            verify=False,
            timeout=(5, 30)
        )
        response.raise_for_status()
        total_size = int(response.headers.get('content-length', 0))

        # Resume from an existing partial download, if any.
        downloaded_size = 0
        if os.path.exists(temp_path):
            downloaded_size = os.path.getsize(temp_path)
            if downloaded_size < total_size:
                headers['Range'] = f'bytes={downloaded_size}-'
                response.close()  # discard the full-file stream before re-requesting
                response = session.get(
                    video_url,
                    stream=True,
                    headers=headers,
                    proxies=proxy,
                    verify=False,
                    timeout=(5, 30)
                )

        mode = 'ab' if downloaded_size > 0 else 'wb'
        chunk_size = 1024 * 1024  # 1 MiB

        # Per-file progress bar.
        with tqdm(
            total=total_size,
            initial=downloaded_size,
            unit='iB',
            unit_scale=True,
            desc=f"下载 {safe_title}",
            position=0,
            leave=True
        ) as pbar:
            with open(temp_path, mode) as f:
                for chunk in response.iter_content(chunk_size=chunk_size):
                    if chunk:
                        pbar.update(f.write(chunk))

        # Promote the temp file only when the size matches exactly.
        if os.path.getsize(temp_path) == total_size:
            os.rename(temp_path, file_path)
            print(f"已成功下载并保存视频: {file_path}")

            # Record request_url (not video_url): download_video de-duplicates
            # on the request URL, so the original's video_url entry made the
            # "already downloaded" check never match.
            with open('download_records.csv', 'a', newline='', encoding='utf-8') as f:
                csv.writer(f).writerow([request_url, title, datetime.now().strftime('%Y-%m-%d %H:%M:%S')])
        else:
            # Keep the partial .tmp so a later run can resume it.
            print(f"严重错误: 下载的文件大小不正确，保留文件: {temp_path}")
            raise Exception("下载的文件大小不正确")

    except Exception as e:
        print(f"下载视频失败: {str(e)}")
        # NOTE(review): file name differs from download_video's
        # 'failed_download_records.csv' — confirm whether the '1' suffix is
        # intentional (e.g. to avoid cross-process file contention).
        with open('failed_download_records1.csv', 'a', newline='', encoding='utf-8') as f:
            csv.writer(f).writerow([video_url, title, str(e)])
    finally:
        session.close()


def extract_audio_from_all_videos() -> None:
    """Extract the audio track of every .mp4 in DOWNLOAD_DIR to an .mp3.

    Videos without an audio track are reported and skipped.  Each clip is
    closed explicitly in a ``finally`` — the original never closed them,
    leaking the underlying ffmpeg reader handles.
    """
    print("开始批量提取音频...")
    os.makedirs(AUDIO_DIR, exist_ok=True)
    for filename in os.listdir(DOWNLOAD_DIR):
        if not filename.lower().endswith('.mp4'):
            continue
        video_path = os.path.join(DOWNLOAD_DIR, filename)
        clip = None
        try:
            clip = VideoFileClip(video_path)
            audio = clip.audio
            if audio:
                mp3_path = os.path.join(AUDIO_DIR, os.path.splitext(filename)[0] + '.mp3')
                audio.write_audiofile(mp3_path)
                print(f"已提取音频并保存为: {mp3_path}")
            else:
                print(f"视频中未找到音频: {video_path}")
        except Exception as e:
            print(f"提取音频时发生错误 ({video_path}): {e}")
        finally:
            if clip is not None:
                clip.close()
    print("音频提取完成。")


def main():
    """Entry point: for each user, scroll their page, then download captures.

    The capture file (URLS_TXT_PATH) is cleared before the run and after
    each user so the next user's captures start fresh.
    """
    # Start from a clean capture file; a missing file is fine.  The original
    # bare `except:` also swallowed KeyboardInterrupt/SystemExit.
    try:
        os.remove(URLS_TXT_PATH)
    except OSError:
        print("文件不存在")
    urls = read_urls_from_excel()  # (url, username) pairs from Excel
    for url_info in urls:
        get_urls_from_webdriver(url_info)  # scroll page; proxy captures requests
        download_video(url_info)           # download everything captured
        # Clear the capture file for the next user; tolerate absence — the
        # original's unguarded remove raised FileNotFoundError when nothing
        # had been captured.
        try:
            os.remove(URLS_TXT_PATH)
        except OSError:
            pass

if __name__ == "__main__":
    main()
