import requests
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
import datetime
import time
import re
import threading
import random


def append_to_file(file_path, text):
    """Append one timestamped line of *text* to the log file at *file_path*.

    Failures are printed rather than raised so that logging can never
    interrupt the caller's download flow.
    """
    stamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S >>> ")
    try:
        with open(file_path, 'a', encoding='utf-8') as log:
            log.write(stamp + text + '\n')
    except Exception as e:
        print(f"An error occurred: {e}")


def sanitize_filename(filename):
    """Make *filename* safe to use as a file name on Windows and Unix.

    An empty name is replaced by a random "titleNoneNNN" placeholder so the
    download still gets a distinct file; spaces become underscores; and
    characters illegal in Windows file names (< > : " / \\ | ? *) are removed.
    """
    if not filename:
        filename = "titleNone" + str(random.randint(1, 65535))
    filename = filename.replace(" ", "_")
    # Bug fix: the original pattern [<>:"/\|?*] escaped '|' but never matched
    # a literal backslash, so '\' (illegal on Windows) survived.  '\\' in the
    # class now strips it as intended.
    return re.sub(r'[<>:"/\\|?*]', '', filename)

def download_video(downloadUrl, video_save_path):
    """Stream the video at *downloadUrl* to the local path *video_save_path*.

    Returns 0 on success and -1 on any request/HTTP failure.  Outcomes are
    echoed to stdout and appended to the shared log file (loge_file_path).
    """
    try:
        # Stream in 8 KiB chunks so large videos are never held fully in
        # memory.  A timeout is added so a stalled server cannot hang the
        # downloader forever (connect 10s, read 60s); requests.Timeout is a
        # RequestException subclass, so the handler below still catches it.
        with requests.get(downloadUrl, stream=True, timeout=(10, 60)) as response:
            response.raise_for_status()  # raise on 4xx/5xx status codes
            with open(video_save_path, 'wb') as file:
                for chunk in response.iter_content(chunk_size=8192):
                    file.write(chunk)
        print(f"视频已成功下载并保存到 {video_save_path}")
        append_to_file(loge_file_path, f"视频已成功下载并保存到 {video_save_path}")
        return 0
    except requests.exceptions.RequestException as e:
        print(f"下载视频时出错: {e}")
        append_to_file(loge_file_path, f"下载视频时出错 {video_save_path}")
        return -1

def download_videos(threads, video_page_url, video_save_path, bool):
    """Dispatch one video download, either on a worker thread or inline.

    A truthy last argument runs the download on a new thread that is
    appended to *threads* (so the caller can join() later); a falsy value
    runs it synchronously in this thread.

    NOTE(review): the last parameter shadows the builtin ``bool``; the name
    is kept unchanged for caller compatibility.
    """
    if not bool:
        # Synchronous path: parse and download right here.
        parse_video_download_url(video_page_url, video_save_path)
        return
    # Asynchronous path: hand the work to a fresh thread and register it.
    worker = threading.Thread(
        target=parse_video_download_url,
        args=(video_page_url, video_save_path),
    )
    threads.append(worker)
    worker.start()

def parse_video_download_url(videoPageUrl, video_save_path):
    """Open *videoPageUrl* in headless Edge, locate its <source> tags and
    download the first source URL that succeeds.

    Always returns None; success or failure is reported through
    download_video's logging.
    """
    options = webdriver.EdgeOptions()
    options.add_argument('--headless')  # no visible browser window needed here
    driver = webdriver.Edge(options=options)
    try:
        driver.get(videoPageUrl)

        # Explicitly wait (up to 20s) for a <source> tag instead of sleeping.
        try:
            WebDriverWait(driver, 20).until(
                EC.presence_of_element_located((By.TAG_NAME, "source"))
            )
        except TimeoutException:
            # Bug fix: the original called driver.quit() here and then fell
            # through to driver.page_source on a dead session.  Bail out
            # instead; the finally block performs the quit.
            print("Loading took too much time!")
            return None

        # Parse the rendered HTML and try each <source> src in order.
        soup = BeautifulSoup(driver.page_source, 'html.parser')
        for link in soup.find_all('source'):
            video_download_url = link.get('src')
            if video_download_url is not None:
                # The page uses protocol-relative URLs ("//…"); make absolute.
                video_download_url = "https:" + video_download_url
                print(video_download_url)
                print("****************")
                if download_video(video_download_url, video_save_path) == 0:
                    break  # stop after the first successful download
    except Exception as e:
        print(f"An error occurred: {e}")
    finally:
        driver.quit()  # always release the WebDriver session
    return None

def process_url(input_string):
    """Normalise a scraped href into an absolute douyin.com URL.

    Protocol-relative links ("//www.douyin.com…") get an https: scheme,
    site-relative "/video…" links get the domain prepended, and anything
    else yields the literal string "Invalid URL format" (callers rely on
    this sentinel value).
    """
    rewrites = (
        ("//www.douyin.com", "https:"),
        ("/video", "https://www.douyin.com"),
    )
    for prefix, base in rewrites:
        if input_string.startswith(prefix):
            return base + input_string
    return "Invalid URL format"

# Scroll the douyin feed container until no new content loads.
def scroll_to_bottom(driver):
    """Repeatedly scroll the feed container to its bottom until its
    scrollHeight stops growing, i.e. every lazy-loaded item is present.
    """
    # The scrollable feed lives in this container.  The trailing class hash
    # (IhmVuo1S) is generated by douyin's build and may change over time.
    # Hoisted here once instead of triplicating the selector string.
    select_js = (
        "var element = document.querySelector("
        "'#douyin-right-container > div.parent-route-container"
        ".route-scroll-container.IhmVuo1S');"
    )
    get_height_js = select_js + " return element.scrollHeight"
    scroll_js = select_js + " element.scrollTo(0, element.scrollHeight);"

    last_height = driver.execute_script(get_height_js)
    while True:
        print(last_height)
        driver.execute_script(scroll_js)
        time.sleep(2)  # give the page time to lazy-load the next batch
        new_height = driver.execute_script(get_height_js)
        if new_height == last_height:
            print("already bottom!")
            break
        last_height = new_height

# Handle the login popup: manual QR-code login or just dismiss it.
def scan_loading(driver, bool):
    """Deal with douyin's login popup.

    Truthy last argument: block until the operator scans the QR code and
    presses Enter, then wait for the logged-in page.  Falsy: dismiss the
    popup without logging in.

    NOTE(review): the last parameter shadows the builtin ``bool``; the name
    is kept unchanged for caller compatibility.
    """
    try:
        # The close button doubles as our "popup is present" probe (10s max).
        close_button = WebDriverWait(driver, 10).until(
            EC.element_to_be_clickable((By.CSS_SELECTOR, "#login-pannel > div > div > div.douyin-login__close.dy-account-close"))
        )
        if not bool:
            close_button.click()  # skip login, just dismiss the dialog
            return
        # Block until the operator confirms the QR scan finished.
        input("请手动扫码登录，完成后按回车键继续...")
        wait_check(driver)
    except Exception as e:
        print(f"No popup found or unable to close it. Error: {e}")

# Switch the login popup to password login.
def auto_loading(driver):
    """Switch the login dialog to the account/password tab and focus the
    account input field (password-login flow)."""
    try:
        wait = WebDriverWait(driver, 10)

        # Tab that flips the dialog to account/password login.
        password_tab = wait.until(
            EC.element_to_be_clickable((By.CSS_SELECTOR, "#web-login-container > article > article > article > div.web-login-common-wrapper__tab > ul.web-login-tab-list > li:nth-child(3)"))
        )
        password_tab.click()

        # Account-number input on the password form; click to focus it.
        account_input = wait.until(
            EC.element_to_be_clickable((By.CSS_SELECTOR, "#web-login-container > article > article > article > form > div.web-login-account-password__account-input-wrapper > div > input"))
        )
        account_input.click()
    except Exception as e:
        print(f"No popup found or unable to close it. Error: {e}")


def wait_check(driver):
    """Wait up to 20s for the post-login feed element (class "E5QmyeTo",
    a build-generated hash) to appear; quit the driver if it never does."""
    try:
        WebDriverWait(driver, 20).until(
            EC.presence_of_element_located((By.CLASS_NAME, "E5QmyeTo"))
        )
    except TimeoutException:
        print("Loading took too much time!")
        driver.quit()

def download_douyin_video(user_page_url, video_file_path):
    """Open a douyin user page, collect every video card and download each
    video into the *video_file_path* directory.

    Requires the operator to complete a manual QR-code login when the
    login popup appears (see scan_loading).
    """
    threads = []
    # Visible (non-headless) Edge so the operator can scan the login QR code.
    options = webdriver.EdgeOptions()
    # options.add_argument('--headless')
    driver = webdriver.Edge(options=options)
    driver.get(user_page_url)

    scan_loading(driver, 1)  # 0: dismiss the popup, 1: wait for manual QR login
    scroll_to_bottom(driver)  # load the complete video list

    # Parse the fully scrolled page for video cards.  The class hashes are
    # build-generated and may need updating when the site changes.
    soup = BeautifulSoup(driver.page_source, 'html.parser')
    lis = soup.select('li.wqW3g_Kl.WPzYSlFQ.OguQAD1e')
    driver.quit()
    for li in lis:
        title_tag = li.find('p', class_='eJFBAbdI H4IE9Xgd')
        anchor = li.find('a')
        # Robustness fix: skip malformed cards instead of crashing with
        # AttributeError on .text / .get('href') when find() returns None.
        if title_tag is None or anchor is None or anchor.get('href') is None:
            continue
        titleName = sanitize_filename(title_tag.text)
        video_save_path = f"{video_file_path}{titleName}.mp4"
        video_page_url = process_url(anchor.get('href'))
        download_videos(threads, video_page_url, video_save_path, 0)  # 0: single-threaded, 1: multi-threaded
    # Wait for any worker threads (a no-op in single-threaded mode).
    for thread in threads:
        thread.join()


# Script configuration: shared log file, download directory and target user
# page.  These stay at module level because download_video reads
# loge_file_path as a global.
loge_file_path = "./parseVideo/log.txt"
video_file_path = "./parseVideo/"
user_page_url = 'https://www.douyin.com/user/MS4wLjABAAAATD7l0PcZ_GMAggwStSqoHcl06CH-ZKuTE_fwZhpuiGYaeDyGDnQUzFsRgPeLCimM?'

if __name__ == "__main__":
    # Guarded entry point so importing this module no longer launches a
    # browser session as a side effect.
    download_douyin_video(user_page_url, video_file_path)