import os
import random
import time
import requests
import re
from tqdm import tqdm
import glob
from userAgent import generate_user_agents
import threading

# Suppress InsecureRequestWarning spam — requests below use verify=False.
requests.packages.urllib3.disable_warnings()

# Request headers: a single User-Agent is chosen once at import time and
# reused for every request made by this script.
headers = {'User-Agent': random.choice(generate_user_agents(100)), 'Connection': 'close'}

# Folder containing the downloaded .m3u8 playlist files.
# folder_path = 'testu8'
folder_path = 'm3u8'

# Recursively collect every file under folder_path, with a progress bar.
# NOTE: this runs at import time; one worker thread is spawned per file below.
file_paths = list(tqdm(glob.glob(folder_path + '/**/*', recursive=True), desc="获取文件列表进度"))

# Candidate CDN base URLs for the live-stream segments; probed in order
# by getStatusUrl until one answers HTTP 200.
m3u8Url = ["https://dtliving-sz.dingtalk.com/live_hp/",
           "https://dtliving-sh.dingtalk.com/live_hp/",
           "https://dtliving-bj.dingtalk.com/live_hp/"]


def wirtError(e):
    """Append a timestamped error entry to error.log.

    NOTE: the name keeps its original (misspelled) form because other
    functions in this file call it by that name.
    """
    stamp = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
    entry = f"时间: {stamp}, 错误信息: {e}\n"
    with open("error.log", "a", encoding="utf-8") as handle:
        handle.write(entry)

def getStatusUrl(fileName, m3u8, i, proxiesIP):
    """Probe m3u8Url[i] + m3u8 and return the first base URL answering HTTP 200.

    Recurses through the remaining candidate hosts, fetching a fresh proxy
    for each retry.  Returns None when every host has been tried, after
    recording the failure in log.txt and errorUrl.txt.
    """
    url = m3u8Url[i] + m3u8
    print(f"时间: {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())}, 错误信息: 第{i}次,正在检测{url}....")
    response = resRequest(url, proxiesIP)
    if response.status_code == 200:
        print(f"时间: {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())}, 错误信息: 域名检测成功{m3u8}的域名是{m3u8Url[i]}")
        return m3u8Url[i]
    i += 1
    if len(m3u8Url) > i:
        print(f"时间: {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())}, 错误信息: 第{i},次检测{url}....")
        return getStatusUrl(fileName, m3u8, i, getproxies())
    # FIX: the original wrote through the module-global `log` handle, which is
    # opened and closed per-thread inside process_file — a cross-thread race
    # (NameError / "I/O operation on closed file").  Use a local handle instead.
    with open("log.txt", "a", encoding="utf-8") as logf:
        logf.write(f"时间: {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())}, 错误信息: 第{i},{url}域名匹配失败...." + "\n")
    with open("./errorUrl.txt", "a", encoding="utf-8") as f:
        f.write(f"时间: {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())}, 错误信息: {m3u8}没有找到合适的域名, 文件名称是: {fileName}" + "\n")
    return None  # explicit: no host matched


def getMp4Url(matches, fileName, m3u8, i, proxiesIP):
    """Resolve the working base URL for this playlist and build full segment URLs.

    Returns a list of absolute .ts URLs (base + match), or an empty list when
    no candidate host matched.  FIX: the original fell through and returned
    None on failure, which made the caller's `for item in tqdm(urls, ...)`
    raise TypeError; an empty list keeps the caller safe.
    """
    status = getStatusUrl(fileName, m3u8, i, proxiesIP)
    if status is None:
        print("============================域名没有返回值==========================")
        with open("./errorUrl.txt", "a", encoding="utf-8") as f:
            f.write(f"时间: {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())}, 错误信息: {m3u8}域名没找到, 文件名称是: {fileName}" + "\n")
        return []
    return [status + match for match in matches]


def getproxies():
    """Fetch one proxy from the xiaoxiangdaili API and return it as "ip:port".

    Retries in a loop — FIX: the original recursed on every rate-limit
    response (code 1010) and on every exception, which eventually raises
    RecursionError under sustained failure.  A timeout is added so a dead
    API cannot hang the worker thread forever, and the JSON body is parsed
    once instead of three times.
    """
    while True:
        try:
            response = requests.get(
                "https://api.xiaoxiangdaili.com/ip/get?appKey=1167336741548740608&appSecret=qoc3awz8&cnt=1&wt=json",
                timeout=10)
            payload = response.json()
            if payload["code"] == 1010:
                # rate-limited: wait and ask again
                print(f"时间: {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())}, 错误信息: 我正在尝试获取代理ip......")
                time.sleep(10)
                continue
            ip = payload["data"][0]["ip"]
            prot = payload["data"][0]["port"]
            print(f"时间: {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())}, 错误信息: 我获取到代理ip了,{ip}:{prot}")
            return f"{ip}:{prot}"
        except Exception as e:
            # log and retry immediately, matching the original recursive retry
            wirtError(e)




def resRequest(item, proxiesIP):
    """GET `item` and return the Response; proxiesIP == "0" means direct.

    On any request failure: log the error, sleep 15s, fetch a fresh proxy,
    and retry.  FIX: the retry is now an iterative loop (the original
    recursed without bound and could raise RecursionError), and a timeout
    is set so a stalled server cannot hang the worker thread forever.
    """
    while True:
        try:
            if proxiesIP == "0":
                return requests.get(item, verify=False, headers=headers, timeout=30)
            return requests.get(item, verify=False, headers=headers,
                                proxies={"https": proxiesIP}, timeout=30)
        except Exception as e:
            wirtError(e)
            print(f"时间: {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())}, 错误信息: 我在更换ip代理重新请求: {item}")
            time.sleep(15)
            proxiesIP = getproxies()  # subsequent attempts go through a fresh proxy


def process_file(file_path):
    """Worker thread body: extract .ts segment paths from one playlist file,
    resolve their base URL, and append each downloaded segment to one .mp4.
    """
    # NOTE(review): `log` is a module-level global rebound by every thread and
    # read by getStatusUrl — racy, but kept for compatibility with callers.
    global log
    log = open("log.txt", "a", encoding="utf-8")
    try:
        if not os.path.isfile(file_path):
            return
        with open(file_path, 'r', encoding="utf-8") as file:
            content = file.read()
        fileName = os.path.basename(file_path).split(".")[0]
        # UUID-prefixed .ts path followed by its auth_key query string
        pattern = r'([0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}\/[\d]+\.ts\?auth_key=[\d\w-]+)'
        matches = re.findall(pattern, content)
        if not matches:
            # FIX: original indexed matches[0] unconditionally -> IndexError
            # on playlist files containing no ts links
            wirtError(f"{file_path} 没有匹配到ts链接")
            return
        urls = getMp4Url(matches, fileName, matches[0], 0, "0")
        if not urls:
            # FIX: original passed a possible None straight into tqdm() -> TypeError
            return
        for item in tqdm(urls, desc=f"处理 {fileName} 文件内链接进度"):
            time.sleep(2)  # throttle requests to the CDN
            response = resRequest(item, "0")
            # append each segment to a single output file in download order
            with open("/disk/data/杰哥数学/{0}.mp4".format(fileName), "ab") as f:
                f.write(response.content)
    finally:
        # FIX: original skipped log.close() whenever an exception escaped
        log.close()


# Fan out: one worker thread per playlist file, started as it is created.
worker_threads = []
for path in file_paths:
    worker = threading.Thread(target=process_file, args=(path,))
    worker_threads.append(worker)
    worker.start()

# Block until every worker has finished.
for worker in worker_threads:
    worker.join()
