import requests
from http.server import BaseHTTPRequestHandler, HTTPServer
import threading
import time
import datetime
import os
import urllib.parse

# ====== Configuration ======
# Upstream playlist sources: plain-text channel lists (TXT / M3U style).
# Each is fetched by fetch_all() and, if it looks valid, merged into OUTPUT_FILE.
urls = [
    "http://47.120.41.246:8899/zb.txt",
    "https://3043.kstore.space/bhvip/bhzb.txt",
    "https://tv.850930.xyz/kdsb.txt",
    "https://raw.githubusercontent.com/kimwang1978/collect-tv-txt/refs/heads/main/merged_output.txt",
    "https://raw.githubusercontent.com/jiangnan1224/iptv_ipv4_live/refs/heads/main/live_ipv4.txt",
    "https://raw.githubusercontent.com/Guovin/iptv-api/gd/output/result.txt",
    "https://raw.githubusercontent.com/wwb521/live/refs/heads/main/tv.txt",
    "https://raw.githubusercontent.com/MemoryCollection/IPTV/refs/heads/main/hotel.txt",
    "https://raw.githubusercontent.com/mlzlzj/hnyuan/refs/heads/main/iptv_list.txt",
    "https://raw.githubusercontent.com/Supprise0901/TVBox_live/main/live.txt",
    "https://raw.githubusercontent.com/gaotianliuyun/gao/master/list.txt",
    "https://raw.githubusercontent.com/zwc456baby/iptv_alive/master/live.txt",
    "https://raw.githubusercontent.com/vbskycn/iptv/master/tv/iptv4.txt",
    "https://raw.githubusercontent.com/vbskycn/iptv/master/tv/hd.txt",
    "https://raw.githubusercontent.com/junge3333/juds6/main/yszb1.txt",
    "https://raw.githubusercontent.com/zzmaze/iptv/main/itvlist.txt",
    "https://raw.githubusercontent.com/maitel2020/iptv-self-use/main/iptv.txt",
    "https://raw.githubusercontent.com/n3rddd/CTVLive/refs/heads/main/live.txt",
    "https://raw.githubusercontent.com/xiongjian83/TvBox/refs/heads/main/live.txt",
    "https://raw.githubusercontent.com/yoursmile66/TVBox/refs/heads/main/live.txt",
    "https://raw.githubusercontent.com/yuanzl77/IPTV/refs/heads/main/live.txt",
    "https://fs-im-kefu.7moor-fs1.com/ly/4d2c3f00-7d4c-11e5-af15-41bf63ae4ea0/1715581924111/live1.txt",
    "https://fs-im-kefu.7moor-fs1.com/ly/4d2c3f00-7d4c-11e5-af15-41bf63ae4ea0/1715581924675/live2.txt",
    "https://cc-im-kefu-cos.7moor-fs2.com/im/2768a390-5474-11ea-afc9-7b323e3e16c0/d4fe44c5-107c-4511-af02-aa08fb10dff7/2024-04-25/2024-04-25_17:22:21/1714036941087/98644330/wexiptv.txt",
    "https://fs-im-kefu.7moor-fs1.com/ly/4d2c3f00-7d4c-11e5-af15-41bf63ae4ea0/1716213337323/live3.txt",
    "https://live.zbds.top/tv/iptv4.txt",
    "https://live.zbds.top/tv/iptv6.txt",
    "https://gitlab.com/p2v5/wangtv/-/raw/main/lunbo.txt",
    "https://m3u.ibert.me/txt/fmml_ipv6.txt",
    "https://m3u.ibert.me/txt/ycl_iptv.txt",
    "https://m3u.ibert.me/txt/y_g.txt",
    "https://raw.githubusercontent.com/alenin-zhang/IPTV/refs/heads/main/lenin.txt",
    "https://15280.kstore.space/%E5%A5%A5%E5%8F%B0.txt",
    "http://38.165.20.168/js/%E7%9B%B4%E6%92%AD.txt",
    "https://gitee.com/tushaoyong/live/raw/master/%E6%8E%A5%E5%8F%A3/IPV6.txt"
]

# Directory where fetched files are stored and served from (Windows path).
LOCAL_DIR = "E:\\gg\\tvyuan\\showTV\\show"
# The aggregated playlist written by fetch_all() and served over HTTP.
OUTPUT_FILE = os.path.join(LOCAL_DIR, "live.txt")

# Lock so fetch_all never runs concurrently (midnight scheduler thread
# and the manual /update endpoint can both trigger it).
fetch_lock = threading.Lock()


def fetch_all():
    """Fetch every source in ``urls`` and write the merged text to OUTPUT_FILE.

    Serialized with ``fetch_lock`` so the midnight scheduler and the manual
    ``/update`` endpoint cannot write the output file concurrently.
    Individual source failures are logged and skipped (best-effort aggregation).
    """
    with fetch_lock:
        all_content = []
        print("开始请求地址...")

        for url in urls:
            try:
                print(f"请求: {url}")
                resp = requests.get(url, timeout=15)
                # Fail fast on 4xx/5xx — otherwise an HTML error page that
                # happens to contain "http://" would pass the marker check
                # below and pollute the aggregate.
                resp.raise_for_status()
                resp.encoding = "utf-8"
                text = resp.text

                # Sanity heuristic: a usable playlist contains at least one
                # of these markers; otherwise assume garbage/mojibake.
                if any(ch in text for ch in ["#EXTM3U", "#genre#", "http://", "https://"]):
                    all_content.append(text)
                    print(f"✅ 成功: {url}")
                else:
                    print(f"⚠️ 可能是乱码: {url}")
            except Exception as e:
                # One bad source must not abort the whole run.
                print(f"❌ 请求失败: {url} - {e}")

        final_result = "\n".join(all_content)

        os.makedirs(LOCAL_DIR, exist_ok=True)
        with open(OUTPUT_FILE, "w", encoding="utf-8") as f:
            f.write(final_result)

        print(f"✔️ 汇总完成，共 {len(all_content)} 个源，已保存到 {OUTPUT_FILE}")


def scheduler():
    """Loop forever: sleep until the next midnight, then refresh all sources."""
    while True:
        current = datetime.datetime.now()
        next_midnight = (current + datetime.timedelta(days=1)).replace(
            hour=0, minute=0, second=0, microsecond=0
        )
        wait_seconds = (next_midnight - current).total_seconds()
        print(f"⏳ 距离下次刷新还有 {int(wait_seconds)} 秒")
        time.sleep(wait_seconds)
        print("🌙 午夜刷新开始")
        fetch_all()


class RequestHandler(BaseHTTPRequestHandler):
    """Serves files from LOCAL_DIR by bare filename and exposes /update
    to trigger a manual refresh in the background."""

    # Extension → Content-Type header; anything unknown falls back to
    # application/octet-stream. (.png was previously mislabeled text/plain,
    # which broke image rendering in browsers.)
    CONTENT_TYPES = {
        ".json": "application/json; charset=utf-8",
        ".jar": "application/java-archive",
        ".txt": "text/plain; charset=utf-8",
        ".png": "image/png",
        ".zip": "application/octet-stream",
    }

    def do_GET(self):
        # Log the raw request for troubleshooting.
        print(f"\n[REQUEST] client={self.client_address} raw_path={self.path}")

        parsed = urllib.parse.urlparse(self.path)
        path = urllib.parse.unquote(parsed.path)   # drop query, decode percent-encoding
        path = os.path.normpath(path)

        # Match on the parsed path (not raw self.path) so that e.g.
        # "/update?t=123" also triggers the refresh.
        if parsed.path == "/update":
            # Run the fetch in the background and answer immediately.
            threading.Thread(target=fetch_all, daemon=True).start()
            self.send_response(200)
            self.send_header("Content-Type", "text/plain; charset=utf-8")
            self.end_headers()
            # Point at the actual output file name (live.txt, not output.txt).
            msg = "更新任务已触发，请稍后查看 live.txt"
            self.wfile.write(msg.encode("utf-8"))
            return

        # Serve only by basename — rejects subdirectories and path-traversal
        # attempts like /../secret.
        filename = os.path.basename(path)
        file_path = os.path.join(LOCAL_DIR, filename)

        print(f"  -> serving filename={filename} file_path={file_path} abs_local={os.path.abspath(LOCAL_DIR)}")
        if os.path.isfile(file_path):
            _, ext = os.path.splitext(filename)
            content_type = self.CONTENT_TYPES.get(ext.lower(), "application/octet-stream")

            self.send_response(200)
            self.send_header("Content-Type", content_type)
            self.end_headers()
            with open(file_path, "rb") as f:
                try:
                    # Stream in chunks so large files are never fully buffered.
                    while True:
                        chunk = f.read(8192)
                        if not chunk:
                            break
                        self.wfile.write(chunk)
                except BrokenPipeError:
                    print("客户端提前断开连接")
        else:
            self.send_response(404)
            self.end_headers()
            self.wfile.write(b"File not found")

def run_server(port=1153):
    """Start the blocking HTTP file server on all interfaces.

    Args:
        port: TCP port to listen on (default 1153).
    """
    server = HTTPServer(("0.0.0.0", port), RequestHandler)
    print(f"\nHTTP 服务已启动，端口 {port}")
    # The aggregate is saved as live.txt (OUTPUT_FILE) — the old banner
    # pointed users at a non-existent output.txt.
    print(f"访问 http://127.0.0.1:{port}/live.txt 查看聚合结果")
    print(f"访问 http://127.0.0.1:{port}/update 立即刷新数据")
    print(f"访问 http://127.0.0.1:{port}/ouge.json 等查看本地文件")
    server.serve_forever()


if __name__ == "__main__":
    # Fetch once at startup so the server has data to serve immediately.
    fetch_all()

    # Daemon thread: automatically refresh every day at midnight.
    t = threading.Thread(target=scheduler, daemon=True)
    t.start()

    # Start the HTTP server (blocks forever).
    run_server()
