import os
import re
import sys
import time
from pathlib import Path
from urllib.parse import urlparse, unquote
from concurrent.futures import ThreadPoolExecutor, as_completed

import requests
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import TimeoutException
from webdriver_manager.chrome import ChromeDriverManager
from tqdm import tqdm


# ====== 通用配置 ======
# Resolve the project root for both execution modes: a PyInstaller-frozen
# executable anchors on the executable's directory, a plain interpreter run
# anchors two levels above this source file.
_IS_FROZEN = bool(getattr(sys, 'frozen', False))
PROJECT_ROOT = (
    Path(sys.executable).resolve().parent
    if _IS_FROZEN
    else Path(__file__).resolve().parents[2]
)

SAVE_DIR = PROJECT_ROOT / "downloads"
DEBUG_DIR = PROJECT_ROOT / "debug"

# Create both working directories up front so later writes never fail.
for _dir in (SAVE_DIR, DEBUG_DIR):
    _dir.mkdir(exist_ok=True)


# ====== 颜色工具 ======
class Colors:
    """ANSI escape codes used to colorize console output.

    Every printed message in this script wraps its text between one of
    these codes and ``ENDC``, which resets the terminal style.
    """
    HEADER = '\033[95m'   # bright magenta — section headers
    OKBLUE = '\033[94m'   # blue — informational messages
    OKCYAN = '\033[96m'   # cyan — timing summaries
    OKGREEN = '\033[92m'  # green — success messages
    WARNING = '\033[93m'  # yellow — non-fatal warnings
    FAIL = '\033[91m'     # red — errors
    ENDC = '\033[0m'      # reset all styling
    BOLD = '\033[1m'      # bold text


# ====== Selenium 抓取器 ======
class WebScraper:
    """Scrapes image URLs from a Dribbble shot page using headless Chrome.

    Lifecycle: construct (launches the browser), call :meth:`scrape_images`,
    optionally :meth:`save_urls_to_txt`, then :meth:`close`.
    """

    def __init__(self):
        """Launch a headless Chrome instance managed by webdriver-manager."""
        print(f"{Colors.HEADER}🚀 正在创建浏览器实例...{Colors.ENDC}")
        start = time.time()

        options = webdriver.ChromeOptions()
        options.add_argument("--headless")
        options.add_argument("--disable-gpu")
        options.add_argument("--ignore-certificate-errors")
        options.add_argument("--allow-insecure-localhost")

        self.driver = webdriver.Chrome(
            service=Service(ChromeDriverManager().install()),
            options=options
        )

        print(f"{Colors.OKGREEN}✅ 浏览器已就绪，耗时 {round(time.time() - start, 2)} 秒{Colors.ENDC}")
        # Sanitized page title; doubles as the per-page download folder name.
        self.page_title = ""

    def _sanitize_filename(self, name: str, max_len: int = 24) -> str:
        """Remove characters illegal in file names and truncate the result.

        Illegal characters are stripped (not replaced), spaces become
        underscores, and the result is capped at *max_len* characters.
        Returns "Untitled" when nothing usable remains, so callers never
        build a path from an empty folder name.
        """
        name = re.sub(r'[<>:"/\\|?*\n\r]+', '', name)
        name = name.strip().replace(' ', '_')
        return name[:max_len] or "Untitled"

    def scrape_images(self, url):
        """Open *url* and collect the direct image URLs found on the page.

        Returns a (possibly empty) list of cleaned URLs. On timeout or any
        other failure a debug snapshot is saved and ``[]`` is returned.
        Side effect: sets ``self.page_title`` from the document title.
        """
        print(f"{Colors.HEADER}开始解析页面: {url}{Colors.ENDC}")
        start = time.time()
        self.driver.get(url)
        wait = WebDriverWait(self.driver, 8)

        try:
            # find_elements returns [] (falsy) while nothing matches, so
            # until() keeps polling up to the 8-second timeout.
            elements = wait.until(
                lambda d: d.find_elements(By.CSS_SELECTOR, ".shot-page-container div > a > img"),
                message="未找到图片元素"
            )

            raw_title = self.driver.title.strip() or "Untitled"
            self.page_title = self._sanitize_filename(raw_title, max_len=24)
            print(f"{Colors.OKBLUE}检测到 {len(elements)} 个图片元素 [{self.page_title}]{Colors.ENDC}")

            urls = []
            for idx, img in enumerate(elements, 1):
                # Distinct name so the page-URL parameter is not shadowed.
                img_url = self._extract_image_url(img)
                if img_url:
                    urls.append(img_url)
                    print(f"{Colors.OKGREEN}✔ 第 {idx} 张: {img_url}{Colors.ENDC}")
                else:
                    print(f"{Colors.WARNING}⚠️ 第 {idx} 张无可用链接{Colors.ENDC}")

            return urls

        except TimeoutException:
            print(f"{Colors.FAIL}⏰ 页面加载超时{Colors.ENDC}")
            self._save_debug()
            return []
        except Exception as e:
            print(f"{Colors.FAIL}💥 抓取异常: {e}{Colors.ENDC}")
            self._save_debug()
            return []
        finally:
            print(f"{Colors.OKCYAN}抓取结束，耗时 {round(time.time() - start, 2)} 秒{Colors.ENDC}")

    def _extract_image_url(self, img):
        """Return the best direct URL for *img*, or None if there is none.

        Attributes are checked in priority order. For srcset-style
        attributes only the first candidate is used, and its width/density
        descriptor (e.g. "800w") is dropped — a srcset entry is
        "URL descriptor", so splitting only on ',' would leave the
        descriptor glued to the URL. Query strings are stripped last.
        """
        for attr in ["src", "srcset", "data-srcset", "data-src"]:
            val = img.get_attribute(attr)
            if val and "http" in val:
                if "set" in attr:
                    # First comma-separated entry, then URL token only.
                    val = val.split(',')[0].split()[0]
                return val.split('?')[0].strip()
        return None

    def _save_debug(self):
        """Save a screenshot and the page HTML to DEBUG_DIR for diagnosis."""
        ts = time.strftime("%Y%m%d-%H%M%S")
        (DEBUG_DIR / f"debug_{ts}.png").write_bytes(self.driver.get_screenshot_as_png())
        (DEBUG_DIR / f"page_{ts}.html").write_text(self.driver.page_source, encoding="utf-8")
        print(f"{Colors.WARNING}调试文件已保存到 {DEBUG_DIR}{Colors.ENDC}")

    def save_urls_to_txt(self, urls):
        """Write the collected URLs, one per line, into the page's folder."""
        if not urls:
            print(f"{Colors.WARNING}⚠️ 无可保存链接{Colors.ENDC}")
            return
        folder = SAVE_DIR / self.page_title
        folder.mkdir(exist_ok=True)
        ts = time.strftime("%Y%m%d-%H%M%S")
        out_file = folder / f"image_links_{ts}.txt"
        out_file.write_text("\n".join(urls), encoding="utf-8")
        print(f"{Colors.OKGREEN}✅ 链接已写入: {out_file}{Colors.ENDC}")

    def close(self):
        """Shut down the browser instance."""
        self.driver.quit()
        print(f"{Colors.OKBLUE}🛑 已关闭浏览器实例{Colors.ENDC}")


# ====== 多线程下载器 ======
def download_file(url, save_dir, idx, total):
    """Download *url* into *save_dir*, returning the file name on success.

    Args:
        url: Direct link to the resource.
        save_dir: ``pathlib.Path`` directory the file is written into.
        idx: Zero-based position of this URL in the batch; used to make
            the generated file name unique.
        total: Size of the batch (kept for interface compatibility;
            currently unused).

    Returns:
        The generated file name, or ``None`` when the download failed.
    """
    parsed = urlparse(url)
    # Fall back to a generic base when the URL path has no basename
    # (e.g. it ends with a slash), so the file name never starts with "-".
    base = os.path.basename(parsed.path) or "file"
    name = unquote(os.path.splitext(base)[0])
    ext = os.path.splitext(base)[1] or ".bin"
    file_name = f"{name}-{idx + 1}{ext}"
    path = save_dir / file_name

    try:
        # timeout keeps a stalled server from hanging this worker thread
        # forever; stream=True downloads in chunks instead of all at once.
        with requests.get(url, stream=True, timeout=30) as r:
            r.raise_for_status()

            with open(path, 'wb') as f:
                for chunk in r.iter_content(chunk_size=8192):
                    f.write(chunk)

    except Exception as e:
        tqdm.write(f"{Colors.FAIL}❌ 下载失败: {file_name} -> {e}{Colors.ENDC}")
        return None

    return file_name


def save_files_multi(urls, save_dir):
    """Download *urls* concurrently (up to 8 threads) into *save_dir*.

    Returns the list of file names that downloaded successfully. Failed
    downloads are reported by ``download_file`` and skipped here.
    """
    if not urls:
        # ThreadPoolExecutor rejects max_workers == 0; nothing to do anyway.
        return []

    start = time.time()
    max_workers = min(8, len(urls))
    results = []

    main_bar = tqdm(total=len(urls), desc="总进度", position=0)
    try:
        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            futures = [
                executor.submit(download_file, url, save_dir, i, len(urls))
                for i, url in enumerate(urls)
            ]
            for future in as_completed(futures):
                res = future.result()
                if res:
                    results.append(res)
                # Advance the bar for failures too, so it always reaches
                # 100% instead of stalling short of the total.
                main_bar.update(1)
    finally:
        main_bar.close()

    print(f"{Colors.OKGREEN}✅ 文件保存路径: {save_dir}{Colors.ENDC}")
    tqdm.write(f"{Colors.OKGREEN}✅ 所有文件已下载，耗时 {round(time.time() - start, 2)} 秒{Colors.ENDC}")
    return results


# ====== 入口 ======
# ====== Entry point ======
if __name__ == "__main__":
    # Interactive flow: prompt for a page URL, scrape it, download every
    # image found, and persist the link list alongside the files.
    page_url = input("请输入 Dribbble 页面 URL：").strip()
    run_started = time.time()

    scraper = WebScraper()
    try:
        image_urls = scraper.scrape_images(page_url)
        if image_urls:
            dest_dir = SAVE_DIR / scraper.page_title
            dest_dir.mkdir(exist_ok=True)
            save_files_multi(image_urls, dest_dir)
            scraper.save_urls_to_txt(image_urls)
    finally:
        # Always release the browser, even when scraping blew up.
        scraper.close()
        print(f"{Colors.OKBLUE}全部任务完成，耗时 {round(time.time() - run_started, 2)} 秒{Colors.ENDC}")

    input("✅ 下载已完成，按回车键退出...")