import hashlib
import os
import re
import time
from urllib.parse import urljoin, urlparse

import requests
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from webdriver_manager.chrome import ChromeDriverManager


def create_safe_filename(filename):
    """Return *filename* with characters illegal in Windows filenames replaced.

    Replaces ``<>:"/\\|?*`` and ASCII control characters with ``_``, then
    trims surrounding whitespace and trailing dots/spaces (Windows rejects
    names that end in a dot or space).  Falls back to ``'untitled'`` when
    nothing usable remains.
    """
    # Control chars (\x00-\x1f) are also invalid on Windows, not just the
    # printable reserved set.
    filename = re.sub(r'[<>:"/\\|?*\x00-\x1f]', '_', filename)
    # rstrip('. ') handles the Windows trailing-dot/space restriction.
    filename = filename.strip().rstrip('. ')
    if not filename:
        filename = 'untitled'
    return filename


def download_resource(url, save_path):
    """Download one static asset (CSS/JS/image) from *url* into *save_path*.

    Best-effort: returns True on success, False on any failure.  Failures
    are printed rather than raised so a single broken asset never aborts
    the page download.
    """
    request_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
    }
    try:
        resp = requests.get(url, headers=request_headers, timeout=30)
        resp.raise_for_status()

        # Ensure the target directory exists before writing the bytes.
        os.makedirs(os.path.dirname(save_path), exist_ok=True)
        with open(save_path, 'wb') as out:
            out.write(resp.content)
    except Exception as exc:
        print(f"下载资源失败 {url}: {exc}")
        return False
    return True


def process_html_with_resources(html_content, base_url, download_dir, page_title):
    """Rewrite *html_content* so the page renders offline.

    Downloads every stylesheet, script and image referenced by the page
    into ``download_dir/resources`` and points the HTML at the local
    copies, then injects a small readability stylesheet.

    :param html_content: raw HTML source of the page.
    :param base_url: URL the page was fetched from (base for relative links).
    :param download_dir: directory the page is being saved into.
    :param page_title: accepted for interface compatibility; currently unused.
    :return: the rewritten HTML as a string.
    """
    soup = BeautifulSoup(html_content, 'html.parser')

    # All assets go into a single "resources" subdirectory next to the HTML.
    resources_dir = os.path.join(download_dir, "resources")
    os.makedirs(resources_dir, exist_ok=True)

    _localize_stylesheets(soup, base_url, resources_dir)
    _localize_scripts(soup, base_url, resources_dir)
    _localize_images(soup, base_url, resources_dir)
    _inject_base_style(soup)

    return str(soup)


def _local_asset_name(url, ext):
    """Short, collision-resistant local filename derived from the asset URL."""
    return hashlib.md5(url.encode()).hexdigest()[:8] + ext


def _localize_stylesheets(soup, base_url, resources_dir):
    """Download each <link rel="stylesheet"> and rewrite its href locally."""
    for link in soup.find_all('link', rel='stylesheet'):
        if not link.get('href'):
            continue
        css_url = urljoin(base_url, link['href'])
        css_filename = _local_asset_name(css_url, '.css')
        if download_resource(css_url, os.path.join(resources_dir, css_filename)):
            link['href'] = f"resources/{css_filename}"
            print(f"✓ 下载CSS: {css_filename}")


def _localize_scripts(soup, base_url, resources_dir):
    """Download each external <script src=...> and rewrite its src locally."""
    for script in soup.find_all('script', src=True):
        if not script.get('src'):
            continue
        js_url = urljoin(base_url, script['src'])
        js_filename = _local_asset_name(js_url, '.js')
        if download_resource(js_url, os.path.join(resources_dir, js_filename)):
            script['src'] = f"resources/{js_filename}"
            print(f"✓ 下载JS: {js_filename}")


def _localize_images(soup, base_url, resources_dir):
    """Download each <img src=...> and rewrite its src locally."""
    for img in soup.find_all('img', src=True):
        if not img.get('src'):
            continue
        img_url = urljoin(base_url, img['src'])
        # Keep the original extension when the URL has one; default to .jpg.
        file_ext = os.path.splitext(urlparse(img_url).path)[1] or '.jpg'
        img_filename = _local_asset_name(img_url, file_ext)
        if download_resource(img_url, os.path.join(resources_dir, img_filename)):
            img['src'] = f"resources/{img_filename}"
            print(f"✓ 下载图片: {img_filename}")


def _inject_base_style(soup):
    """Append a readability stylesheet to <head>, creating <head> if missing."""
    style_tag = soup.new_tag('style')
    style_tag.string = """
    body { 
        max-width: 1200px; 
        margin: 0 auto; 
        padding: 20px; 
        font-family: Arial, sans-serif; 
        line-height: 1.6; 
        color: #333;
    }
    .content { 
        background: #fff; 
        padding: 20px; 
        border-radius: 8px; 
        box-shadow: 0 2px 10px rgba(0,0,0,0.1);
    }
    h1, h2, h3, h4, h5, h6 { 
        color: #2c3e50; 
        margin-top: 30px; 
        margin-bottom: 15px;
    }
    p { 
        margin-bottom: 15px; 
    }
    code { 
        background: #f4f4f4; 
        padding: 2px 4px; 
        border-radius: 3px; 
        font-family: 'Courier New', monospace;
    }
    pre { 
        background: #f8f8f8; 
        padding: 15px; 
        border-radius: 5px; 
        overflow-x: auto;
        border-left: 4px solid #3498db;
    }
    a { 
        color: #3498db; 
        text-decoration: none; 
    }
    a:hover { 
        text-decoration: underline; 
    }
    """

    if soup.head:
        soup.head.append(style_tag)
        return

    head_tag = soup.new_tag('head')
    head_tag.append(style_tag)
    if soup.html:
        soup.html.insert(0, head_tag)
    else:
        # Bug fix: the style tag was previously dropped silently when the
        # document had neither <head> nor <html>.
        soup.insert(0, head_tag)





def setup_driver():
    """Create a headless Chrome WebDriver configured to evade basic bot checks.

    Uses webdriver-manager to resolve a matching chromedriver binary.

    :return: a ready ``webdriver.Chrome`` instance, or ``None`` if Chrome
        could not be started (the error is printed).
    """
    chrome_options = Options()
    chrome_options.add_argument('--headless')
    chrome_options.add_argument('--no-sandbox')
    chrome_options.add_argument('--disable-dev-shm-usage')
    # Hide the usual automation fingerprints (navigator.webdriver, infobar).
    chrome_options.add_argument('--disable-blink-features=AutomationControlled')
    chrome_options.add_experimental_option("excludeSwitches", ["enable-automation"])
    chrome_options.add_experimental_option('useAutomationExtension', False)
    chrome_options.add_argument('--window-size=1920,1080')
    chrome_options.add_argument('--disable-extensions')
    chrome_options.add_argument('--disable-plugins')

    try:
        # Explicit Service import (see module imports) instead of the fragile
        # attribute chain webdriver.chrome.service.Service, which relies on
        # selenium implicitly importing its submodules.
        service = Service(ChromeDriverManager().install())
        driver = webdriver.Chrome(service=service, options=chrome_options)
        # Mask the webdriver flag that the headless flag alone leaves exposed.
        driver.execute_script("Object.defineProperty(navigator, 'webdriver', {get: () => undefined})")
        return driver
    except Exception as e:
        print(f"Chrome驱动启动失败: {e}")
        return None


def save_page_content(driver, url, filename, download_dir):
    """Save the page at *url* as a self-contained HTML file plus a text copy.

    Navigates the already-open *driver* to *url*, waits for the body and a
    short settle period for dynamic content, then writes:
    - ``download_dir/filename`` — HTML with assets localized for offline use;
    - a sibling ``.txt`` file with just the readable text.

    :return: True on success, False on any failure (the error is printed).
    """
    try:
        print(f"正在访问: {url}")
        driver.get(url)

        # Wait until the document body exists, then give JS-rendered
        # content a few seconds to settle.
        WebDriverWait(driver, 10).until(
            EC.presence_of_element_located((By.TAG_NAME, "body"))
        )
        time.sleep(3)

        page_source = driver.page_source

        # Full HTML with CSS/JS/images downloaded alongside it.
        processed_html = process_html_with_resources(page_source, url, download_dir, filename)
        html_path = os.path.join(download_dir, filename)
        with open(html_path, 'w', encoding='utf-8') as f:
            f.write(processed_html)

        # Companion plain-text extract for quick reading/searching.
        text_content = _extract_page_text(page_source)
        txt_path = os.path.join(download_dir, filename.replace('.html', '.txt'))
        with open(txt_path, 'w', encoding='utf-8') as f:
            f.write(f"URL: {url}\n")
            f.write("=" * 50 + "\n\n")
            f.write(text_content)

        # Bug fix: previously printed a literal placeholder instead of the
        # saved filename.
        print(f"✓ 保存完成: {filename}")
        return True

    except Exception as e:
        print(f"✗ 保存失败 {url}: {e}")
        return False


def _extract_page_text(page_source):
    """Strip markup from *page_source* and return its readable text.

    Prefers the main article container when one exists; blank lines and
    per-line leading/trailing whitespace are removed.
    """
    soup = BeautifulSoup(page_source, 'html.parser')

    # Drop non-content tags before extracting text.
    for tag in soup(["script", "style"]):
        tag.decompose()

    main_content = (soup.find('div', class_='content') or soup.find('article')
                    or soup.find('main') or soup.body)
    if main_content:
        raw = main_content.get_text()
        lines = (line.strip() for line in raw.splitlines())
        return '\n'.join(line for line in lines if line)
    return soup.get_text()


def scrape_go_course_selenium():
    """Crawl the Go course index on learn.lianglianglee.com with Selenium.

    Collects lesson links from the course index page and saves each lesson
    as a self-contained ``.html`` plus a ``.txt`` extract under
    ``D:\\Goxuexi``.  Lessons whose ``.txt`` already exists are skipped, so
    the scrape is resumable.
    """
    base_url = "https://learn.lianglianglee.com/%E4%B8%93%E6%A0%8F/Tony%20Bai%20%C2%B7%20Go%E8%AF%AD%E8%A8%80%E7%AC%AC%E4%B8%80%E8%AF%BE/"
    download_dir = "D:\\Goxuexi"

    os.makedirs(download_dir, exist_ok=True)

    driver = setup_driver()
    if not driver:
        return

    try:
        print("正在获取课程目录...")
        driver.get(base_url)

        # Wait for the body, then allow time for the index to render fully.
        WebDriverWait(driver, 10).until(
            EC.presence_of_element_located((By.TAG_NAME, "body"))
        )
        time.sleep(5)

        links = driver.find_elements(By.TAG_NAME, "a")
        print(f"页面中总共找到 {len(links)} 个链接")

        course_links = []
        debug_links = []

        for link in links:
            try:
                href = link.get_attribute('href')
                text = link.text.strip()

                # Keep every same-site link for the debug listing below.
                if href and 'learn.lianglianglee.com' in href:
                    debug_links.append((href, text))

                if href and text and ('learn.lianglianglee.com' in href):
                    # Loose keyword match on either the visible (Chinese)
                    # title or the URL-encoded path, to catch lesson pages.
                    if any(keyword in text for keyword in ['第', '课', '章', '节', 'Go', '语言', '基础', '实战', '开篇', '结束']) or \
                            any(keyword in href for keyword in ['%E7%AC%AC', '%E8%AF%BE', 'Go', 'Tony']):
                        course_links.append((href, text))
            except Exception:
                # Bug fix: was a bare `except:` that also swallowed
                # KeyboardInterrupt/SystemExit.  Stale or detached
                # elements are simply skipped.
                continue

        print(f"找到包含learn.lianglianglee.com的链接: {len(debug_links)} 个")
        for href, text in debug_links[:10]:  # show only the first 10 for debugging
            print(f"  - {text[:50]}... -> {href}")

        print(f"找到 {len(course_links)} 个课程链接")

        downloaded_count = 0
        failed_count = 0

        for href, text in course_links:
            filename = create_safe_filename(text) + '.html'

            # Skip lessons already downloaded (presence of the .txt file).
            txt_path = os.path.join(download_dir, filename.replace('.html', '.txt'))
            if os.path.exists(txt_path):
                print(f"已存在，跳过: {text}")
                continue

            if save_page_content(driver, href, filename, download_dir):
                downloaded_count += 1
            else:
                failed_count += 1

            time.sleep(2)  # throttle to avoid hammering the site

        print(f"\n下载完成！")
        print(f"成功下载: {downloaded_count} 个文件")
        print(f"下载失败: {failed_count} 个文件")
        print(f"文件保存在: {download_dir}")

    except Exception as e:
        print(f"爬取过程中出现错误: {e}")
    finally:
        driver.quit()


# Run the scraper only when executed as a script, not when imported.
if __name__ == "__main__":
    scrape_go_course_selenium()