from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from webdriver_manager.chrome import ChromeDriverManager
import time
from typing import Optional, Dict, Any
import random


class ChaoxingScraper:
    """
    Selenium-based scraper for the Chaoxing website.

    Handles logging in, fetching page content, saving pages to disk,
    and releasing the browser resources on exit.
    """

    def __init__(self, headless: bool = False):
        """
        Initialize the scraper's Chrome options.

        The WebDriver itself is created lazily on the first call to
        ``login()``, so constructing this object does not start a browser.

        Args:
            headless: If True, run Chrome in headless mode (no visible window).
        """
        self.chrome_options = Options()
        if headless:
            self.chrome_options.add_argument('--headless')
        # Flags commonly required for stability in container/CI environments.
        self.chrome_options.add_argument('--disable-gpu')
        self.chrome_options.add_argument('--no-sandbox')
        self.chrome_options.add_argument('--disable-dev-shm-usage')

        self.driver = None      # created lazily in login()
        self.wait = None        # WebDriverWait bound to self.driver
        self.is_logged_in = False

    def login(self, username: str, password: str, wait_time: int = 10) -> bool:
        """
        Log in to the Chaoxing website.

        Args:
            username: Account name / phone number.
            password: Account password.
            wait_time: Maximum seconds to wait for login-form elements.

        Returns:
            bool: True if login succeeded, False otherwise.
        """
        try:
            # Create the WebDriver on first use only.
            if not self.driver:
                self.driver = webdriver.Chrome(
                    service=Service(ChromeDriverManager().install()),
                    options=self.chrome_options
                )
                self.wait = WebDriverWait(self.driver, wait_time)

            # Open the login page.
            login_url = "http://passport2.chaoxing.com/login"
            self.driver.get(login_url)

            # Wait until the login form has loaded.
            username_input = self.wait.until(EC.presence_of_element_located((By.ID, "phone")))
            password_input = self.driver.find_element(By.ID, "pwd")
            login_button = self.driver.find_element(By.ID, "loginBtn")

            # Fill in the credentials.
            username_input.clear()
            username_input.send_keys(username)
            password_input.clear()
            password_input.send_keys(password)

            # Submit the form.
            login_button.click()

            # Give the post-login redirect time to complete; success is
            # detected by navigating away from the login URL.
            time.sleep(3)

            if "passport2.chaoxing.com/login" not in self.driver.current_url:
                print("登录成功！")
                self.is_logged_in = True
                return True
            else:
                print("登录失败，请检查用户名和密码")
                return False

        except Exception as e:
            print(f"登录过程中发生错误: {str(e)}")
            return False

    def save_page(self, content: str, filename: str) -> bool:
        """
        Save page content to a UTF-8 text file.

        Args:
            content: Text to write.
            filename: Destination file path.

        Returns:
            bool: True if the file was written successfully, False otherwise.
        """
        try:
            with open(filename, "w", encoding="utf-8") as f:
                f.write(content)
            # BUG FIX: the message previously printed a literal "(unknown)"
            # instead of interpolating the actual destination path.
            print(f"\n页面内容已保存到 {filename}")
            return True
        except Exception as e:
            print(f"保存文件时发生错误: {str(e)}")
            return False

    def scrape_page(self, url: str, wait_time: int = 10, tmpid: int = 0) -> Dict[str, Any]:
        """
        Fetch the page at the given URL and return its content.

        Args:
            url: URL of the page to fetch.
            wait_time: Maximum seconds to wait for the page to load
                (currently unused; kept for interface stability).
            tmpid: Identifier echoed in the progress message to track
                which page in a batch was fetched.

        Returns:
            Dict with keys:
            - 'page_source': page HTML source ('' on error)
            - 'title': page title ('' on error)
            - 'error': error message string if something went wrong, else None

        Raises:
            Exception: if no WebDriver exists yet (login() was never called).
        """
        if not self.driver:
            raise Exception("请先调用login()方法进行登录")

        result = {
            'page_source': '',
            'title': '',
            'error': None
        }

        try:
            # Navigate to the target URL and capture title + source.
            self.driver.get(url)
            result['title'] = self.driver.title
            result['page_source'] = self.driver.page_source

            print(f"成功获取页面内容:{tmpid}")

        except Exception as e:
            error_msg = f"爬取过程中发生错误: {str(e)}"
            print(error_msg)
            result['error'] = error_msg

        return result

    def quit(self):
        """
        Quit the browser and release all resources.

        Safe to call multiple times; subsequent calls are no-ops.
        """
        if self.driver:
            self.driver.quit()
            self.driver = None
            self.wait = None
            self.is_logged_in = False


if __name__ == "__main__":
    from course_parser import CourseParser
    import os

    # Create the scraper; headless mode speeds up crawling.
    scraper = ChaoxingScraper(headless=True)

    try:
        # Log in with the same account/password used at
        # http://passport2.chaoxing.com/login
        if scraper.login("你的账号/手机号", "密码"):
            # Ensure the output directory exists.
            outcome_dir = "outcome"
            os.makedirs(outcome_dir, exist_ok=True)

            # Scan the liveId range.
            # NOTE: renamed loop variable from `id` — it shadowed the builtin.
            for live_id in range(12021373, 12099999, 15):
                result = scraper.scrape_page(
                    url=f"http://newesxidian.chaoxing.com/live/viewNewCourseLive1?liveId={live_id}",
                    tmpid=live_id
                )

                if result['error'] is None:
                    # Parse the fetched page.
                    parser = CourseParser(result['page_source'])
                    all_info = parser.get_all_info()

                    # Keep only pages for the target course.
                    if all_info['course_name'] == '毛泽东思想和中国特色社会主义理论体系概论':
                        file_path = os.path.join(outcome_dir, f"毛概_{all_info['teacher_name']}_{live_id}.html")
                        if not scraper.save_page(result['page_source'], file_path):
                            print(f"保存文件失败: {file_path}")

                if live_id % 100 == 0:
                    # Random pause so the traffic is not flagged as abusive.
                    time.sleep(random.uniform(0.2, 2))
    finally:
        # Always release the browser, even on error or Ctrl-C.
        scraper.quit()
