# MOOC course spider: generates (mock) course listings and exports processed CSV data.
import requests
from bs4 import BeautifulSoup
import json
import time
import random
import os
import re
import pandas as pd
from urllib.parse import urljoin
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from fake_useragent import UserAgent
import logging
from logging.handlers import RotatingFileHandler

# Logging setup (rotating file handler)
def setup_logger():
    """Create (or return) the module logger writing to a rotating log file.

    Returns:
        logging.Logger: logger named 'mooc_spider' at INFO level with a
        single RotatingFileHandler (5 MB per file, 3 backups, UTF-8).
    """
    logger = logging.getLogger('mooc_spider')
    logger.setLevel(logging.INFO)

    # Fix: guard against duplicate handlers when setup_logger() is
    # called more than once (e.g. repeated setup or re-import) — without
    # this, every call appended another handler and every log line was
    # written multiple times.
    if not logger.handlers:
        file_handler = RotatingFileHandler(
            'mooc_spider.log',
            maxBytes=5 * 1024 * 1024,
            backupCount=3,
            encoding='utf-8'
        )
        file_handler.setFormatter(logging.Formatter(
            '%(asctime)s - %(levelname)s - %(message)s'
        ))
        logger.addHandler(file_handler)

    return logger

logger = setup_logger()

class MoocSpider:
    """MOOC spider main class (mock-data implementation).

    Builds a list of mock course records for a search keyword and
    persists them as JSON under ``self.data_dir``.  The mock section in
    :meth:`crawl` is the placeholder for real scraping logic.
    """

    def __init__(self, username=None, password=None):
        # Fix: credentials were accepted but silently discarded; keep
        # them on the instance for a future login flow.
        self.username = username
        self.password = password
        self.session = requests.Session()
        self.base_url = 'https://www.icourse163.org/'
        self.data_dir = 'mooc_data'
        self.ua = UserAgent()
        # Fix: self.ua was created but never used — attach a randomized
        # User-Agent to the session to reduce the chance of being blocked.
        self.session.headers['User-Agent'] = self.ua.random

        os.makedirs(self.data_dir, exist_ok=True)

    def crawl(self, keyword, max_pages=1):
        """Crawl course data for *keyword* and save it as JSON.

        Args:
            keyword: search term embedded in the mock course titles/tags.
            max_pages: number of result pages to fetch (5 mock courses each).

        Returns:
            list[dict] of course records on success, or None when
            anything fails (the error is logged).
        """
        try:
            logger.info(f"开始爬取关键词: {keyword}")
            all_courses = []

            for page in range(1, max_pages + 1):
                logger.info(f"正在爬取第 {page} 页")

                # Mock course list (replace with real scraping logic).
                mock_data = [
                    {
                        'id': f'course_{page}_{i}',
                        'title': f'{keyword} 课程示例 {i}',
                        'institution': '示例大学',
                        'instructor': '张教授',
                        'rating': round(random.uniform(3, 5), 1),
                        'resources': [{'type': 'video', 'title': '示例视频'}],
                        'exercises': [{'question': '示例问题'}],
                        'syllabus': ['第一章', '第二章'],
                        'tags': ['计算机', keyword],
                        'url': f'{self.base_url}course/example'
                    }
                    for i in range(5)  # 5 mock courses per page
                ]

                all_courses.extend(mock_data)
                time.sleep(1)  # polite delay between pages

            # Persist everything collected across pages.
            self.save_data(all_courses, 'all_courses.json')
            logger.info(f"成功爬取并保存 {len(all_courses)} 门课程")
            return all_courses

        except Exception as e:
            # Fix: log the full traceback, not just the message text.
            logger.exception(f"爬取失败: {str(e)}")
            return None

    def save_data(self, data, filename):
        """Serialize *data* as pretty-printed UTF-8 JSON under the data dir.

        Failures are logged, not raised, so a save error does not abort
        the crawl.
        """
        filepath = os.path.join(self.data_dir, filename)
        try:
            with open(filepath, 'w', encoding='utf-8') as f:
                json.dump(data, f, ensure_ascii=False, indent=2)
            logger.info(f"数据已保存到 {filepath}")
        except Exception as e:
            logger.error(f"保存文件失败: {str(e)}")

class DataProcessor:
    """Loads crawled course data, flattens it and exports a CSV summary."""

    def __init__(self, data_dir='mooc_data'):
        self.data_dir = data_dir
        os.makedirs(data_dir, exist_ok=True)

    def check_data_exists(self, filename):
        """Return True when *filename* exists inside the data directory."""
        filepath = os.path.join(self.data_dir, filename)
        if os.path.exists(filepath):
            return True
        logger.warning(f"文件不存在: {filepath}")
        return False

    def load_data(self, filename):
        """Deserialize a JSON data file; None when missing or unreadable."""
        if not self.check_data_exists(filename):
            return None

        filepath = os.path.join(self.data_dir, filename)
        try:
            with open(filepath, 'r', encoding='utf-8') as f:
                loaded = json.load(f)
        except Exception as e:
            logger.error(f"加载数据失败: {str(e)}")
            return None
        return loaded

    def process_all(self):
        """Run the full pipeline: check -> load -> transform -> export CSV.

        Returns a (processed_records, dataframe) pair, or (None, None)
        when any stage fails.
        """
        # Stage 1: make sure the crawler output is present.
        if not self.check_data_exists('all_courses.json'):
            logger.error("未找到原始数据，请先运行爬虫")
            return None, None

        # Stage 2: read the raw course records.
        raw_data = self.load_data('all_courses.json')
        if not raw_data:
            return None, None

        # Stage 3: flatten each course into a summary row.
        processed_data = [
            {
                '课程ID': course.get('id'),
                '课程名称': course.get('title'),
                '评分': course.get('rating', 0),
                '资源数': len(course.get('resources', [])),
                '章节数': len(course.get('syllabus', []))
            }
            for course in raw_data
        ]

        # Stage 4: export as CSV (utf-8-sig so Excel reads it cleanly).
        try:
            df = pd.DataFrame(processed_data)
            csv_path = os.path.join(self.data_dir, 'processed_courses.csv')
            df.to_csv(csv_path, index=False, encoding='utf-8-sig')
            logger.info(f"处理结果已保存到 {csv_path}")
        except Exception as e:
            logger.error(f"保存处理结果失败: {str(e)}")
            return None, None
        return processed_data, df

if __name__ == '__main__':
    try:
        # Step 1: run the spider to collect the raw course data.
        logger.info("=== 开始执行爬虫 ===")
        courses = MoocSpider().crawl("计算机科学", max_pages=1)
        if not courses:
            raise RuntimeError("爬虫未能获取数据")

        # Step 2: transform the raw data into tabular form.
        logger.info("=== 开始处理数据 ===")
        processed_data, df = DataProcessor().process_all()

        # Step 3: report the outcome on the console.
        if df is None:
            print("数据处理失败，请检查日志")
        else:
            print("\n处理结果预览:")
            print(df.head())
            print(f"\n详细结果已保存到 mooc_data/processed_courses.csv")

    except Exception as e:
        logger.error(f"程序运行出错: {str(e)}")
        print(f"程序运行出错: {str(e)}")
    finally:
        logger.info("=== 程序执行结束 ===")