import requests
import os
import json
from dotenv import load_dotenv

# Load variables from a local .env file into the process environment.
# NOTE(review): nothing in this file reads os.environ afterwards — the
# cookies below are hard-coded — so this call currently has no effect;
# confirm whether credentials were meant to come from .env.
load_dotenv()

def scrape_with_cookie():
    """Scrape course data from the YNU ehall API using a copied browser session.

    Tries several request payloads in order until one returns the expected
    JSON structure. Every successful (HTTP 200) raw response is saved to
    ``course_data_<i>.json`` for offline analysis.

    Returns:
        str | None: Markdown course table on success, ``None`` when every
        attempt fails.
    """
    # Course-data API endpoint (discovered via the browser's network panel).
    course_api_url = "https://ehall.ynu.edu.cn/jwapp/sys/kcbcx/modules/kcbxxq/cxBjKcb.do"

    # Session cookies copied straight from the browser.
    # NOTE(review): these are live credentials committed in source; prefer
    # loading them from the environment — load_dotenv() is already called.
    cookies = {
        '_WEU': 'z6yXnFD1RwjhMZhrVlxXrNcvTETlEQfMQZGVlLxX0jktMiRHjWNX6tfO7PdNx_CE7wRqIjOgXRgpdz1J9tRsWdPo*R*4X43rndu3wifACAaJE7As0C_XsbyDhmlvGho_nS*duRnEGXhPBJjbnshzbZzxE7n1dkifbz*Ysh190sL.',
        'amp.locale': 'undefined',
        'asessionid': 'f09a89b8-433e-4aca-80a6-58bc4df4e95a',
        'EMAP_LANG': 'zh',
        'JSESSIONID': '1xeGIqPVdFxdrqYHqD0fRReYBNVtwAcNIaPIfgIckC8SQYBl4CU!-1629739863',
        'MOD_AUTH_CAS': 'MOD_AUTH_ST-2046846-k-7P0tIh9b38ILs33GULdQN9aoEciapserver2',
        'route': 'd7c8ec4ca6b966c8d9bd38d335f7be19',
        'THEME': 'millennium'
    }

    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
        'Accept': 'application/json, text/plain, */*',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Content-Type': 'application/x-www-form-urlencoded',
        'Origin': 'https://ehall.ynu.edu.cn',
        'Referer': 'https://ehall.ynu.edu.cn/jwapp/sys/kcbcx/*default/index.do',
        'X-Requested-With': 'XMLHttpRequest'
    }

    # Candidate request payloads, tried in order until one yields data.
    payloads = [
        {},                           # 1: no filter — all courses
        {'XNXQDM': '2024-2025-1'},    # 2: current semester
        {'BJDM': ''},                 # 3: by class code
        {'XH': ''}                    # 4: by student number
    ]

    # Derive the attempt count instead of hard-coding "4" — stays correct
    # if payloads is ever extended (prints the same "i/4" today).
    total = len(payloads)
    for i, payload in enumerate(payloads, 1):
        try:
            print(f"尝试请求 {i}/{total}...")

            response = requests.post(
                course_api_url,
                headers=headers,
                cookies=cookies,
                data=payload,
                timeout=30
            )

            print(f"响应状态码: {response.status_code}")

            # Guard clause: non-200 means this payload failed; try the next.
            if response.status_code != 200:
                print(f"请求失败: {response.text}")
                continue

            result = response.json()  # raises ValueError on a non-JSON body
            print("✅ 成功获取数据！")

            # Persist the raw payload so the response shape can be studied offline.
            with open(f'course_data_{i}.json', 'w', encoding='utf-8') as f:
                json.dump(result, f, ensure_ascii=False, indent=2)

            print(f"已保存数据到 course_data_{i}.json")

            # Only the expected {'datas': {'cxBjKcb': [...]}} shape is usable.
            if 'datas' in result and 'cxBjKcb' in result['datas']:
                courses = result['datas']['cxBjKcb']
                print(f"找到 {len(courses)} 门课程")
                return convert_to_markdown(courses)

            print("数据格式不符，继续尝试...")
            print(f"返回的keys: {result.keys() if isinstance(result, dict) else '不是字典'}")
        except (requests.RequestException, ValueError) as e:
            # Narrowed from bare Exception: network/HTTP errors and JSON
            # decoding failures are the only expected failure modes here.
            print(f"尝试 {i} 失败: {e}")
            continue

    print("❌ 所有尝试都失败了")
    return None

def convert_to_markdown(courses_data):
    """Render a list of course dicts as a Markdown timetable.

    Args:
        courses_data: iterable of course dicts keyed by the ehall field
            codes (KCM, KCDM, SKJS, ...). Missing fields fall back to
            placeholder text.

    Returns:
        str: the Markdown document, or "暂无课程数据" for falsy input.
    """
    if not courses_data:
        return "暂无课程数据"

    # Collect fragments and join once at the end instead of repeated +=.
    parts = ["# 云南大学课程表\n\n"]
    for entry in courses_data:
        parts.append(f"## {entry.get('KCM', '未知课程')}\n")
        parts.append(f"- **课程代码**: {entry.get('KCDM', '未知')}\n")
        parts.append(
            f"- **上课时间**: {entry.get('SKJS', '未知')} {entry.get('SKXQ', '')} "
            f"{entry.get('KSJC', '')}-{entry.get('JSJC', '')}节\n"
        )
        parts.append(f"- **上课地点**: {entry.get('JSMC', '未知')}\n")
        parts.append(f"- **授课教师**: {entry.get('SKLS', '未知')}\n")
        parts.append(f"- **学分**: {entry.get('XF', '未知')}\n")
        parts.append(f"- **周次**: {entry.get('ZC', '未知')}\n\n")

    return "".join(parts)

# Script entry point: try the live scrape first, fall back to mock data.
if __name__ == "__main__":
    data = scrape_with_cookie()
    if not data:
        print("使用模拟数据继续开发...")
        # Live scrape failed — develop against canned data instead.
        from crawler_mock import scrape_mock
        mock_data = scrape_mock()
        print("模拟数据:")
        print(mock_data)
    else:
        print("爬取到的课程数据:")
        print(data)