#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Cursor Changelog 爬虫程序
爬取 https://www.cursor.com/changelog 页面的更新日志信息
"""

import requests
from bs4 import BeautifulSoup
import json
import csv
from datetime import datetime
import time
import os
from urllib.parse import urljoin, urlparse


class CursorChangelogScraper:
    """Scrape https://www.cursor.com/changelog and persist the results.

    The page is fetched with a browser-like ``requests`` session, parsed with
    BeautifulSoup through a cascade of fallback CSS selectors, and the
    extracted entries are written out as JSON, CSV, and plain text.
    """

    def __init__(self):
        self.base_url = "https://www.cursor.com/changelog"
        self.session = requests.Session()
        # Browser-like request headers to reduce the chance of being blocked.
        self.session.headers.update({
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
        })

    def fetch_page(self, url):
        """Fetch *url* and return its decoded HTML text, or None on failure.

        Logs status code, content type/encoding headers, and the final text
        encoding used, to help debug garbled-character issues.
        """
        try:
            print(f"正在获取页面: {url}")
            response = self.session.get(url, timeout=30)
            response.raise_for_status()

            # Response metadata, useful when diagnosing encoding problems.
            print(f"响应状态码: {response.status_code}")
            print(f"Content-Type: {response.headers.get('Content-Type', '未知')}")
            print(f"Content-Encoding: {response.headers.get('Content-Encoding', '无')}")

            # requests defaults to ISO-8859-1 when the server omits a charset;
            # in that case switch to the encoding detected from the body.
            if response.encoding == 'ISO-8859-1':
                response.encoding = response.apparent_encoding

            print(f"使用编码: {response.encoding}")
            return response.text
        except requests.RequestException as e:
            print(f"获取页面失败: {e}")
            return None

    def parse_changelog(self, html_content):
        """Parse changelog HTML into a list of entry dicts.

        Tries increasingly generic CSS selectors; if none match, falls back
        to harvesting paragraphs, headings, and divs so some content is
        always captured. Also dumps the raw HTML to ``debug_page.html`` for
        offline inspection.
        """
        if not html_content:
            return []

        soup = BeautifulSoup(html_content, 'html.parser')
        changelog_entries = []

        # Keep a copy of the raw HTML for debugging selector issues.
        with open('debug_page.html', 'w', encoding='utf-8') as f:
            f.write(html_content)
        print("页面HTML已保存到 debug_page.html 用于调试")

        # Record the page title as the first entry.
        title = soup.find('title')
        if title:
            changelog_entries.append({
                'type': 'page_title',
                'content': title.get_text().strip(),
                'timestamp': datetime.now().isoformat()
            })
            print(f"页面标题: {title.get_text().strip()}")

        # A large script count hints the page is rendered client-side, i.e.
        # the static HTML may not contain the changelog body at all.
        scripts = soup.find_all('script')
        print(f"页面包含 {len(scripts)} 个script标签")

        # Candidate selectors, ordered from most to least specific.
        selectors = [
            'div[class*="changelog"]',
            'div[class*="update"]',
            'div[class*="version"]',
            'article',
            '.entry',
            '.post',
            'div[data-testid*="changelog"]',
            'main',
            '[role="main"]',
            '.content',
            '.container'
        ]

        entries = []
        for selector in selectors:
            entries = soup.select(selector)
            if entries:
                print(f"使用选择器 '{selector}' 找到 {len(entries)} 个条目")
                break

        # Fallback path: scrape generic page content when no selector matched.
        if not entries:
            print("未找到特定的changelog条目，尝试获取页面主要内容...")

            # Paragraphs: first 10, skipping very short fragments.
            paragraphs = soup.find_all('p')
            print(f"找到 {len(paragraphs)} 个段落")
            for i, p in enumerate(paragraphs[:10]):
                text = p.get_text().strip()
                if text and len(text) > 10:
                    changelog_entries.append({
                        'type': 'paragraph',
                        'content': text,
                        'index': i,
                        'timestamp': datetime.now().isoformat()
                    })
                    print(f"段落 {i}: {text[:100]}...")

            # All headings h1-h6.
            headers = soup.find_all(['h1', 'h2', 'h3', 'h4', 'h5', 'h6'])
            print(f"找到 {len(headers)} 个标题")
            for header in headers:
                text = header.get_text().strip()
                if text:
                    changelog_entries.append({
                        'type': 'header',
                        'level': header.name,
                        'content': text,
                        'timestamp': datetime.now().isoformat()
                    })
                    print(f"标题 {header.name}: {text}")

            # Divs: first 20, keeping only plausibly-sized text blocks.
            divs = soup.find_all('div')
            print(f"找到 {len(divs)} 个div元素")
            for i, div in enumerate(divs[:20]):
                text = div.get_text().strip()
                if text and len(text) > 20 and len(text) < 500:
                    changelog_entries.append({
                        'type': 'div_content',
                        'content': text,
                        'index': i,
                        'timestamp': datetime.now().isoformat()
                    })
                    print(f"Div {i}: {text[:100]}...")
        else:
            # Structured path: one record per matched element.
            for i, entry in enumerate(entries):
                entry_data = {
                    'index': i,
                    'timestamp': datetime.now().isoformat(),
                    # Truncated raw markup kept for later inspection.
                    'raw_html': str(entry)[:500] + '...' if len(str(entry)) > 500 else str(entry)
                }

                # Full visible text of the element.
                text_content = entry.get_text().strip()
                if text_content:
                    entry_data['content'] = text_content

                # Heuristic date lookup: class names containing date keywords.
                date_elements = entry.find_all(['time', 'span', 'div'], class_=lambda x: x and any(
                    keyword in x.lower() for keyword in ['date', 'time', 'published', 'updated']
                ))
                if date_elements:
                    entry_data['date'] = date_elements[0].get_text().strip()

                # Heuristic version lookup: text containing version keywords.
                version_elements = entry.find_all(['span', 'div', 'h1', 'h2', 'h3'],
                                                string=lambda text: text and any(
                                                    keyword in text.lower() for keyword in ['v0.', 'version', 'release']
                                                ))
                if version_elements:
                    entry_data['version'] = version_elements[0].get_text().strip()

                changelog_entries.append(entry_data)

        print(f"总共解析到 {len(changelog_entries)} 条记录")
        return changelog_entries

    def save_to_json(self, data, filename="cursor_changelog.json"):
        """Save *data* to *filename* as pretty-printed UTF-8 JSON."""
        try:
            with open(filename, 'w', encoding='utf-8') as f:
                json.dump(data, f, ensure_ascii=False, indent=2)
            # BUGFIX: the message previously never interpolated the filename.
            print(f"数据已保存到 {filename}")
        except Exception as e:
            print(f"保存JSON文件失败: {e}")

    def save_to_csv(self, data, filename="cursor_changelog.csv"):
        """Save *data* to *filename* as UTF-8 CSV with a fixed column order."""
        try:
            if not data:
                print("没有数据可保存")
                return

            # Fixed column order; entries missing a field get an empty cell.
            # (Removed dead code that collected all keys but never used them.)
            fieldnames = ['timestamp', 'type', 'content', 'version', 'date', 'index', 'level', 'raw_html']

            with open(filename, 'w', newline='', encoding='utf-8') as f:
                writer = csv.DictWriter(f, fieldnames=fieldnames)
                writer.writeheader()
                for entry in data:
                    # Ensure every column is present for DictWriter.
                    row = {field: entry.get(field, '') for field in fieldnames}
                    writer.writerow(row)
            # BUGFIX: the message previously never interpolated the filename.
            print(f"数据已保存到 {filename}")
        except Exception as e:
            print(f"保存CSV文件失败: {e}")

    def save_to_txt(self, data, filename="cursor_changelog.txt"):
        """Save *data* to *filename* as a human-readable UTF-8 report."""
        try:
            with open(filename, 'w', encoding='utf-8') as f:
                f.write(f"Cursor Changelog 爬取结果\n")
                f.write(f"爬取时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
                f.write(f"数据来源: {self.base_url}\n")
                f.write("=" * 50 + "\n\n")

                for i, entry in enumerate(data, 1):
                    f.write(f"条目 {i}:\n")
                    f.write(f"类型: {entry.get('type', '未知')}\n")
                    f.write(f"时间: {entry.get('timestamp', '未知')}\n")

                    # Optional fields are written only when present.
                    if entry.get('version'):
                        f.write(f"版本: {entry['version']}\n")
                    if entry.get('date'):
                        f.write(f"日期: {entry['date']}\n")
                    if entry.get('level'):
                        f.write(f"标题级别: {entry['level']}\n")

                    f.write(f"内容: {entry.get('content', '无内容')}\n")
                    f.write("-" * 30 + "\n\n")

            # BUGFIX: the message previously never interpolated the filename.
            print(f"数据已保存到 {filename}")
        except Exception as e:
            print(f"保存文本文件失败: {e}")

    def run(self):
        """Execute the full pipeline: fetch, parse, then save in all formats."""
        print("开始爬取 Cursor Changelog...")
        print(f"目标URL: {self.base_url}")

        # Fetch the page; abort if the request failed.
        html_content = self.fetch_page(self.base_url)
        if not html_content:
            print("无法获取页面内容，程序退出")
            return

        # Parse the content; abort if nothing was extracted.
        changelog_data = self.parse_changelog(html_content)

        if not changelog_data:
            print("未找到任何changelog数据")
            return

        print(f"成功解析到 {len(changelog_data)} 条记录")

        # Persist the same data in all three output formats.
        self.save_to_json(changelog_data)
        self.save_to_csv(changelog_data)
        self.save_to_txt(changelog_data)

        print("爬取完成！")


def main():
    """Entry point: build a scraper and execute one full crawl."""
    CursorChangelogScraper().run()


if __name__ == "__main__":
    main()
