#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
获取 GitCode 上 arkui-x 项目 issues 的脚本
支持分页获取、多种状态筛选和数据保存功能
"""

import requests
import json
import time
import os
from typing import Dict, List, Optional
from datetime import datetime
import re
import pandas as pd

# Configuration
# Issue states to fetch; may be extended to ["all", "opened", "closed"].
ALL_STATES = ["all"]
# Creation-date filter; None disables filtering. Format: YYYY-MM-DD.
# Fix: zero-padded to actually match the documented YYYY-MM-DD format.
CREATED_AFTER = "2025-07-01"  # e.g. "2023-01-01" keeps only issues created on/after 2023-01-01


class GitCodeIssuesFetcher:
    """Fetcher for issues of the arkui-x group on GitCode.

    Downloads issues page by page from GitCode's web API, optionally
    filters them by creation date, and saves the results to a JSON file
    and to an Excel file (merging with previously saved Excel data).
    """

    def __init__(self):
        """Set up the API endpoint, browser-like request headers and storage."""
        self.base_url = "https://web-api.gitcode.com/issuepr/api/v1/groups/arkui-x/issues"
        # Browser-like headers: this endpoint is normally called by the site UI.
        self.headers = {
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
            "accept": "application/json, text/plain, */*",
            "referer": "https://gitcode.com/",
            "x-platform": "web",
            "x-device-type": "Windows",
            "sec-ch-ua": '"Not_A Brand";v="8", "Chromium";v="120", "Google Chrome";v="120"',
            "sec-ch-ua-platform": '"Windows"'
        }
        # Accumulates every issue fetched by the most recent fetch_issues() call.
        self.all_issues: List[Dict] = []

    @staticmethod
    def _filter_issues_by_date(issues: List[Dict], filter_date) -> List[Dict]:
        """Return the issues whose created_at date is on/after *filter_date*.

        Issues with a missing or unparseable created_at are skipped.

        Args:
            issues: raw issue dicts from one API page
            filter_date: a datetime.date lower bound (inclusive)
        """
        kept = []
        for issue in issues:
            created_at = issue.get("created_at", "")
            if not created_at:
                continue
            try:
                issue_date = datetime.fromisoformat(created_at.replace('Z', '+00:00')).date()
            except ValueError as e:
                print(f"时间解析错误: {e}, 跳过此issue")
                continue
            if issue_date >= filter_date:
                kept.append(issue)
        return kept

    def fetch_issues(self, state: str = "all", per_page: int = 10, created_after: Optional[str] = None) -> List[Dict]:
        """
        Fetch all issues with the given state, page by page.

        Args:
            state: issue state ("all", "opened", "closed")
            per_page: number of records per page
            created_after: creation-date filter (YYYY-MM-DD); only issues
                created on or after this date are kept. None disables it.

        Returns:
            List with every fetched (and, if requested, filtered) issue.
        """
        print(f"开始获取状态为 '{state}' 的 issues...")
        self.all_issues = []

        # Parse the filter date once instead of re-parsing it for every issue.
        filter_date = None
        if created_after:
            try:
                filter_date = datetime.strptime(created_after, "%Y-%m-%d").date()
            except ValueError as e:
                # A malformed filter previously caused every issue to be
                # silently dropped; report it and return an empty result.
                print(f"创建时间过滤条件格式错误: {e}")
                return self.all_issues

        page = 1
        while True:
            url = f"{self.base_url}?scope=all&state={state}&page={page}&per_page={per_page}"

            try:
                print(f"正在获取第 {page} 页...")
                response = requests.get(url, headers=self.headers, timeout=10)
                response.raise_for_status()

                data = response.json()

                # Stop as soon as a page carries no issue payload.
                if not data.get("content") or not data["content"].get("issues"):
                    print(f"第 {page} 页没有更多数据，停止获取。")
                    break

                issues = data["content"]["issues"]

                if filter_date:
                    issues = self._filter_issues_by_date(issues, filter_date)
                    print(f"第 {page} 页过滤后获取到 {len(issues)} 条记录")

                    # Issues come back ordered by creation time, so once a
                    # page filters down to nothing no later page can match.
                    if len(issues) == 0:
                        print(f"第 {page} 页过滤后无记录，由于issues按时间排序，后续页面也不会有符合条件的记录，停止获取")
                        break
                else:
                    print(f"第 {page} 页获取到 {len(issues)} 条记录")

                self.all_issues.extend(issues)

                # page_count in the payload tells us when the last page is reached.
                if page >= data.get("page_count", 0):
                    print("已到达最后一页，停止获取。")
                    break

                page += 1
                # Throttle requests to avoid hammering the API.
                time.sleep(3)

            except requests.exceptions.RequestException as e:
                print(f"请求第 {page} 页时发生错误: {e}")
                break
            except json.JSONDecodeError as e:
                print(f"解析第 {page} 页JSON数据时发生错误: {e}")
                break
            except Exception as e:
                print(f"获取第 {page} 页时发生未知错误: {e}")
                break

        print(f"获取完成，共获得 {len(self.all_issues)} 条 {state} 状态的 issues")
        return self.all_issues

    def save_to_file(self, filename: str = "issues.json") -> bool:
        """
        Save the fetched issues to a JSON file.

        Args:
            filename: target file name

        Returns:
            True on success, False otherwise.
        """
        try:
            with open(filename, "w", encoding="utf-8") as f:
                json.dump(self.all_issues, f, ensure_ascii=False, indent=2)
            # Bug fix: the message previously did not include the file name.
            print(f"数据已保存到 {filename}")
            return True
        except Exception as e:
            print(f"保存文件时发生错误: {e}")
            return False

    def extract_issue_data(self, issue: Dict) -> Dict:
        """
        Extract the key fields from a raw issue record.

        Args:
            issue: raw issue dict as returned by the API

        Returns:
            Dict keyed by the columns used in the Excel report.
        """
        # Derive the issue number from its web URL,
        # e.g. https://gitcode.com/arkui-x/docs/issues/348 -> "348".
        web_url = issue.get("web_url", "")
        issue_id = ""
        if web_url:
            match = re.search(r'/issues/(\d+)$', web_url)
            if match:
                issue_id = match.group(1)

        # Collect the label names into a "/"-joined string.
        label_names = [
            label["name"]
            for label in issue.get("labels", [])
            if isinstance(label, dict) and "name" in label
        ]
        labels_str = "/".join(label_names)

        def format_datetime(dt_str):
            """Convert an ISO-8601 timestamp into 'YYYY/MM/DD HH:MM:SS'."""
            if not dt_str:
                return ""
            try:
                dt = datetime.fromisoformat(dt_str.replace('Z', '+00:00'))
                return dt.strftime("%Y/%m/%d %H:%M:%S")
            except (TypeError, ValueError):
                # Fall back to the raw value rather than failing the export.
                return dt_str

        author = issue.get("author")
        return {
            # "<repo>/<number>", with the "arkui-x/" group prefix stripped.
            "编号": f"{issue.get('project_path_with_namespace', '').replace('arkui-x/', '')}/{issue_id}" if issue_id else "",
            "标题": issue.get("title", ""),
            "链接": web_url,
            "创建时间": format_datetime(issue.get("created_at")),
            "创建人": author.get("name", "") if isinstance(author, dict) else "",
            "标签": labels_str,
            "状态": issue.get("state", "")
        }

    def load_existing_excel(self, filename: str = "issues.xlsx") -> pd.DataFrame:
        """
        Load the existing Excel report if it exists.

        Args:
            filename: Excel file name

        Returns:
            DataFrame with the existing rows (columns normalized), or an
            empty DataFrame when the file is missing or unreadable.
        """
        required_columns = ["编号", "标题", "链接", "创建时间", "创建人", "标签", "状态"]
        try:
            if os.path.exists(filename):
                # Try openpyxl first, then xlrd, then pandas' default engine.
                try:
                    df = pd.read_excel(filename, engine='openpyxl')
                except Exception:
                    try:
                        df = pd.read_excel(filename, engine='xlrd')
                    except Exception:
                        df = pd.read_excel(filename)

                # Bug fix: the message previously did not include the file name.
                print(f"已读取现有Excel文件 {filename}，共 {len(df)} 条记录")

                # Make sure every expected column exists...
                for col in required_columns:
                    if col not in df.columns:
                        print(f"警告: 现有Excel文件缺少列 '{col}'")
                        df[col] = ""

                # ...and normalize the column order.
                return df[required_columns]
            else:
                print(f"Excel文件 {filename} 不存在，将创建新文件")
                return pd.DataFrame(columns=required_columns)
        except Exception as e:
            print(f"读取Excel文件时发生错误: {e}")
            return pd.DataFrame(columns=required_columns)

    def update_existing_data(self, existing_df: pd.DataFrame, new_issues: List[Dict]) -> pd.DataFrame:
        """
        Refresh the state column of existing rows and append new issues.

        Args:
            existing_df: previously saved rows
            new_issues: freshly fetched raw issues

        Returns:
            The merged, state-refreshed DataFrame (sorted by creation time
            when new rows were appended).
        """
        new_df = pd.DataFrame([self.extract_issue_data(issue) for issue in new_issues])

        # Nothing to merge with: the new data is the whole report.
        if existing_df.empty:
            return new_df

        # Map issue link -> latest state, used to refresh existing rows.
        link_to_status = {}
        for _, row in new_df.iterrows():
            if row['链接'] and row['状态']:
                link_to_status[row['链接']] = row['状态']

        # Refresh the state of rows we already had.
        updated_count = 0
        for idx, row in existing_df.iterrows():
            link = row['链接']
            if link in link_to_status:
                old_status = existing_df.at[idx, '状态']
                new_status = link_to_status[link]
                if old_status != new_status:
                    existing_df.at[idx, '状态'] = new_status
                    updated_count += 1
                    print(f"更新状态: {link} 从 '{old_status}' 变为 '{new_status}'")

        print(f"共更新了 {updated_count} 条记录的状态")

        # Deduplicate by link: keep only issues we have not saved before.
        existing_links = set(existing_df['链接'].tolist())
        new_records = [row for _, row in new_df.iterrows() if row['链接'] not in existing_links]

        print(f"发现 {len(new_records)} 条新记录")

        if not new_records:
            # No new rows; return the (possibly state-refreshed) data as-is.
            return existing_df

        combined_df = pd.concat([existing_df, pd.DataFrame(new_records)], ignore_index=True)

        # Sort ascending by creation time; unparseable values coerce to NaT.
        combined_df['创建时间'] = pd.to_datetime(combined_df['创建时间'], format='%Y/%m/%d %H:%M:%S', errors='coerce')
        combined_df = combined_df.sort_values(by='创建时间', ascending=True)

        # Convert the creation time back to its string form.
        combined_df['创建时间'] = combined_df['创建时间'].dt.strftime('%Y/%m/%d %H:%M:%S')

        return combined_df.reset_index(drop=True)

    def save_to_excel(self, filename: str = "issues.xlsx") -> bool:
        """
        Save the fetched issues to Excel, merging with any existing file.

        Args:
            filename: target Excel file name

        Returns:
            True on success, False otherwise.
        """
        try:
            # Read what is already on disk, merge in the new data, write back.
            existing_df = self.load_existing_excel(filename)
            updated_df = self.update_existing_data(existing_df, self.all_issues)
            updated_df.to_excel(filename, index=False, engine='openpyxl')
            # Bug fix: the message previously did not include the file name.
            print(f"数据已保存到 {filename}，共 {len(updated_df)} 条记录")
            return True
        except Exception as e:
            print(f"保存Excel文件时发生错误: {e}")
            return False

    def print_summary(self):
        """Print a short summary: total count, per-state counts, first titles."""
        if not self.all_issues:
            print("没有获取到任何 issues")
            return

        print("\n=== 获取结果摘要 ===")
        print(f"总共获取到 {len(self.all_issues)} 条 issues")

        # Count issues per state.
        state_count = {}
        for issue in self.all_issues:
            state = issue.get("state", "unknown")
            state_count[state] = state_count.get(state, 0) + 1

        print("按状态统计:")
        for state, count in state_count.items():
            print(f"  {state}: {count} 条")

        # Preview the first five records.
        print("\n前5条记录预览:")
        for i, issue in enumerate(self.all_issues[:5]):
            title = issue.get("title", "无标题")
            issue_state = issue.get("state", "unknown")
            print(f"  {i+1}. [{issue_state}] {title}")

def main():
    """Entry point: fetch issues for every configured state and persist them."""
    print("GitCode arkui-x 项目 issues 获取工具")
    print("=" * 50)

    fetcher = GitCodeIssuesFetcher()

    # Fetch each configured state in turn; results accumulate on the fetcher.
    for state in ALL_STATES:
        print(f"\n--- 获取 {state} 状态的 issues ---")
        fetcher.fetch_issues(state=state, per_page=10, created_after=CREATED_AFTER)
        print(f"{state} 状态 issues 获取完成")
        if CREATED_AFTER:
            print(f"已过滤创建时间在 {CREATED_AFTER} 之后的issues")

    # Guard clause: nothing to save or summarize.
    if not fetcher.all_issues:
        print("未能获取到任何 issues 数据")
        return

    # Persist as JSON first, then mirror into the Excel report.
    json_saved = fetcher.save_to_file("issues.json")
    print("JSON数据保存成功" if json_saved else "JSON数据保存失败")

    excel_saved = fetcher.save_to_excel("issues.xlsx")
    print("Excel数据保存成功" if excel_saved else "Excel数据保存失败")

    fetcher.print_summary()


if __name__ == "__main__":
    main()