#!/usr/bin/env python3
"""
GitCode仓库提交统计分析工具
获取仓库提交记录并进行可视化分析
"""

import json
import os
import random
import time
from datetime import datetime, timedelta
from typing import Dict, List

import matplotlib.pyplot as plt
import pandas as pd
import plotly.graph_objects as go
import requests
from loguru import logger
from plotly.subplots import make_subplots

# Configure fonts capable of rendering CJK glyphs in matplotlib charts;
# matplotlib falls through the list until it finds an installed font.
plt.rcParams['font.sans-serif'] = ['SimHei', 'Arial Unicode MS', 'DejaVu Sans']
# Render the minus sign as ASCII '-' so it is not dropped as a missing
# glyph when a CJK font is active.
plt.rcParams['axes.unicode_minus'] = False


class GitCodeCommitsAnalyzer:
    """Analyzer for commit history of GitCode repositories."""

    def __init__(self, access_token: str):
        """
        Create a new analyzer bound to one GitCode access token.

        Args:
            access_token: GitCode API access token.
        """
        self.access_token = access_token
        self.base_url = "https://api.gitcode.com/api/v5"

        # One shared HTTP session so every request carries the same
        # identification and content-negotiation headers.
        session = requests.Session()
        session.headers.update({
            'User-Agent': 'GitCode-Python-Client/1.0',
            'Accept': 'application/json',
            'Content-Type': 'application/json',
        })
        self.session = session

    def get_repo_commits(self, owner: str, repo: str, **kwargs) -> List[Dict]:
        """
        Fetch one page of commits for a repository.

        Args:
            owner: Repository owner.
            repo: Repository name.
            **kwargs: Optional query parameters:
                - ref_name: Branch name (defaults to the default branch)
                - since: Start time (ISO 8601)
                - until: End time (ISO 8601)
                - path: File path filter
                - author: Author filter
                - per_page: Page size (1-100, API default 20)
                - page: Page number (API default 1)

        Returns:
            List of commit records; empty list on any unrecoverable error.
        """
        url = f"{self.base_url}/repos/{owner}/{repo}/commits"

        logger.info(url)

        # Pick a token at request time so repeated calls rotate across the
        # configured token pool and spread the rate-limit budget.
        current_token = get_random_access_token()

        params = {
            'access_token': current_token,
        }

        # Forward the supported query parameters. BUGFIX: 'page' and
        # 'per_page' are now included -- previously they were dropped, so
        # the API always returned the first page regardless of the
        # caller's paging arguments.
        optional_params = ['ref_name', 'since', 'until', 'path', 'author',
                           'page', 'per_page']
        for param in optional_params:
            if param in kwargs and kwargs[param]:
                params[param] = kwargs[param]

        try:
            # Fixed delay between requests to stay under the API rate limit.
            time.sleep(2.0)

            # Retry up to 3 times with escalating back-off.
            max_retries = 3
            for attempt in range(max_retries + 1):
                try:
                    # Plain GET; all parameters travel in the query string.
                    # BUGFIX: dropped the non-standard empty JSON body
                    # (data='{}') and redundant per-request Content-Type
                    # header -- the session already sets the header, and a
                    # GET request should not carry a body.
                    response = self.session.get(url, params=params)
                    response.raise_for_status()
                    return response.json()
                except requests.exceptions.HTTPError as e:
                    if response.status_code == 429:
                        # Rate limited: wait 60s, 120s, ... before retrying.
                        wait_time = 60 * (attempt + 1)
                        logger.warning(f"API调用频率超限，第{attempt + 1}次重试，等待{wait_time}秒: {url}")
                        if attempt < max_retries:
                            time.sleep(wait_time)
                            continue
                        else:
                            logger.error(f"达到最大重试次数，放弃请求: {url}")
                            return []
                    elif response.status_code >= 500:
                        # Server-side error: retryable with shorter back-off.
                        wait_time = 10 * (attempt + 1)
                        logger.warning(
                            f"服务器错误 {response.status_code}，第{attempt + 1}次重试，等待{wait_time}秒: {url}")
                        if attempt < max_retries:
                            time.sleep(wait_time)
                            continue
                        else:
                            logger.error(f"达到最大重试次数，服务器错误: {e}")
                            return []
                    else:
                        # Other HTTP errors (4xx) are not retried.
                        logger.error(f"HTTP错误 {response.status_code}: {e}")
                        logger.error(f"请求URL: {url}")
                        return []
                except requests.exceptions.RequestException as e:
                    # Network-level error: retryable.
                    wait_time = 5 * (attempt + 1)
                    logger.warning(f"网络错误，第{attempt + 1}次重试，等待{wait_time}秒: {e}")
                    if attempt < max_retries:
                        time.sleep(wait_time)
                        continue
                    else:
                        logger.error(f"达到最大重试次数，网络错误: {e}")
                        logger.error(f"请求URL: {url}")
                        return []
        except json.JSONDecodeError as e:
            logger.error(f"JSON解析失败: {e}")
            return []
        # Defensive fallback; the retry loop always returns, but an explicit
        # empty list keeps the declared return type honest.
        return []

    def get_all_commits(self, owner: str, repo: str, **kwargs) -> List[Dict]:
        """
        Fetch every commit of a repository by paging automatically.

        Args:
            owner: Repository owner.
            repo: Repository name.
            **kwargs: Query parameters forwarded to get_repo_commits
                (any 'page'/'per_page' here controls only the page size;
                paging itself is driven by this method).

        Returns:
            List of all commit records across every page.
        """

        def _report(message: str) -> None:
            # Show progress in the Streamlit UI when available, otherwise
            # log it. This helper replaces three copies of the same
            # try/except block in the original implementation.
            try:
                import streamlit as st
                st.write(message)
            except ImportError:
                logger.info(message)

        all_commits = []
        page = 1
        per_page = kwargs.get('per_page', 100)
        # Strip paging keys once; the loop supplies its own values.
        extra = {k: v for k, v in kwargs.items() if k not in ('page', 'per_page')}

        _report(f"正在获取仓库 {owner}/{repo} 的提交记录...")

        while True:
            _report(f"正在获取第 {page} 页数据...")

            commits = self.get_repo_commits(
                owner, repo,
                page=page,
                per_page=per_page,
                **extra
            )

            if not commits:
                break

            all_commits.extend(commits)

            # A short page means this was the last one.
            if len(commits) < per_page:
                break

            page += 1

            # Brief pause between page requests to avoid hammering the API.
            # ('time' is imported at module level; the redundant in-loop
            # import has been removed.)
            time.sleep(0.1)

        _report(f"总共获取到 {len(all_commits)} 条提交记录")
        return all_commits

    def get_repo_branches(self, owner: str, repo: str) -> List[str]:
        """
        List every branch name of a repository.

        Args:
            owner: Repository owner.
            repo: Repository name.

        Returns:
            Branch names; falls back to ['main'] when the lookup fails.
        """
        url = f"{self.base_url}/repos/{owner}/{repo}/branches"

        # Rotate tokens per request to spread rate-limit usage.
        params = {'access_token': get_random_access_token()}

        try:
            # Brief pause so branch lookups do not burst the API.
            time.sleep(0.1)
            response = self.session.get(url, params=params)
            response.raise_for_status()

            names = [entry.get('name', '')
                     for entry in response.json()
                     if entry.get('name')]

            logger.info(f"仓库 {owner}/{repo} 共有 {len(names)} 个分支: {names}")
            return names

        except Exception as e:
            logger.error(f"获取仓库 {owner}/{repo} 分支列表失败: {e}")
            # Best-effort fallback: assume the conventional default branch.
            return ['main']

    def get_commit_statistics(self, owner: str, repo: str, **kwargs) -> Dict:
        """
        Fetch commit statistics for a repository, aggregated over all branches.

        Args:
            owner: Repository owner.
            repo: Repository name.
            **kwargs: Query parameters:
                - since: Start time (ISO 8601 format)
                - only_self: Restrict statistics to the caller (optional)

        Returns:
            Dict with 'commits', 'statistics', 'total', 'branches_count'
            and 'branches' keys, merged across every branch.
        """
        # Discover every branch first; the statistics endpoint is queried
        # once per branch and the results are merged below.
        branches = self.get_repo_branches(owner, repo)
        logger.info(f"仓库 {owner}/{repo} 共有 {len(branches)} 个分支需要处理: {branches}")

        # Accumulators for the cross-branch merge.
        all_commits = []
        all_statistics = {}
        total_count = 0

        # NOTE(review): unlike the other endpoints in this class this URL
        # has no '/repos' segment -- presumably a different API family;
        # confirm against the GitCode API docs.
        url = f"{self.base_url}/{owner}/{repo}/repository/commit_statistics"

        # Query the statistics endpoint once per branch.
        for branch_name in branches:
            logger.info(f"正在获取分支 {branch_name} 的统计数据...")

            # Rotate tokens per request to spread rate-limit usage.
            current_token = get_random_access_token()

            # Query parameters for this branch.
            params = {
                'access_token': current_token,
                'branch_name': branch_name
            }

            # Optional filters.
            if 'since' in kwargs and kwargs['since']:
                params['since'] = kwargs['since']
            if 'only_self' in kwargs:
                params['only_self'] = kwargs['only_self']

            # Debug: full request URL and parameters.
            logger.debug(f"构建的URL: {url}")
            logger.debug(f"请求参数: {params}")

            try:
                # 2-second pause between requests to respect the API rate
                # limit (an earlier comment claimed 4s; the real delay is 2.0s).
                time.sleep(2.0)

                # Retry up to 3 times with escalating back-off.
                max_retries = 3
                branch_data = None

                for attempt in range(max_retries + 1):
                    try:
                        response = self.session.get(url, params=params)
                        response.raise_for_status()
                        branch_data = response.json()
                        break  # success: leave the retry loop
                    except requests.exceptions.HTTPError as e:
                        if response.status_code == 429:
                            wait_time = 60 * (attempt + 1)  # back-off grows per attempt
                            logger.warning(f"API调用频率超限，第{attempt + 1}次重试，等待{wait_time}秒: {url}")
                            if attempt < max_retries:
                                time.sleep(wait_time)
                                continue
                            else:
                                logger.error(f"达到最大重试次数，跳过分支 {branch_name}: {url}")
                                break
                        elif response.status_code >= 500:
                            # Server-side error: retryable.
                            wait_time = 10 * (attempt + 1)  # back-off grows per attempt
                            logger.warning(
                                f"服务器错误 {response.status_code}，第{attempt + 1}次重试，等待{wait_time}秒: {url}")
                            if attempt < max_retries:
                                time.sleep(wait_time)
                                continue
                            else:
                                logger.error(f"达到最大重试次数，服务器错误，跳过分支 {branch_name}: {e}")
                                break
                        else:
                            # Other HTTP errors (4xx) are not retried.
                            logger.error(f"HTTP错误 {response.status_code}，跳过分支 {branch_name}: {e}")
                            logger.error(f"请求URL: {url}")
                            break
                    except requests.exceptions.RequestException as e:
                        # Network-level error: retryable.
                        wait_time = 5 * (attempt + 1)  # back-off grows per attempt
                        logger.warning(f"网络错误，第{attempt + 1}次重试，等待{wait_time}秒: {e}")
                        if attempt < max_retries:
                            time.sleep(wait_time)
                            continue
                        else:
                            logger.error(f"达到最大重试次数，网络错误，跳过分支 {branch_name}: {e}")
                            logger.error(f"请求URL: {url}")
                            break

                # Merge this branch's payload into the accumulators.
                if branch_data:
                    # Merge commit records, tagging each with its branch.
                    branch_commits = branch_data.get('commits', [])
                    for commit in branch_commits:
                        commit['branch'] = branch_name  # remember the source branch
                    all_commits.extend(branch_commits)

                    # Merge per-user statistics.
                    branch_statistics = branch_data.get('statistics', [])
                    for stat in branch_statistics:
                        user_name = stat.get('user_name', '')
                        if user_name not in all_statistics:
                            all_statistics[user_name] = {
                                'user_name': user_name,
                                'nick_name': stat.get('nick_name', user_name),
                                'avatar_url': stat.get('avatar_url', ''),
                                'commit_count': 0,
                                'add_lines': 0,
                                'delete_lines': 0,
                                'branches': []
                            }

                        # Accumulate counters and keep a per-branch breakdown.
                        all_statistics[user_name]['commit_count'] += stat.get('commit_count', 0)
                        all_statistics[user_name]['add_lines'] += stat.get('add_lines', 0)
                        all_statistics[user_name]['delete_lines'] += stat.get('delete_lines', 0)
                        all_statistics[user_name]['branches'].append({
                            'branch': branch_name,
                            'commit_count': stat.get('commit_count', 0),
                            'add_lines': stat.get('add_lines', 0),
                            'delete_lines': stat.get('delete_lines', 0)
                        })

                    total_count += branch_data.get('total', 0)

            except json.JSONDecodeError as e:
                logger.error(f"JSON解析失败，跳过分支 {branch_name}: {e}")
                continue
            except Exception as e:
                logger.error(f"处理分支 {branch_name} 时发生未知错误: {e}")
                continue

        # Flatten the per-user dict into a list for the caller.
        statistics_list = list(all_statistics.values())

        logger.info(
            f"汇总完成：共处理 {len(branches)} 个分支，获得 {len(all_commits)} 个提交，{len(statistics_list)} 个用户统计")

        return {
            'commits': all_commits,
            'statistics': statistics_list,
            'total': total_count,
            'branches_count': len(branches),
            'branches': branches
        }

    def get_student_level_statistics(self, owner: str, repo_names: List[str], progress_callback=None, **kwargs) -> List[
        Dict]:
        """
        Build per-committer statistics aggregated across several repositories.

        Args:
            owner: Repository owner.
            repo_names: Repository names to scan.
            progress_callback: Optional callable(done, total, repo_name,
                branches_count) invoked after each repository is processed.
            **kwargs: Query parameters:
                - since: Start time (ISO 8601 format)

        Returns:
            List of per-committer stat dicts, sorted by total commits
            (descending).
        """
        student_stats = {}
        total_repos = len(repo_names)

        logger.info(f"开始获取 {total_repos} 个仓库的提交者统计数据...")

        for i, repo_name in enumerate(repo_names, 1):
            try:
                logger.info(f"正在处理仓库 {i}/{total_repos}: {repo_name}")

                # Per-repo, per-branch aggregated statistics.
                stats_data = self.get_commit_statistics(owner, repo_name, **kwargs)

                # Report progress, including how many branches were scanned.
                if progress_callback:
                    branches_count = stats_data.get('branches_count', 0)
                    progress_callback(i, total_repos, repo_name, branches_count)

                # Fold the 'statistics' entries (per-user commit/line totals).
                for stat in stats_data.get('statistics', []):
                    user_name = stat.get('user_name', '')
                    nick_name = stat.get('nick_name', user_name)

                    if user_name not in student_stats:
                        student_stats[user_name] = {
                            'user_name': user_name,
                            'nick_name': nick_name,
                            'avatar_url': stat.get('avatar_url', ''),
                            'total_commits': 0,
                            'total_add_lines': 0,
                            'total_delete_lines': 0,
                            'active_days': 0,
                            'commit_dates': set(),
                            'repos_involved': set(),
                            'projects': [],
                            'last_active_time': None
                        }

                    # Accumulate totals across repositories.
                    student_stats[user_name]['total_commits'] += stat.get('commit_count', 0)
                    student_stats[user_name]['total_add_lines'] += stat.get('add_lines', 0)
                    student_stats[user_name]['total_delete_lines'] += stat.get('delete_lines', 0)
                    student_stats[user_name]['repos_involved'].add(repo_name)

                    # Keep a per-project breakdown.
                    student_stats[user_name]['projects'].append({
                        'repo_name': repo_name,
                        'project_id': stat.get('project_id'),
                        'branch': stat.get('branch', 'main'),
                        'commit_count': stat.get('commit_count', 0),
                        'add_lines': stat.get('add_lines', 0),
                        'delete_lines': stat.get('delete_lines', 0)
                    })

                # Fold the raw commits to derive active days and last activity.
                for commit in stats_data.get('commits', []):
                    author_name = commit.get('author_name', '')
                    commit_date = commit.get('date', '')

                    if author_name and commit_date:
                        if author_name not in student_stats:
                            student_stats[author_name] = {
                                'user_name': author_name,
                                'nick_name': author_name,
                                'avatar_url': '',
                                'total_commits': 0,
                                'total_add_lines': 0,
                                'total_delete_lines': 0,
                                'active_days': 0,
                                'commit_dates': set(),
                                'repos_involved': set(),
                                'projects': [],
                                'last_active_time': None
                            }

                        # Set membership deduplicates dates automatically.
                        student_stats[author_name]['commit_dates'].add(commit_date)
                        student_stats[author_name]['repos_involved'].add(repo_name)
                        # Track the most recent commit date. String comparison
                        # assumes dates arrive in ISO 8601 form -- TODO confirm
                        # the upstream format.
                        if student_stats[author_name]['last_active_time'] is None or commit_date > \
                                student_stats[author_name]['last_active_time']:
                            student_stats[author_name]['last_active_time'] = commit_date

            except Exception as e:
                logger.error(f"获取仓库 {repo_name} 的统计数据失败: {e}")
                continue

        # Finalize: compute derived per-user fields.
        result = []
        for user_name, stats in student_stats.items():
            stats['repos_involved'] = len(stats['repos_involved'])
            stats['total_code_lines'] = stats['total_add_lines'] - stats['total_delete_lines']
            # Active days = number of distinct commit dates seen.
            stats['active_days'] = len(stats['commit_dates'])

            # Average commits per month. NOTE(review): the key is named
            # 'annual_commit_rate' but the value is a monthly rate
            # (30.44-day months); the key is kept for caller compatibility.
            if stats['active_days'] > 0 and stats['last_active_time']:
                try:
                    # Parse the last-active timestamp.
                    last_active = datetime.fromisoformat(stats['last_active_time'].replace('Z', '+00:00'))
                    # Span between first and last commit, in days.
                    first_commit_dates = sorted(list(stats['commit_dates']))
                    if first_commit_dates:
                        first_active = datetime.fromisoformat(first_commit_dates[0].replace('Z', '+00:00'))
                        active_span_days = (last_active - first_active).days + 1  # +1 so the span is at least 1 day
                        # 30.44 days = average month length.
                        if active_span_days > 30:
                            stats['annual_commit_rate'] = stats['total_commits'] / active_span_days * 30.44
                        else:
                            stats['annual_commit_rate'] = stats['total_commits']  # short spans: use the raw count
                    else:
                        stats['annual_commit_rate'] = 0.0
                except Exception as e:
                    logger.warning(f"计算用户 {user_name} 月均提交次数失败: {e}")
                    stats['annual_commit_rate'] = 0.0
            else:
                stats['annual_commit_rate'] = 0.0

            # Keep only the date part (YYYY-MM-DD).
            if stats['last_active_time']:
                stats['last_active_time'] = stats['last_active_time'][:10]  # date portion only
            # Drop the temporary set (not JSON-serializable).
            del stats['commit_dates']
            result.append(stats)

        # Most prolific committers first.
        result.sort(key=lambda x: x['total_commits'], reverse=True)

        return result

    def commits_to_dataframe(self, commits: List[Dict]) -> pd.DataFrame:
        """
        Convert raw commit records into a pandas DataFrame.

        Args:
            commits: Commit dicts as returned by the API.

        Returns:
            DataFrame with one row per commit plus derived time columns
            (date, hour, weekday, month, year, week); empty when the
            input is empty.
        """
        if not commits:
            return pd.DataFrame()

        rows = []
        for item in commits:
            detail = item.get('commit', {})
            inner_author = detail.get('author', {})
            inner_committer = detail.get('committer', {})

            # Top-level author/committer objects (GitCode user records).
            top_author = item.get('author', {})
            top_committer = item.get('committer', {})

            def pick_author(field):
                """First non-blank value: commit.author, then top-level
                author, then commit.committer, then top-level committer."""
                # Top-level user objects expose 'login' where the nested
                # git signature exposes 'name'.
                alias = 'login' if field == 'name' else field
                for value in (inner_author.get(field, ''),
                              top_author.get(alias, ''),
                              inner_committer.get(field, ''),
                              top_committer.get(alias, '')):
                    text = str(value).strip() if value else ''
                    if text:
                        return text
                return ''

            def pick_date():
                """First available timestamp: commit.author.date, then
                commit.committer.date, then top-level committer.date."""
                for value in (inner_author.get('date'),
                              inner_committer.get('date'),
                              top_committer.get('date')):
                    if value:
                        return value
                return None

            stats = item.get('stats', {})
            rows.append({
                'sha': item.get('sha'),
                'message': detail.get('message', ''),
                'author_name': pick_author('name'),
                'author_email': pick_author('email'),
                'author_date': pick_date(),
                'committer_name': inner_committer.get('name', ''),
                'committer_email': inner_committer.get('email', ''),
                'committer_date': inner_committer.get('date'),
                'html_url': item.get('html_url'),
                'comments_url': item.get('comments_url'),
                # Change statistics (when the API supplied them).
                'additions': stats.get('additions', 0),
                'deletions': stats.get('deletions', 0),
                'total_changes': stats.get('total', 0),
                # Touched-file count.
                'files_changed': len(item.get('files', [])),
            })

        df = pd.DataFrame(rows)

        # Parse timestamps; unparseable values become NaT.
        for col in ('author_date', 'committer_date'):
            if col in df.columns:
                df[col] = pd.to_datetime(df[col], errors='coerce')

        # Derived time columns used by the analysis and chart code.
        if 'author_date' in df.columns:
            when = df['author_date'].dt
            df['date'] = when.date
            df['hour'] = when.hour
            df['weekday'] = when.day_name()
            df['month'] = when.month
            df['year'] = when.year
            df['week'] = when.isocalendar().week

        return df

    def analyze_commits(self, df: pd.DataFrame) -> Dict:
        """
        分析提交数据
        
        Args:
            df: 提交数据DataFrame
        
        Returns:
            分析结果字典
        """
        if df.empty:
            return {}

        analysis = {
            'total_commits': len(df),
            'unique_authors': df['author_name'].nunique(),
            'date_range': {
                'start': df['author_date'].min(),
                'end': df['author_date'].max()
            },
            'top_contributors': df['author_name'].value_counts().head(10).to_dict(),
            'commits_by_hour': df['hour'].value_counts().sort_index().to_dict(),
            'commits_by_weekday': df['weekday'].value_counts().to_dict(),
            'commits_by_month': df.groupby(['year', 'month']).size().to_dict(),
            'total_changes': {
                'additions': df['additions'].sum(),
                'deletions': df['deletions'].sum(),
                'total': df['total_changes'].sum()
            },
            'avg_changes_per_commit': {
                'additions': df['additions'].mean(),
                'deletions': df['deletions'].mean(),
                'total': df['total_changes'].mean()
            }
        }

        return analysis

    def create_visualizations(self, df: pd.DataFrame, output_dir: str = "./"):
        """
        Render a 2x2 grid of static summary charts and save it as a PNG.

        Args:
            df: Commit DataFrame from commits_to_dataframe.
            output_dir: Directory where the image is written.
        """
        if df.empty:
            logger.warning("没有数据可供可视化")
            return

        plt.figure(figsize=(12, 8))

        # Panel 1: commits per contributor (top 15).
        plt.subplot(2, 2, 1)
        df['author_name'].value_counts().head(15).plot(kind='bar')
        plt.title('Top 15 贡献者提交统计')
        plt.xlabel('贡献者')
        plt.ylabel('提交次数')
        plt.xticks(rotation=45)

        # Panel 2: commits by hour of day.
        plt.subplot(2, 2, 2)
        by_hour = df['hour'].value_counts().sort_index()
        plt.plot(by_hour.index, by_hour.values, marker='o')
        plt.title('每小时提交分布')
        plt.xlabel('小时')
        plt.ylabel('提交次数')
        plt.grid(True)

        # Panel 3: commits by weekday, fixed Monday..Sunday order.
        plt.subplot(2, 2, 3)
        order = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday',
                 'Saturday', 'Sunday']
        df['weekday'].value_counts().reindex(order, fill_value=0).plot(kind='bar')
        plt.title('每周提交分布')
        plt.xlabel('星期')
        plt.ylabel('提交次数')
        plt.xticks(rotation=45)

        # Panel 4: monthly commit trend.
        plt.subplot(2, 2, 4)
        df.groupby(df['author_date'].dt.to_period('M')).size().plot(kind='line', marker='o')
        plt.title('月度提交趋势')
        plt.xlabel('月份')
        plt.ylabel('提交次数')
        plt.xticks(rotation=45)

        plt.tight_layout()
        plt.savefig(f"{output_dir}/commits_analysis.png", dpi=300, bbox_inches='tight')
        plt.show()

        # Interactive HTML companion charts.
        self._create_interactive_charts(df, output_dir)

    def _create_interactive_charts(self, df: pd.DataFrame, output_dir: str):
        """
        Build a 2x2 interactive Plotly dashboard and write it to HTML.

        Args:
            df: Commit DataFrame from commits_to_dataframe.
            output_dir: Directory where the HTML file is written.
        """
        # Layout: bar + heatmap on the first row, bar + scatter on the second.
        fig = make_subplots(
            rows=2, cols=2,
            subplot_titles=('贡献者提交统计', '每日提交热力图', '代码变更统计', '提交时间分布'),
            specs=[[{"type": "bar"}, {"type": "heatmap"}],
                   [{"type": "bar"}, {"type": "scatter"}]]
        )

        # Top-10 contributors as a horizontal bar chart.
        leaders = df['author_name'].value_counts().head(10)
        fig.add_trace(
            go.Bar(x=leaders.values, y=leaders.index, orientation='h',
                   name='提交次数'),
            row=1, col=1
        )

        # Per-author daily activity heatmap.
        if 'date' in df.columns:
            per_day = df.groupby(['author_name', 'date']).size().reset_index(name='commits')
            matrix = per_day.pivot(index='author_name', columns='date', values='commits').fillna(0)

            if not matrix.empty:
                fig.add_trace(
                    go.Heatmap(
                        z=matrix.values,
                        x=[str(d) for d in matrix.columns],
                        y=matrix.index,
                        colorscale='Blues',
                        name='每日提交'
                    ),
                    row=1, col=2
                )

        # Total line changes per author (top 10).
        if 'total_changes' in df.columns:
            churn = df.groupby('author_name')['total_changes'].sum().sort_values(ascending=False).head(10)
            fig.add_trace(
                go.Bar(x=churn.index, y=churn.values, name='代码变更量'),
                row=2, col=1
            )

        # Scatter of commit timestamps per author.
        if 'author_date' in df.columns:
            fig.add_trace(
                go.Scatter(
                    x=df['author_date'],
                    y=df['author_name'],
                    mode='markers',
                    marker=dict(size=8, opacity=0.6),
                    name='提交时间'
                ),
                row=2, col=2
            )

        fig.update_layout(
            height=800,
            title_text="Git提交统计分析",
            showlegend=False
        )

        fig.write_html(f"{output_dir}/commits_interactive.html")
        logger.info(f"交互式图表已保存到: {output_dir}/commits_interactive.html")

    def generate_report(self, owner: str, repo: str, **kwargs) -> pd.DataFrame:
        """
        Run the full pipeline: fetch commits, analyze, log, save, visualize.

        Args:
            owner: Repository owner.
            repo: Repository name.
            **kwargs: Query parameters forwarded to get_all_commits.

        Returns:
            The commit DataFrame (empty when nothing was fetched).
        """
        # Fetch and normalize the commit history.
        df = self.commits_to_dataframe(self.get_all_commits(owner, repo, **kwargs))

        if df.empty:
            logger.warning("未获取到提交数据")
            return df

        analysis = self.analyze_commits(df)

        # Headline numbers.
        logger.info("\n=== Git提交统计分析报告 ===")
        logger.info(f"仓库: {owner}/{repo}")
        logger.info(f"总提交数: {analysis['total_commits']}")
        logger.info(f"贡献者数量: {analysis['unique_authors']}")
        logger.info(f"时间范围: {analysis['date_range']['start']} 到 {analysis['date_range']['end']}")

        # Contributor leaderboard.
        logger.info("\n=== Top 10 贡献者 ===")
        for author, count in list(analysis['top_contributors'].items())[:10]:
            logger.info(f"{author}: {count} 次提交")

        # Line-change totals.
        logger.info("\n=== 代码变更统计 ===")
        logger.info(f"总新增行数: {analysis['total_changes']['additions']:,}")
        logger.info(f"总删除行数: {analysis['total_changes']['deletions']:,}")
        logger.info(f"总变更行数: {analysis['total_changes']['total']:,}")

        # Persist the raw table; utf-8-sig keeps Excel happy with CJK text.
        stamp = datetime.now().strftime('%Y%m%d_%H%M%S')
        csv_file = f"{owner}_{repo}_commits_{stamp}.csv"
        df.to_csv(csv_file, index=False, encoding='utf-8-sig')
        logger.info(f"\n提交数据已保存到: {csv_file}")

        # Static and interactive charts.
        self.create_visualizations(df)

        return df


def get_random_access_token():
    """Pick one access token at random from GITCODE_ACCESS_TOKEN.

    The environment variable may hold a single token or several separated
    by commas. Returns None (after logging an error) when no usable token
    is configured.
    """
    raw = os.getenv('GITCODE_ACCESS_TOKEN')
    if not raw:
        logger.error("请设置环境变量 GITCODE_ACCESS_TOKEN")
        return None

    # Allow a comma-separated pool of tokens; drop blank entries.
    pool = [part.strip() for part in raw.split(',') if part.strip()]
    if not pool:
        logger.error("GITCODE_ACCESS_TOKEN 格式错误，请提供有效的令牌")
        return None

    chosen = random.choice(pool)
    # Only the first 8 characters are logged to avoid leaking the secret.
    logger.info(f"已选择令牌（前8位）: {chosen[:8]}...")
    return chosen


def main():
    """Example entry point: analyze one repository end to end."""
    # A token is mandatory; bail out early if none is configured.
    access_token = get_random_access_token()
    if not access_token:
        return None

    # Repository to analyze -- take a real name from the repo-list
    # fetcher script and substitute it below.
    OWNER = "dlut-water"  # organization name
    REPO = "your-repo-name"  # repository name (replace with an actual one)

    analyzer = GitCodeCommitsAnalyzer(access_token)

    # Optional: limit the window to the last six months.
    since_date = (datetime.now() - timedelta(days=180)).isoformat()

    # Produce the full report (fetch, analyze, save, visualize).
    df = analyzer.generate_report(
        OWNER, REPO,
        since=since_date,
        per_page=100,
    )

    # A few extra one-off statistics.
    if not df.empty:
        logger.info("\n=== 额外分析 ===")

        # Busiest hour of day.
        busiest_hour = df['hour'].mode().iloc[0]
        logger.info(f"最活跃的提交时间: {busiest_hour}:00")

        # Busiest weekday.
        busiest_day = df['weekday'].mode().iloc[0]
        logger.info(f"最活跃的提交日: {busiest_day}")

        # Mean commits per day over the observed span.
        span_days = (df['author_date'].max() - df['author_date'].min()).days
        if span_days > 0:
            daily_rate = len(df) / span_days
            logger.info(f"平均每日提交数: {daily_rate:.2f}")

        # Contributor with the largest total line churn.
        if 'total_changes' in df.columns:
            churn = df.groupby('author_name')['total_changes'].sum()
            logger.info(f"代码变更最多的贡献者: {churn.idxmax()} ({churn.max():,} 行)")


if __name__ == "__main__":
    # Running this module directly only prints usage hints; the analysis
    # itself is started by uncommenting main() below once a repository
    # name has been filled in.
    logger.info("请先运行 gitcode_repos_fetcher.py 获取仓库列表")
    logger.info("然后修改 REPO 变量为要分析的具体仓库名")

    # Uncomment the following line to run the analysis.
    # main()
