import json
import os
from datetime import datetime, timedelta
from typing import Any, Dict, Iterator, Optional

import pandas as pd
from langchain.chains import LLMChain
from langchain.llms import Ollama
from langchain.prompts import PromptTemplate
from loguru import logger

try:
    from langchain_community.llms import Tongyi
except ImportError:
    Tongyi = None


class ContributorReportGenerator:
    """Intelligent analysis-report generator for contributor data.

    Summarizes a contributor-metrics DataFrame, combines the summary with a
    fixed management-consulting prompt, and asks an LLM (Ollama by default,
    or DashScope/Tongyi) to produce a Chinese-language report.
    """

    def __init__(self,
                 llm_provider: str = "ollama",
                 ollama_base_url: str = "http://10.48.0.81:11434",
                 model_name: str = "qwq:latest",
                 dashscope_api_key: Optional[str] = None):
        """Initialize the report generator.

        Args:
            llm_provider: LLM provider ("ollama" or "dashscope").
            ollama_base_url: Base URL of the Ollama service.
            model_name: Name of the model to use.
            dashscope_api_key: DashScope API key; falls back to the
                DASHSCOPE_API_KEY environment variable when omitted.
        """
        self.llm_provider = llm_provider
        self.ollama_base_url = ollama_base_url
        self.model_name = model_name
        self.dashscope_api_key = dashscope_api_key or os.getenv('DASHSCOPE_API_KEY')
        self.llm = None
        self._initialize_llm()

    def _initialize_llm(self):
        """Create and store the LLM client for the configured provider.

        Raises:
            ImportError: DashScope requested but langchain-community missing.
            ValueError: DashScope requested without an API key.
            Exception: Any connection error is logged and re-raised.
        """
        try:
            if self.llm_provider == "dashscope":
                if Tongyi is None:
                    raise ImportError("请安装 langchain-community 以使用 DashScope")
                if not self.dashscope_api_key:
                    raise ValueError("使用DashScope需要提供API密钥")

                # Tongyi reads the key from the environment variable.
                os.environ['DASHSCOPE_API_KEY'] = self.dashscope_api_key
                self.llm = Tongyi(
                    model_name=self.model_name,
                    temperature=0.7
                )
                logger.info(f"成功连接到DashScope服务，模型: {self.model_name}")
            else:
                # Default provider: Ollama.
                self.llm = Ollama(
                    base_url=self.ollama_base_url,
                    model=self.model_name,
                    temperature=0.7
                )
                logger.info(f"成功连接到Ollama服务: {self.ollama_base_url}")
        except Exception as e:
            logger.error(f"连接LLM服务失败: {e}")
            raise

    def _create_analysis_prompt(self) -> "PromptTemplate":
        """Build the data-analysis prompt template used by both report entry points."""
        template = """
You are a senior R&D team management consultant and data analysis expert. Please conduct an in-depth analysis based on the following contributor data and generate a professional management decision report.

## Data Overview
Total Contributors: {total_contributors}
Analysis Date: {analysis_date}

## Core Metrics Data
{core_metrics_data}

## S级评分贡献者 (卓越表现者)
{s_level_contributors_data}

## Bottom 10 Contributors (Recent 3 Months - Need Improvement)
{bottom10_contributors_data}

## Statistical Calculation Process
{calculation_process}

## Analysis Requirements
Please conduct in-depth analysis from the following dimensions:

1. **Team Overall Performance Analysis**
   - Analyze team's overall activity and contribution distribution, identify "80-20 rule" risks
   - Identify core contributors, potential contributors, and personnel needing improvement
   - Evaluate team collaboration efficiency and code quality trends, focusing on PR review quality
   - Analyze team knowledge distribution and single point of failure risks
   - Analyze performance gaps between top and bottom performers

2. **Individual Contributor Profiling and Rating Analysis**
   **2.1 S-Level Contributors Analysis (Excellent Performers)**
   - Analyze best practices and success patterns of S-level contributors
   - Identify their key strengths, contribution models, and leadership qualities
   - Evaluate their mentoring potential and knowledge transfer capabilities
   - Assess their role in driving team excellence and innovation
   
   **2.2 Underperforming Contributors Analysis (Bottom 10 in Recent 3 Months)**
   - Conduct detailed analysis of contributors needing improvement, especially those below S-level
   - For non-S-level personnel, must provide detailed scoring rationale and calculation process
   - Analyze each person's contribution characteristics, strength areas, and obvious weaknesses
   - **Focus on first commit time and last commit time**: Analyze contributors' participation history, active cycles, and continuity
   - Evaluate contributors' project loyalty and long-term value based on time span
   - **Project Participation Evaluation Standards**:
     * Annual projects >= 20: Extremely high project participation, potential risk of scattered energy
     * Annual projects 10-20: Very high project participation, versatile talent type
     * Annual projects 5-10: Moderate project participation, balanced professional depth and breadth
     * Annual projects 2-5: Average project participation, may focus on specific areas
     * Annual projects < 2: Low project participation, need attention to workload saturation
   - Identify contributors who need immediate attention, cultivation, or role adjustment
   - Conduct sharp assessment of PR quality and code review participation

3. **R&D Management Pain Point Diagnosis and Solutions**
   - Provide direct and sharp diagnosis of key management issues discovered from data
   - **Emphasize project manager dereliction of duty**: For inefficiency, quality issues, collaboration chaos in the team, must clearly point out this is management dereliction of project managers, including but not limited to:
     * Failure to establish effective code review mechanisms leading to quality issues
     * Lack of reasonable task allocation and workload balance management
     * Inaccurate assessment of team member capabilities, improper personnel allocation
     * Failure to timely identify and resolve team collaboration issues
     * Lack of effective performance monitoring and improvement mechanisms
   - **Emphasize CTO leadership responsibility**: As the technical leader, CTO bears ultimate responsibility for team performance and technical direction. Must clearly point out CTO's critical role in:
     * Setting technical standards and architectural decisions that impact team productivity
     * Establishing engineering culture and best practices across the organization
     * Ensuring proper resource allocation and technical talent development
     * Driving innovation while maintaining code quality and delivery excellence
     * Building scalable processes that support team growth and efficiency
   - **Focus on performance gap management**: Analyze the significant performance differences between top and bottom contributors, identify management failures in talent development and performance improvement
   - Propose specific solutions for project management chaos and irregular processes
   - Focus on analyzing defects in code review processes and improvement suggestions
   - Identify efficiency bottlenecks and quality risk points in team collaboration

4. **R&D Management Tools and Process Optimization Recommendations**
   - **Project Management Tool Recommendations**: Based on team size and collaboration mode, recommend specific project management tools (such as Jira, Asana, Notion, etc.) and implementation plans
   - **OKR Formulation and Execution Guidance**: Provide OKR formulation templates, execution tracking mechanisms, and assessment standards for R&D teams
   - **R&D Process Optimization**: Provide specific actionable improvement suggestions for code review processes, branch management strategies, release processes, etc.
   - **Team Collaboration Mechanisms**: Suggest specific meeting systems, communication mechanisms, and knowledge sharing processes
   - **Mentorship and Talent Development Programs**: Establish structured programs leveraging top performers to guide underperforming contributors
   - **Performance Improvement Frameworks**: Design specific improvement plans for bottom contributors with clear milestones and accountability measures

5. **Quantitative Indicator Interpretation and Management Decision Support**
   - Explain the practical significance of various indicators for team management
   - **In-depth analysis of management value of first commit time and last commit time**:
     * Identify contribution pattern differences between new and veteran employees
     * Discover activity trend changes of long-term contributors
     * Evaluate team members' project lifecycle participation
   - **Correct understanding of project participation**:
     * Project quantity should be comprehensively evaluated combining project scale, complexity, and time span
     * Annual 20+ projects usually indicate high activity and diversified technical capabilities
     * Avoid simply equating project quantity with work quality
     * Focus on depth of project contributions and technical impact
   - Analyze management issues behind indicator anomalies
   - Provide data-based personnel allocation and task assignment recommendations
   - Establish quantitative indicator systems for continuous monitoring and improvement

## Output Format Requirements
Please output in professional management consulting report format, including:
- **Executive Summary**: Core issues and key recommendations, highlighting the performance gap between top and bottom contributors
- **Team Status Diagnosis**: Objective data analysis and problem identification
- **Individual Assessment Report**: Detailed analysis and development suggestions for each member, with special focus on top performers' best practices and bottom performers' improvement areas
- **Management Improvement Plan**: Specific executable management tools and process optimization recommendations
- **Action Plan**: Specific steps and timelines for phased implementation

The report must:
- Directly address management pain points, avoid vague suggestions
- Provide specific actionable solutions
- Be sharp and direct in problem diagnosis, not avoiding contradictions
- Control word count to 3000-4000 words, ensuring substantial content
- **STRICTLY FORBIDDEN: Do not use any Mermaid diagrams or flowcharts in the output**
- **IMPORTANT: Please output the final report in Chinese language**
"""

        return PromptTemplate(
            input_variables=[
                "total_contributors", "analysis_date",
                "core_metrics_data", "s_level_contributors_data",
                "bottom10_contributors_data", "calculation_process"
            ],
            template=template
        )

    @staticmethod
    def _build_contributor_info(row: pd.Series, rank_label: str) -> Dict[str, str]:
        """Build one contributor's display dict.

        Shared by the S-level and bottom-10 summaries, which previously
        duplicated this logic line for line.

        Args:
            row: One contributor row from the metrics DataFrame.
            rank_label: Pre-formatted rank string (e.g. "S级第1名").

        Returns:
            Dict of display labels to formatted values.
        """
        info = {
            "排名": rank_label,
            "姓名": row['canonical_name'],
            "月度提交数": f"{row['month_commit_rate']:.2f}",
            "年度提交数": f"{row['annual_commit_rate']:.2f}",
            "月度新增行数": f"{row['month_total_additions']:.0f}",
            "年度新增行数": f"{row['annual_total_additions']:.0f}",
            "综合评分": f"{row['comprehensive_score']:.0f}",
            "综合评级": row['comprehensive_rating']
        }

        # Optional fields: include only when the source DataFrame carries them.
        if 'total_projects' in row:
            info["项目总数"] = f"{row['total_projects']:.0f}"
        if 'active_days' in row:
            info["活跃天数"] = f"{row['active_days']:.0f}"
        if 'additions_ratio' in row:
            info["新增代码占比"] = f"{row['additions_ratio']:.1f}%"
        # Deep-participation project count is deliberately excluded from AI analysis.
        if 'consistency_ratio' in row:
            info["一致性比例"] = f"{row['consistency_ratio']:.1f}%"
        if 'projects_per_year' in row:
            info["年均项目数"] = f"{row['projects_per_year']:.1f}"
        # Commit-time span fields feed the prompt's tenure/continuity analysis.
        for key, label in (('first_commit_time', "首次提交时间"),
                           ('last_commit_time', "最近提交时间")):
            if key in row:
                info[label] = row[key].strftime('%Y-%m-%d %H:%M:%S') if pd.notna(row[key]) else "未知"

        return info

    def _prepare_data_summary(self, df: pd.DataFrame) -> Dict[str, Any]:
        """Summarize contributor data for the analysis prompt.

        Args:
            df: Contributor metrics DataFrame.

        Returns:
            Dict with the contributor count, core metric statistics, the
            S-level contributor details, and the bottom-10 contributors
            among those active in the last 90 days.
        """
        total_contributors = len(df)

        # Aggregate statistics for the three core metric columns.
        metrics_summary = {
            "月度提交数统计": {
                "平均值": f"{df['month_commit_rate'].mean():.2f}",
                "最大值": f"{df['month_commit_rate'].max():.2f}",
                "最小值": f"{df['month_commit_rate'].min():.2f}",
                "标准差": f"{df['month_commit_rate'].std():.2f}"
            },
            "年度提交数统计": {
                "平均值": f"{df['annual_commit_rate'].mean():.2f}",
                "最大值": f"{df['annual_commit_rate'].max():.2f}",
                "最小值": f"{df['annual_commit_rate'].min():.2f}",
                "标准差": f"{df['annual_commit_rate'].std():.2f}"
            },
            "综合评分统计": {
                "平均值": f"{df['comprehensive_score'].mean():.0f}",
                "最大值": f"{df['comprehensive_score'].max():.0f}",
                "最小值": f"{df['comprehensive_score'].min():.0f}",
                "标准差": f"{df['comprehensive_score'].std():.0f}"
            }
        }

        # Detailed info for every S-rated contributor.
        s_level_df = df[df['comprehensive_rating'] == 'S级 (卓越)']
        s_level_contributors = [
            self._build_contributor_info(row, f"S级第{idx + 1}名")
            for idx, (_, row) in enumerate(s_level_df.iterrows())
        ]

        # Contributors with commits in the last 90 days; if the DataFrame has
        # no last_commit_time column, fall back to the whole population.
        three_months_ago = datetime.now() - timedelta(days=90)
        if 'last_commit_time' in df.columns:
            recent_contributors = df[pd.to_datetime(df['last_commit_time']) >= three_months_ago]
        else:
            recent_contributors = df

        # Worst 10 of the recently active contributors by comprehensive score.
        bottom_df = recent_contributors.nsmallest(10, 'comprehensive_score')
        bottom10_contributors = [
            self._build_contributor_info(row, f"倒数第{idx + 1}名")
            for idx, (_, row) in enumerate(bottom_df.iterrows())
        ]

        return {
            "total_contributors": total_contributors,
            "core_metrics": metrics_summary,
            "s_level_contributors": s_level_contributors,
            "bottom10_contributors": bottom10_contributors
        }

    def _create_calculation_process(self) -> str:
        """Return the calculation-process description embedded in the prompt."""
        # NOTE(review): the original text gave C-level and D-level the same
        # band ("280 <= score < 560" for both) — an obvious copy-paste error.
        # The bands below are made disjoint by splitting at 420; confirm the
        # exact thresholds against the actual scoring implementation.
        process = """
## Core Metrics Calculation Methods

### 1. Commit Frequency Metrics
- **Monthly Commit Count**: Total Git commits in the last 30 days
- **Annual Commit Count**: Total Git commits in the last 365 days
- Calculation Formula: Direct count of commits within the time range

### 2. Code Contribution Metrics
- **Monthly Lines Added**: Total lines of code added in the last 30 days
- **Annual Lines Added**: Total lines of code added in the last 365 days
- Calculation Formula: sum(additions) - sum(deletions)

### 3. Project Participation Metrics
- **Total Projects**: Number of projects the contributor has participated in
- **Annual Project Rate**: Total Projects / Active Years

### 4. Quality and Consistency Metrics
- **Code Addition Ratio**: Lines Added / (Lines Added + Lines Deleted) * 100%
- **Consistency Ratio**: Continuous Contribution Days / Total Active Days * 100%
- **Active Days**: Total days with commit records

### 5. Comprehensive Scoring Algorithm
Comprehensive Score = Monthly Commit Weight * 200 + Monthly Lines Added Weight * 200 + Project Participation Weight * 50
- Monthly Commit Weight: Normalized score based on commit frequency
- Monthly Lines Added Weight: Normalized score based on code contribution volume
- Project Participation Weight: Comprehensive score based on project quantity and quality

### 6. Rating Standards
- S-Level (Excellent): Comprehensive Score >= 1000 points
- A-Level (Good): 700 <= Comprehensive Score < 1000 points
- B-Level (Average): 560 <= Comprehensive Score < 700 points
- C-Level (Needs Improvement): 420 <= Comprehensive Score < 560 points
- D-Level (Poor): 280 <= Comprehensive Score < 420 points
- E-Level (Very Poor): Comprehensive Score < 280 points
"""
        return process

    def _build_prompt_inputs(self, df: pd.DataFrame) -> Dict[str, Any]:
        """Assemble the template variables shared by both report entry points.

        Args:
            df: Contributor metrics DataFrame.

        Returns:
            Kwargs matching the prompt template's input_variables exactly.
        """
        data_summary = self._prepare_data_summary(df)
        core_metrics_data = json.dumps({
            "total_contributors": data_summary["total_contributors"],
            "core_metrics": data_summary["core_metrics"]
        }, ensure_ascii=False, indent=2)

        return {
            "total_contributors": data_summary["total_contributors"],
            "analysis_date": datetime.now().strftime("%Y年%m月%d日"),
            "core_metrics_data": core_metrics_data,
            "s_level_contributors_data": json.dumps(
                data_summary["s_level_contributors"], ensure_ascii=False, indent=2),
            "bottom10_contributors_data": json.dumps(
                data_summary["bottom10_contributors"], ensure_ascii=False, indent=2),
            "calculation_process": self._create_calculation_process()
        }

    def generate_report(self, df: pd.DataFrame, time_range: str = "最近一年") -> str:
        """Generate the intelligent analysis report in one call.

        Args:
            df: Contributor metrics DataFrame.
            time_range: Description of the analysed period (kept for API
                compatibility; the prompt template does not use it).

        Returns:
            The generated report text, or an error message on failure.
        """
        try:
            prompt = self._create_analysis_prompt()
            chain = LLMChain(llm=self.llm, prompt=prompt)

            logger.info("开始生成智能分析报告...")
            report = chain.run(**self._build_prompt_inputs(df))
            logger.info("智能分析报告生成完成")
            return report

        except Exception as e:
            logger.error(f"生成报告时发生错误: {e}")
            return f"报告生成失败: {str(e)}"

    def generate_report_stream(self, df: pd.DataFrame,
                               time_range: str = "最近一年") -> Iterator[str]:
        """Stream the intelligent analysis report.

        Args:
            df: Contributor metrics DataFrame.
            time_range: Description of the analysed period (kept for API
                compatibility; the prompt template does not use it).

        Yields:
            Report content fragments as they are produced.
        """
        try:
            prompt = self._create_analysis_prompt()
            # BUG FIX: the original also passed time_range=..., which is not
            # one of the template's input variables; LangChain's strict
            # formatter rejects unused kwargs, breaking the streaming path.
            full_prompt = prompt.format(**self._build_prompt_inputs(df))

            logger.info("开始流式生成智能分析报告...")

            try:
                # Prefer the LLM's native streaming interface.
                for chunk in self.llm.stream(full_prompt):
                    if chunk:
                        yield chunk
            except AttributeError:
                # No .stream() support: generate in full, then simulate
                # streaming by emitting one sentence at a time.
                logger.warning("当前LLM不支持流式输出，使用分段模拟")
                full_result = self.llm(full_prompt)

                sentences = full_result.split('。')
                for i, sentence in enumerate(sentences):
                    if sentence.strip():
                        # Re-append the delimiter except on the final piece.
                        yield sentence + '。' if i < len(sentences) - 1 else sentence

            logger.info("流式智能分析报告生成完成")

        except Exception as e:
            logger.error(f"流式生成报告时发生错误: {e}")
            yield f"报告生成失败: {str(e)}"

    def export_report(self, report_content: str, filename: Optional[str] = None) -> str:
        """Write the report to a markdown file.

        Args:
            report_content: Report body text.
            filename: Output file name; auto-generated from a timestamp when None.

        Returns:
            The path the report was written to.

        Raises:
            OSError: Propagated (after logging) when the file cannot be written.
        """
        if filename is None:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            filename = f"contributor_analysis_report_{timestamp}.md"

        try:
            with open(filename, 'w', encoding='utf-8') as f:
                f.write(f"# 贡献者数据分析报告\n\n")
                f.write(f"生成时间: {datetime.now().strftime('%Y年%m月%d日 %H:%M:%S')}\n\n")
                f.write(report_content)

            # BUG FIX: the original logged a literal "(unknown)" placeholder
            # instead of the actual output path.
            logger.info(f"报告已导出到: {filename}")
            return filename

        except Exception as e:
            logger.error(f"导出报告时发生错误: {e}")
            raise
