from flask import Flask, render_template, jsonify, request
import os
import pandas as pd
from datetime import datetime
from werkzeug.utils import secure_filename
import requests
import json


app = Flask(__name__)
app.config['MAX_CONTENT_LENGTH'] = 10 * 1024 * 1024  # 10 MB upload cap

# Upload folder layout: one sub-folder per data type.
UPLOAD_FOLDER = 'uploads'
for folder in ['delivery', 'workload', 'defect']:
    # exist_ok avoids the check-then-create race of os.path.exists + makedirs.
    os.makedirs(os.path.join(UPLOAD_FOLDER, folder), exist_ok=True)

app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER

# Parsed delivery records (NOTE: this list is also re-initialised further
# down in the file; the module has stored nothing yet, so it is harmless).
delivery_data = []

# Unified date-parsing helper shared by the statistics functions below.
def parse_date_to_date(date_value):
    """Normalize an arbitrary date value to a ``datetime.date`` (year/month/day only).

    Accepts ``datetime``/``pd.Timestamp`` objects, strings in several common
    formats (with an optional time-of-day part), and Excel serial numbers.
    Returns ``None`` for empty or unparseable values instead of raising.
    """
    if not date_value:
        return None

    # pandas sentinels (NaN / NaT) are truthy, so reject them explicitly
    # rather than relying on a downstream parse failure.
    try:
        if pd.isna(date_value):
            return None
    except (TypeError, ValueError):
        pass  # non-scalar input; fall through to normal parsing below

    try:
        if isinstance(date_value, (datetime, pd.Timestamp)):
            return date_value.date()
        if isinstance(date_value, (int, float)):
            # Excel stores dates as day counts from 1899-12-30.
            return pd.to_datetime(date_value, unit='D', origin='1899-12-30').date()

        # Everything else: coerce to string and strip the time-of-day part.
        date_str = str(date_value).split(' ')[0].split('T')[0].strip()
        if not date_str:
            return None
        for fmt in ('%Y-%m-%d', '%Y/%m/%d', '%m/%d/%Y', '%d/%m/%Y', '%Y.%m.%d'):
            try:
                return datetime.strptime(date_str, fmt).date()
            except ValueError:
                continue
        # Last resort: let pandas guess the format.
        return pd.to_datetime(date_str).date()
    except (ValueError, TypeError):
        return None

# Module-level stores for the parsed delivery / workload / defect records.
# NOTE(review): delivery_data is also initialised above and re-initialised
# later in this file; the duplicates are redundant but harmless at import time.
delivery_data = []
workload_data = []
defect_data = []  # defect records parsed from uploaded Excel files

# Date-parsing helper dedicated to the workload sheets.
def parse_workload_date(date_value):
    """Parse a workload-sheet date field into a ``datetime.date``.

    Supports datetime/Timestamp objects and several common string formats;
    returns ``None`` when the value is empty or cannot be parsed.
    """
    if not date_value:
        return None

    try:
        if isinstance(date_value, datetime):
            return date_value.date()
        if isinstance(date_value, pd.Timestamp):
            return date_value.date()
        if isinstance(date_value, str):
            for fmt in ('%Y-%m-%d', '%Y/%m/%d', '%m/%d/%Y', '%d/%m/%Y'):
                try:
                    return datetime.strptime(date_value.strip(), fmt).date()
                except ValueError:
                    continue
            # None of the explicit formats matched; let pandas guess.
            return pd.to_datetime(date_value).date()
        return pd.to_datetime(str(date_value)).date()
    # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
    # are no longer swallowed.
    except (ValueError, TypeError):
        return None


# Workload statistics.
def calculate_workload_stats(start_date, end_date):
    """Build per-person workload statistics from the uploaded workload data.

    Args:
        start_date / end_date: 'YYYY-MM-DD' strings delimiting the requested
            reporting window.

    Returns:
        A list of per-person dicts (saturation, code equivalent, hours,
        AI-active days, reporting period), or [] on any error.
    """
    try:
        # NOTE(review): start_dt/end_dt are parsed but never used below — the
        # function currently returns stats for ALL loaded workload records,
        # ignoring the requested window.  TODO: confirm whether records should
        # be filtered against 统计周期开始/统计周期结束.
        start_dt = datetime.strptime(start_date, '%Y-%m-%d').date()
        end_dt = datetime.strptime(end_date, '%Y-%m-%d').date()
        
        workload_stats = []
        
        for record in workload_data:
            # Records were normalised at ingest time, so the standardised
            # column names can be read directly.
            user_name = str(record.get('人员名称', '未知人员'))
            saturation = float(record.get('饱和度', 0))
            code_equivalent = float(record.get('代码当量', 0))
            total_hours = float(record.get('总工时', 0))
            schedule_hours = float(record.get('排期工时', 0))
            ai_active_days = int(record.get('AI活跃天数', 0))
            
            # Debug output (left in place; mirrors the ingest-side logging).
            print(f"📊 用户: {user_name}, 总工时: {total_hours}, 排期工时: {schedule_hours}")
            
            workload_stats.append({
                'user_name': user_name,
                'saturation': round(saturation, 1),
                'code_equivalent': round(code_equivalent, 2),
                'total_hours': round(total_hours, 1),
                'schedule_hours': round(schedule_hours, 1),
                'ai_active_days': ai_active_days,
                'period_start': record.get('统计周期开始'),
                'period_end': record.get('统计周期结束')
            })
        
        return workload_stats
        
    except Exception as e:
        print(f"❌ 计算工作量统计数据失败: {e}")
        import traceback
        traceback.print_exc()
        return []
# Delivery statistics.

def calculate_delivery_stats(start_date, end_date):
    """Aggregate delivery metrics for requirements inside [start_date, end_date].

    'New' counts records created in the window; 'delivered' counts records
    that actually went live in the window; throughput is delivered/new in %.
    Returns an all-zero dict on any error.
    """
    try:
        window_start = datetime.strptime(start_date, '%Y-%m-%d').date()
        window_end = datetime.strptime(end_date, '%Y-%m-%d').date()

        new_count = 0        # requirements created inside the window
        delivered_count = 0  # requirements that went live inside the window

        for record in delivery_data:
            created = parse_date_to_date(record.get('创建时间'))
            went_live = parse_date_to_date(record.get('实际上线时间'))

            if created and window_start <= created <= window_end:
                new_count += 1
            if went_live and window_start <= went_live <= window_end:
                delivered_count += 1

        # Throughput: share of new requirements that were delivered.
        rate = (delivered_count / new_count * 100) if new_count > 0 else 0

        return {
            'new_requirements': new_count,
            'delivered_requirements': delivered_count,
            'throughput_rate': rate
        }

    except Exception as e:
        print(f"计算交付统计数据失败: {e}")
        return {
            'new_requirements': 0,
            'delivered_requirements': 0,
            'throughput_rate': 0
        }

# Per-FT (business line) statistics.

def calculate_ft_project_stats(start_date, end_date):
    """Per-FT (business line) requirement counts and throughput in the window.

    Returns a list of dicts with project_name, new_requirements,
    delivered_requirements and throughput_rate (rounded to 1 decimal),
    or [] on any error.
    """
    try:
        window_start = datetime.strptime(start_date, '%Y-%m-%d').date()
        window_end = datetime.strptime(end_date, '%Y-%m-%d').date()

        projects = {}  # ft name -> running counters

        for record in delivery_data:
            created = parse_date_to_date(record.get('创建时间'))
            went_live = parse_date_to_date(record.get('实际上线时间'))

            ft_name = record.get('业务线', '未知项目')
            stats = projects.setdefault(ft_name, {
                'project_name': ft_name,
                'new_requirements': 0,
                'delivered_requirements': 0
            })

            if created and window_start <= created <= window_end:
                stats['new_requirements'] += 1
            if went_live and window_start <= went_live <= window_end:
                stats['delivered_requirements'] += 1

        result = []
        for stats in projects.values():
            new_count = stats['new_requirements']
            rate = (stats['delivered_requirements'] / new_count * 100) if new_count > 0 else 0
            stats['throughput_rate'] = round(rate, 1)
            result.append(stats)

        return result

    except Exception as e:
        print(f"计算FT项目统计数据失败: {e}")
        return []
def calculate_defect_stats(start_date, end_date):
    """Aggregate defect statistics for defects created inside [start_date, end_date].

    Returns a dict with the total count, per-status / per-priority /
    per-creator breakdowns, per-business-line stats and the 50 most recent
    defects.  The same keys are always present, including on the empty and
    error paths.
    """
    # Response skeleton shared by the "no data" and error paths so API
    # consumers always see a consistent schema (the original omitted
    # 'creator_stats' on those two paths while including it on success).
    empty_result = {
        'total_defects': 0,
        'status_stats': {},
        'priority_stats': {},
        'creator_stats': {},
        'ft_defect_stats': [],
        'defect_list': []
    }
    try:
        start_dt = datetime.strptime(start_date, '%Y-%m-%d').date()
        end_dt = datetime.strptime(end_date, '%Y-%m-%d').date()

        # Keep only defects whose creation date falls inside the window.
        filtered_defects = []
        for record in defect_data:
            create_time = None
            if record.get('创建时间'):
                create_time = parse_date_to_date(record['创建时间'])
            elif record.get('缺陷创建时间'):
                create_time = parse_date_to_date(record['缺陷创建时间'])

            if create_time and start_dt <= create_time <= end_dt:
                filtered_defects.append(record)

        if not filtered_defects:
            return dict(empty_result)

        # Single pass for all per-defect breakdowns (status / priority /
        # creator / business line).
        status_stats = {}
        priority_stats = {}
        creator_stats = {}
        ft_defect_stats = {}

        for defect in filtered_defects:
            status = str(defect.get('状态', defect.get('缺陷状态', '未知')))
            priority = str(defect.get('优先级', defect.get('缺陷优先级', '未知')))
            creator = str(defect.get('创建人', defect.get('缺陷创建人', defect.get('创建者', '未知'))))
            ft_name = str(defect.get('业务线', '未知项目'))

            status_stats[status] = status_stats.get(status, 0) + 1
            priority_stats[priority] = priority_stats.get(priority, 0) + 1
            creator_stats[creator] = creator_stats.get(creator, 0) + 1

            ft_entry = ft_defect_stats.setdefault(ft_name, {
                'ft_name': ft_name,
                'total_defects': 0,
                'open_defects': 0,
                'closed_defects': 0,
                'defects_by_status': {}
            })
            ft_entry['total_defects'] += 1

            # Treat the common "done" states (Chinese or English) as closed.
            if status.lower() in ['已关闭', 'closed', '已解决', 'resolved', '已完成']:
                ft_entry['closed_defects'] += 1
            else:
                ft_entry['open_defects'] += 1

            ft_entry['defects_by_status'][status] = \
                ft_entry['defects_by_status'].get(status, 0) + 1

        # Only expose the 50 most recent defects to keep the payload small.
        defect_list = []
        for defect in filtered_defects[-50:]:
            defect_list.append({
                'defect_id': str(defect.get('缺陷ID', defect.get('ID', '未知'))),
                'title': str(defect.get('标题', defect.get('缺陷标题', '无标题'))),
                'status': str(defect.get('状态', defect.get('缺陷状态', '未知'))),
                'priority': str(defect.get('优先级', defect.get('缺陷优先级', '未知'))),
                'ft_name': str(defect.get('业务线', '未知项目')),
                'assignee': str(defect.get('负责人', defect.get('处理人', '未分配'))),
                'create_time': str(defect.get('创建时间', defect.get('缺陷创建时间', '')))
            })

        return {
            'total_defects': len(filtered_defects),
            'status_stats': status_stats,
            'priority_stats': priority_stats,
            'creator_stats': creator_stats,
            'ft_defect_stats': list(ft_defect_stats.values()),
            'defect_list': defect_list
        }

    except Exception as e:
        print(f"计算缺陷统计数据失败: {e}")
        import traceback
        traceback.print_exc()
        return dict(empty_result)
# Delayed-requirement detection.

def calculate_delayed_requirements(start_date, end_date):
    """List requirements that went live inside [start_date, end_date] and were delayed.

    A requirement counts as delayed when its actual online date is later than
    the planned online date, or its actual test hand-off is later than the
    planned one, or a planned test date exists with no actual test date.
    Returns a list of display-ready dicts, or [] on any error.
    """
    try:
        start_dt = datetime.strptime(start_date, '%Y-%m-%d').date()
        end_dt = datetime.strptime(end_date, '%Y-%m-%d').date()

        delayed_requirements = []

        for record in delivery_data:
            # parse_date_to_date returns None for missing/empty/unparseable values.
            plan_online_time = parse_date_to_date(record.get('计划上线时间'))
            actual_online_time = parse_date_to_date(record.get('实际上线时间'))
            plan_test_time = parse_date_to_date(record.get('计划提测时间'))
            actual_test_time = parse_date_to_date(record.get('实际提测时间'))

            # Hard filter: only requirements that actually went live inside the
            # window are considered.  (This made the original "any time field in
            # range" re-check and the "missing actual online time" branches
            # unreachable, so they were removed; the duplicated `x and x`
            # conditions were simplified as well.)
            if not actual_online_time:
                continue
            if not (start_dt <= actual_online_time <= end_dt):
                continue

            is_delayed = False
            delay_reason = []

            # 1. Went live later than planned.
            if plan_online_time and actual_online_time > plan_online_time:
                is_delayed = True
                delay_reason.append("上线延期")

            # 2. Test hand-off later than planned, or never recorded.
            if plan_test_time:
                if actual_test_time:
                    if actual_test_time > plan_test_time:
                        is_delayed = True
                        delay_reason.append("提测延期")
                else:
                    is_delayed = True
                    delay_reason.append("未填写实际提测时间")

            if not is_delayed:
                continue

            # Pick the first non-empty requirement-name column.
            requirement_name = '未知需求'
            for col in ['需求名称', '需求', '需求名', '需求标题', '需求描述']:
                if col in record and record[col] and pd.notna(record[col]):
                    requirement_name = str(record[col])
                    break

            delayed_requirements.append({
                'requirement_name': requirement_name,
                'ft_name': str(record.get('业务线', '未知项目')),
                'system': str(record.get('系统', '未知系统')),
                'pm': str(record.get('PM', '')),
                'rd': str(record.get('RD', '')),
                'fe': str(record.get('FE', '')),
                'qa': str(record.get('QA', '')),
                'plan_online_time': str(record.get('计划上线时间', '')),
                'actual_online_time': str(record.get('实际上线时间', '')),
                'plan_test_time': str(record.get('计划提测时间', '')),
                'actual_test_time': str(record.get('实际提测时间', '')),
                'delay_reason': '、'.join(list(set(delay_reason))) if delay_reason else '延期'
            })

        return delayed_requirements

    except Exception as e:
        print(f"计算延期需求数据失败: {e}")
        import traceback
        traceback.print_exc()
        return []
@app.route("/api/performance", methods=["GET"])
def api_performance():
    """Return all dashboard statistics for the requested date window.

    Query params: start_date / end_date as 'YYYY-MM-DD'.  Falls back to an
    all-zero, schema-stable payload when the params are missing or any
    calculation fails.
    """
    start_date = request.args.get('start_date')
    end_date = request.args.get('end_date')

    print(f"🔄 API请求: start_date={start_date}, end_date={end_date}")
    print(f"📊 当前数据量: delivery={len(delivery_data)}, workload={len(workload_data)}, defect={len(defect_data)}")

    if start_date and end_date:
        try:
            stats = calculate_delivery_stats(start_date, end_date)
            ft_projects = calculate_ft_project_stats(start_date, end_date)
            delayed_requirements = calculate_delayed_requirements(start_date, end_date)
            workload_stats = calculate_workload_stats(start_date, end_date)
            defect_stats = calculate_defect_stats(start_date, end_date)

            print(f"✅ 计算完成: ft_projects={len(ft_projects)}, delayed={len(delayed_requirements)}, workload={len(workload_stats)}, defect={defect_stats['total_defects']}")

            return jsonify({
                "delivery_stats": stats,
                "ft_projects": ft_projects,
                "delayed_requirements": delayed_requirements,
                "workload_stats": workload_stats,
                "defect_stats": defect_stats
            })

        except Exception as e:
            # The original also had requests.Timeout/RequestException handlers
            # here, but they were unreachable after `except Exception` (and no
            # HTTP call happens in this route), so they were removed.
            print(f"❌ 计算错误: {e}")
            import traceback
            traceback.print_exc()

    # Missing params or calculation error: empty payload with the same schema
    # as the success path (creator_stats included for consistency).
    return jsonify({
        "delivery_stats": {"new_requirements": 0, "delivered_requirements": 0, "throughput_rate": 0},
        "ft_projects": [],
        "delayed_requirements": [],
        "workload_stats": [],
        "defect_stats": {"total_defects": 0, "status_stats": {}, "priority_stats": {}, "creator_stats": {}, "ft_defect_stats": [], "defect_list": []}
    })

# File-upload endpoint: one route handling all three data types.
@app.route("/api/upload/<data_type>", methods=["POST"])
def upload_file_by_type(data_type):
    """Accept an Excel upload for one data type and ingest it immediately.

    data_type must be 'delivery', 'workload' or 'defect'; the file is stored
    under uploads/<data_type>/ and parsed into the in-memory store.
    """
    try:
        if data_type not in ['delivery', 'workload', 'defect']:
            return jsonify({"success": False, "message": "无效的数据类型"}), 400

        if 'file' not in request.files:
            return jsonify({"success": False, "message": "未找到文件"}), 400

        file = request.files['file']
        if file.filename == '':
            return jsonify({"success": False, "message": "未选择文件"}), 400

        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            # secure_filename strips non-ASCII characters, so a fully Chinese
            # name can collapse to '' or lose its extension entirely; fall back
            # to a timestamped name so we never write to an invalid path.
            if not filename or '.' not in filename:
                ext = file.filename.rsplit('.', 1)[1].lower()
                filename = f"upload_{datetime.now().strftime('%Y%m%d%H%M%S')}.{ext}"

            # Store each type in its own sub-folder.
            type_folder = os.path.join(app.config['UPLOAD_FOLDER'], data_type)
            filepath = os.path.join(type_folder, filename)
            file.save(filepath)

            # Type-specific parsing into the in-memory stores.
            rows_processed = process_file_by_type(filepath, data_type)

            return jsonify({
                "success": True,
                "message": f"{get_type_name(data_type)}上传成功",
                "filename": filename,
                "rows": rows_processed,
                "data_type": data_type
            })
        else:
            return jsonify({"success": False, "message": "不支持的文件格式"}), 400

    except Exception as e:
        return jsonify({"success": False, "message": f"上传失败: {str(e)}"}), 500

# Parsed-data stores (re-initialised here; also defined near the top of the file).
# NOTE(review): these lists were already defined near the top of the file;
# rebinding them here at import time is redundant (but harmless, since the
# module has not stored any data yet when these lines run).
delivery_data = []
workload_data = []
defect_data = []  # defect-record store

# Excel ingestion: parse an uploaded file into the store for its data type.

def _read_excel(filepath):
    """Load an Excel file with the engine matching its extension, headers stripped."""
    engine = 'xlrd' if filepath.endswith('.xls') else 'openpyxl'
    df = pd.read_excel(filepath, engine=engine)
    df.columns = df.columns.str.strip()  # tolerate padded header cells
    return df


def _ingest_plain_rows(df, target):
    """Replace *target*'s contents with one dict per DataFrame row (NaN -> None)."""
    target.clear()
    for _, row in df.iterrows():
        target.append({col: (row[col] if pd.notna(row[col]) else None)
                       for col in df.columns})
    return len(target)


# Standard workload field -> column headings observed in real export files
# (Chinese and English variants; first match wins).
_WORKLOAD_COLUMNS = {
    '人员名称': ['人员名称', '用户名称', '姓名', '成员名称', '人员姓名', 'name', 'Name'],
    '饱和度': ['饱和度', '工作饱和度', '负载率', '饱和度(%)', 'saturation', 'Saturation'],
    '代码当量': ['代码当量', '代码量', '代码贡献', '当量', 'code_equivalent', 'Code'],
    '总工时': ['总工时', '工时', '工作时长', '总时长(小时)', 'hours', 'Hours', '总时长', '排期工时(pd)'],
    '排期工时': ['排期工时', '计划工时', '预期工时', '排期时长', 'schedule_hours', 'Schedule Hours', '排期工时(pd)'],
    'AI活跃天数': ['AI活跃天数', 'AI使用天数', '活跃天数', 'AI天数', 'ai_days', 'AI Days'],
    '统计周期开始': ['统计周期开始', '开始日期', '周期开始', 'start_date', 'Start Date'],
    '统计周期结束': ['统计周期结束', '结束日期', '周期结束', 'end_date', 'End Date']
}


def _parse_workload_row(row, reverse_mapping):
    """Convert one workload-sheet row into a typed record with guaranteed keys."""
    record = {}
    for standard_name, actual_col in reverse_mapping.items():
        raw_value = row[actual_col] if pd.notna(row[actual_col]) else None

        if standard_name == '饱和度':
            # Accept '85%' as well as plain numbers.
            try:
                record[standard_name] = float(str(raw_value).replace('%', '')) if raw_value else 0
            except (ValueError, TypeError):  # narrowed from bare except
                record[standard_name] = 0
        elif standard_name in ('总工时', '代码当量', '排期工时'):
            try:
                record[standard_name] = float(raw_value) if raw_value else 0
            except (ValueError, TypeError):
                record[standard_name] = 0
        elif standard_name == 'AI活跃天数':
            try:
                record[standard_name] = int(float(str(raw_value))) if raw_value else 0
            except (ValueError, TypeError):
                record[standard_name] = 0
        elif standard_name in ('统计周期开始', '统计周期结束'):
            record[standard_name] = str(raw_value) if raw_value else None
        else:
            record[standard_name] = str(raw_value) if raw_value else ''

    # Guarantee every field the stats code reads, even when a column is absent.
    record.setdefault('人员名称', '')
    record.setdefault('饱和度', 0)
    record.setdefault('代码当量', 0)
    record.setdefault('总工时', 0)
    record.setdefault('排期工时', 0)
    record.setdefault('AI活跃天数', 0)
    record.setdefault('统计周期开始', None)
    record.setdefault('统计周期结束', None)
    return record


def process_file_by_type(filepath, data_type):
    """Parse an uploaded Excel file into the in-memory store for *data_type*.

    Returns the number of rows ingested; logs and returns 0 on any failure so
    a bad file can never crash the upload request.
    """
    try:
        if data_type == 'delivery':
            try:
                return _ingest_plain_rows(_read_excel(filepath), delivery_data)
            except Exception as e:
                print(f"交付文件处理失败: {e}")
                return 0

        elif data_type == 'workload':
            try:
                df = _read_excel(filepath)
                print(f"📊 工作量文件读取成功: {df.shape}")

                workload_data.clear()

                # Resolve which actual column feeds each standard field.
                reverse_mapping = {}
                for standard_name, possible_names in _WORKLOAD_COLUMNS.items():
                    for col_name in possible_names:
                        if col_name in df.columns:
                            reverse_mapping[standard_name] = col_name
                            break

                print(f"📊 原始列名: {list(df.columns)}")
                print(f"🔗 列名映射: {reverse_mapping}")

                # (Per-cell debug printing removed; it flooded the log on
                # every upload.)
                for _, row in df.iterrows():
                    workload_data.append(_parse_workload_row(row, reverse_mapping))

                print(f"✅ 工作量数据处理完成: {len(workload_data)} 条记录")
                return len(workload_data)

            except Exception as e:
                print(f"工作量文件处理失败: {e}")
                import traceback
                traceback.print_exc()
                return 0

        elif data_type == 'defect':
            try:
                return _ingest_plain_rows(_read_excel(filepath), defect_data)
            except Exception as e:
                print(f"缺陷文件处理失败: {e}")
                return 0

        else:
            return 0

    except Exception as e:
        print(f"文件处理失败: {e}")
        return 0

def get_type_name(data_type):
    """Map a data-type key to its human-readable (Chinese) display name."""
    return {
        'delivery': '交付数据',
        'workload': '工作量数据',
        'defect': '缺陷数据',
    }.get(data_type, '数据')


def allowed_file(filename):
    """Return True when *filename* carries a supported Excel extension."""
    # Case-insensitive check on the trailing extension (.xls / .xlsx only).
    return filename.lower().endswith(('.xlsx', '.xls'))

@app.route("/api/ai-analyze", methods=["POST"])
def ai_analyze():
    """Run an AI analysis of the dashboard data via the SiliconFlow chat API.

    Expects the JSON payload produced by /api/performance.  Retries the
    upstream call up to three times with exponential backoff on timeouts,
    connection errors and non-200 responses.
    """
    import time

    try:
        # get_json(silent=True) returns None instead of raising on a missing
        # or malformed JSON body; default to {} so the .get() chain is safe.
        data = request.get_json(silent=True) or {}

        prompt = f"""
        请作为效能分析专家，对以下团队效能数据进行全面分析：
        
        交付情况：
        - 需求吞吐率：{data.get('delivery_stats', {}).get('throughput_rate', 0)}%
        - 新增需求数量：{data.get('delivery_stats', {}).get('new_requirements', 0)}
        - 需求交付数量：{data.get('delivery_stats', {}).get('delivered_requirements', 0)}
        
        FT项目明细：
        {json.dumps(data.get('ft_projects', []), ensure_ascii=False, indent=2)}
        
        延期需求：
        {json.dumps(data.get('delayed_requirements', []), ensure_ascii=False, indent=2)}
        
        工作量情况：
        {json.dumps(data.get('workload_stats', []), ensure_ascii=False, indent=2)}
        
        请提供：
        1. 关键发现和趋势分析
        2. 存在的问题和风险
        3. 改进建议和优化方向
        4. 团队协作效率评估
        5. 资源分配优化建议
        
        请用中文回答，保持专业但易于理解。
        """

        # SECURITY: the API key used to be hard-coded here.  Prefer the
        # environment; the literal fallback keeps existing deployments working,
        # but this leaked key should be rotated and the fallback removed.
        api_key = os.environ.get(
            "SILICONFLOW_API_KEY",
            "sk-cgeodmtqtwhrrcaqppopyblkvfwcggowlqryvqwyhoivfiub",
        )
        headers = {
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json"
        }

        payload = {
            "model": "Qwen/Qwen3-8B",
            "messages": [
                {"role": "user", "content": prompt}
            ],
            "max_tokens": 2000,
            "temperature": 0.7
        }

        max_retries = 3
        retry_delay = 1  # seconds; doubled after each failed attempt

        for attempt in range(max_retries):
            try:
                print(f"🤖 正在进行AI分析，尝试第 {attempt + 1}/{max_retries} 次...")

                response = requests.post(
                    "https://api.siliconflow.cn/v1/chat/completions",
                    headers=headers,
                    json=payload,
                    timeout=60
                )

                if response.status_code == 200:
                    result = response.json()
                    analysis = result['choices'][0]['message']['content']

                    print("✅ AI分析完成")
                    return jsonify({
                        "success": True,
                        "analysis": {
                            "summary": analysis,
                            "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
                            "data_summary": {
                                "total_projects": len(data.get('ft_projects', [])),
                                "delayed_count": len(data.get('delayed_requirements', [])),
                                "workload_count": len(data.get('workload_stats', []))
                            },
                            "api_calls": attempt + 1
                        }
                    })

                error_msg = f"API调用失败: {response.status_code}"
                print(f"⚠️ {error_msg}")
                if attempt < max_retries - 1:
                    print(f"⏳ 等待 {retry_delay} 秒后重试...")
                    time.sleep(retry_delay)
                    retry_delay *= 2  # exponential backoff
                else:
                    return jsonify({
                        "success": False,
                        "message": error_msg,
                        "retry_count": attempt + 1
                    })

            except requests.exceptions.Timeout:
                print(f"⏰ 第 {attempt + 1} 次请求超时")
                if attempt < max_retries - 1:
                    print(f"⏳ 等待 {retry_delay} 秒后重试...")
                    time.sleep(retry_delay)
                    retry_delay *= 2
                else:
                    return jsonify({
                        "success": False,
                        "message": "API调用超时，请稍后重试",
                        "retry_count": attempt + 1
                    })

            except requests.exceptions.ConnectionError as e:
                print(f"🔌 连接错误: {e}")
                if attempt < max_retries - 1:
                    print(f"⏳ 等待 {retry_delay} 秒后重试...")
                    time.sleep(retry_delay)
                    retry_delay *= 2
                else:
                    return jsonify({
                        "success": False,
                        "message": "网络连接失败，请检查网络设置",
                        "retry_count": attempt + 1
                    })

        # Defensive: every final attempt returns above, but this prevents
        # Flask's "view returned None" error if the loop ever falls through.
        return jsonify({
            "success": False,
            "message": "API调用失败: 重试次数已用尽",
            "retry_count": max_retries
        })

    except Exception as e:
        print(f"❌ AI分析失败: {e}")
        return jsonify({
            "success": False,
            "message": f"分析过程出错: {str(e)}"
        })
@app.route("/")
def index():
    """Serve the application shell (base layout template)."""
    page = "base.html"
    return render_template(page)

@app.route("/performance")
def performance():
    """Serve the performance-dashboard page."""
    page = "performance.html"
    return render_template(page)

@app.route("/settings")
def settings():
    """Serve the settings page."""
    page = "settings.html"
    return render_template(page)

@app.route("/ai_report")
def ai_report():
    """Serve the AI-report page."""
    page = "ai_report.html"
    return render_template(page)


def load_latest_files():
    """On startup, ingest the newest Excel file of each data type from uploads/."""
    for data_type in ("delivery", "workload", "defect"):
        type_folder = os.path.join(UPLOAD_FOLDER, data_type)
        if not os.path.isdir(type_folder):
            continue

        # Collect the Excel files in this type's folder.
        excel_files = []
        for entry in os.listdir(type_folder):
            if entry.lower().endswith((".xls", ".xlsx")):
                excel_files.append(os.path.join(type_folder, entry))
        if not excel_files:
            continue

        # The most recently modified file wins.
        newest = max(excel_files, key=os.path.getmtime)
        print(f"🔄 加载最新{get_type_name(data_type)}文件: {os.path.basename(newest)}")
        process_file_by_type(newest, data_type)


if __name__ == "__main__":
    # Preload the most recent uploads before serving, so the dashboard has
    # data immediately after a restart.
    load_latest_files()
    app.run(host="127.0.0.1", port=5000, debug=True)