# 顶部导入与配置区域
from flask import Flask, request, jsonify, render_template
from flask_cors import CORS
from flask_limiter import Limiter
from flask_limiter.util import get_remote_address
import pandas as pd
import os
import json
import uuid
from datetime import datetime
import requests
from werkzeug.utils import secure_filename
import logging
import sys
import time
import random
import pickle
import hashlib
from redis import Redis
from rq import Queue
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

# 导入配置
from config import AI_CONFIG, DATA_CONFIG, REDIS_CONFIG, RATE_LIMITS, REQUESTS_POOL

# 配置日志
# --- Logging -----------------------------------------------------------------
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('app.log', encoding='utf-8'),
        logging.StreamHandler(sys.stdout)
    ]
)

logger = logging.getLogger(__name__)

app = Flask(__name__, template_folder='../templates', static_folder='../static')
CORS(app)

# Rate limiting: protects the service from bursts of traffic.
limiter = Limiter(get_remote_address, app=app, default_limits=[RATE_LIMITS['default'], RATE_LIMITS['burst']])

# --- Upload configuration ----------------------------------------------------
UPLOAD_FOLDER = DATA_CONFIG['upload_folder']
ALLOWED_EXTENSIONS = set(DATA_CONFIG['allowed_extensions'])
# NOTE: this is the maximum *request body* size in bytes (fed to Flask's
# MAX_CONTENT_LENGTH below) — do not reuse this name for text truncation.
MAX_CONTENT_LENGTH = DATA_CONFIG['max_file_size']
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['MAX_CONTENT_LENGTH'] = MAX_CONTENT_LENGTH
os.makedirs(UPLOAD_FOLDER, exist_ok=True)

# --- AI model configuration (read from config.py) ----------------------------
SILICONFLOW_API_KEY = AI_CONFIG['api_key']
SILICONFLOW_API_URL = AI_CONFIG['api_url']
MODEL_NAME = AI_CONFIG['model_name']
AI_TIMEOUT = AI_CONFIG['timeout']
MAX_RETRIES = AI_CONFIG['max_retries']
logger.info(f"AI模型配置 - URL: {SILICONFLOW_API_URL}, Model: {MODEL_NAME}")

# Legacy in-process data store used by /summary, /ai_analysis, /data and
# /project_groups. Fix: those routes declared `global parsed_data` but the
# name was never initialised, so they raised NameError when called.
parsed_data = []

# --- Redis client and task queue ---------------------------------------------
redis_client = Redis(
    host=REDIS_CONFIG['host'],
    port=REDIS_CONFIG['port'],
    db=REDIS_CONFIG['db'],
    password=(REDIS_CONFIG['password'] or None),
    decode_responses=False,  # keep raw bytes; payloads are decoded explicitly where read
)
task_queue = Queue(
    REDIS_CONFIG['queue_name'],
    connection=redis_client,
    default_timeout=AI_CONFIG['timeout'] + 60,  # headroom above the AI request timeout
    result_ttl=REDIS_CONFIG['result_ttl']
)

# --- Shared requests session: connection pooling + transport-level retries ---
session = requests.Session()

adapter = HTTPAdapter(
    pool_maxsize=REQUESTS_POOL['pool_maxsize'],
    pool_connections=10,  # number of host pools kept alive
    pool_block=False,  # never block waiting for a free pooled connection
    max_retries=Retry(
        total=REQUESTS_POOL['max_retries'],
        backoff_factor=1.0,
        status_forcelist=[429, 500, 502, 503, 504, 520, 521, 522, 523, 524],
        allowed_methods=False,  # False disables method filtering: retry all verbs, incl. POST
        raise_on_status=False,  # surface the final status to the caller instead of raising
        respect_retry_after_header=True
    )
)

session.mount('https://', adapter)
session.mount('http://', adapter)

# NOTE(review): requests.Session has no `timeout` attribute, so this assignment
# has no effect on requests — per-request timeouts are passed explicitly in
# make_ai_request(). Kept for reference only.
session.timeout = (AI_CONFIG['connection_timeout'], AI_CONFIG['read_timeout'])
session.verify = True  # enforce TLS certificate verification

def allowed_file(filename):
    """Return True when *filename* carries an extension in the configured whitelist."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS

def get_file_hash(file_path):
    """Return the hex MD5 digest of the file at *file_path* (used as a cache key)."""
    digest = hashlib.md5()
    with open(file_path, "rb") as fh:
        while True:
            chunk = fh.read(4096)  # stream in 4 KiB chunks to bound memory
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()

def parse_excel_file(file_path):
    """Parse an Excel workbook into a list of daily-report record dicts.

    Every sheet is scanned; columns are matched fuzzily (case-insensitive
    substring) against known aliases for project group, author, date and
    work summary. Results are cached as a pickle keyed by the file's MD5
    hash. Returns a list of dicts with keys sheet_name / 项目组 / 编辑人 /
    填写日期 / 今日工作总结, or [] on any unrecoverable error.
    """
    logger.info(f"开始解析Excel文件: {file_path}")
    
    try:
        # Cache check: identical file bytes -> identical hash -> reuse result.
        # NOTE(review): cache_*.pkl files are written to the current working
        # directory and never evicted — TODO confirm this is intended.
        file_hash = get_file_hash(file_path)
        cache_file = f"cache_{file_hash}.pkl"
        
        if os.path.exists(cache_file):
            logger.info("发现缓存文件，直接加载...")
            with open(cache_file, 'rb') as f:
                cached_data = pickle.load(f)
                logger.info(f"从缓存加载了 {len(cached_data)} 条记录")
                return cached_data
        
        # Enumerate every sheet in the workbook.
        excel_file = pd.ExcelFile(file_path)
        logger.info(f"Excel文件包含 {len(excel_file.sheet_names)} 个sheet页")
        
        all_data = []
        
        for sheet_name in excel_file.sheet_names:
            try:
                # dtype=str keeps cell values textual (avoids numeric coercion).
                df = pd.read_excel(file_path, sheet_name=sheet_name, dtype=str)
                logger.info(f"Sheet '{sheet_name}' 包含 {len(df)} 行")
                
                # Canonical field -> accepted column-name aliases (CN + EN).
                column_mapping = {
                    '项目组': ['项目组', '项目', 'project', 'group', 'team'],
                    '编辑人': ['编辑人', '姓名', '员工', 'name', 'editor', 'author'],
                    '填写日期': ['填写日期', '日期', '时间', 'date', 'time', 'created_date'],
                    '今日工作总结': ['今日工作总结', '工作总结', '总结', '工作内容', 'summary', 'work_summary', 'content']
                }
                
                # Resolve each canonical field to the first real column whose
                # name contains any alias (case-insensitive substring match).
                actual_columns = {}
                for key, possible_names in column_mapping.items():
                    for col in df.columns:
                        if any(name.lower() in str(col).lower() for name in possible_names):
                            actual_columns[key] = col
                            break
                
                # Extract one record per data row.
                for index, row in df.iterrows():
                    try:
                        # Normalise the date cell. If no date column was found,
                        # actual_columns.get(...) yields '' and row.get('')
                        # falls through to the '' default.
                        date_value = row.get(actual_columns.get('填写日期', ''), '')
                        if pd.notna(date_value):
                            if hasattr(date_value, 'strftime'):
                                date_str = date_value.strftime('%Y-%m-%d')
                            else:
                                date_str = str(date_value)
                        else:
                            date_str = ''
                        
                        record = {
                            'sheet_name': sheet_name,
                            '项目组': str(row.get(actual_columns.get('项目组', ''), '')),
                            '编辑人': str(row.get(actual_columns.get('编辑人', ''), '')),
                            '填写日期': date_str,
                            '今日工作总结': str(row.get(actual_columns.get('今日工作总结', ''), ''))
                        }
                        
                        # Keep the row only if at least one key field is non-blank.
                        # NOTE(review): str() renders missing cells as 'nan',
                        # which would count as non-blank here — TODO confirm.
                        if any(record[key].strip() for key in ['项目组', '编辑人', '今日工作总结'] if record[key]):
                            all_data.append(record)
                            
                    except Exception as row_error:
                        # A bad row is skipped; the rest of the sheet still parses.
                        logger.warning(f"处理第{index}行数据时出错: {str(row_error)}")
                        continue
                        
            except Exception as sheet_error:
                # A bad sheet is skipped; the rest of the workbook still parses.
                logger.error(f"处理sheet '{sheet_name}' 时出错: {str(sheet_error)}")
                continue
        
        # Persist to cache for subsequent uploads of identical file content.
        with open(cache_file, 'wb') as f:
            pickle.dump(all_data, f)
        logger.info(f"数据已缓存到 {cache_file}")
        
        logger.info(f"Excel解析完成，总共提取 {len(all_data)} 条有效记录")
        return all_data
        
    except Exception as e:
        logger.error(f"解析Excel文件失败: {str(e)}", exc_info=True)
        return []

def make_ai_request(prompt, max_tokens=3000):
    """Send *prompt* to the SiliconFlow chat-completion API and return the reply text.

    Uses the module-level pooled `session` and retries up to MAX_RETRIES times
    with progressively longer timeouts and per-error-type backoff. Returns the
    model's message content on success, or the fixed failure string
    "AI请求失败，请检查网络连接或稍后重试" after all attempts fail.
    """
    headers = {
        "Authorization": f"Bearer {SILICONFLOW_API_KEY}",
        "Content-Type": "application/json",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    }
    data = {
            "model": MODEL_NAME,
            "messages": [{"role": "user", "content": prompt}],
            "temperature": 0.2,  # low temperature for more stable output
            "max_tokens": max_tokens,
            "stream": False,  # non-streaming: we need the complete response body
            "frequency_penalty": 0.1,  # discourage repetition
            "presence_penalty": 0.1    # encourage variety
        }
    
    for attempt in range(MAX_RETRIES):
        try:
            # Progressive timeouts: each retry allows more time
            # (e.g. read timeout 300/420/540s when AI_TIMEOUT is 300).
            current_timeout = AI_TIMEOUT + (attempt * 120)
            connection_timeout = min(60, 30 + (attempt * 15))  # connect timeout grows, capped at 60s
            read_timeout = current_timeout
            
            logger.info(f"AI请求尝试 {attempt + 1}/{MAX_RETRIES}, 连接超时: {connection_timeout}s, 读取超时: {read_timeout}s")
            
            start_time = datetime.now()
            
            # Send through the pooled session (connection reuse + adapter retries).
            response = session.post(
                SILICONFLOW_API_URL,
                headers=headers,
                json=data,
                timeout=(connection_timeout, read_timeout),
                allow_redirects=True,
                verify=True  # enforce TLS certificate verification
            )
            
            end_time = datetime.now()
            duration = (end_time - start_time).total_seconds()
            logger.info(f"API响应: {response.status_code}, 耗时: {duration:.2f}秒")
            
            if response.status_code == 200:
                result = response.json()
                return result['choices'][0]['message']['content']
            elif response.status_code == 429:
                # Rate-limited: linear backoff (30s, 60s, ...) before retrying.
                wait_time = (attempt + 1) * 30
                logger.warning(f"速率限制，等待{wait_time}秒后重试...")
                time.sleep(wait_time)
                continue
            else:
                logger.error(f"AI请求失败: {response.status_code} - {response.text[:300]}")
                if attempt < MAX_RETRIES - 1:
                    wait_time = AI_CONFIG['retry_delay'] * (attempt + 1)
                    logger.info(f"等待{wait_time}秒后重试...")
                    time.sleep(wait_time)
                    continue
                    
        except requests.exceptions.Timeout as e:
            logger.warning(f"请求超时 (尝试 {attempt + 1}/{MAX_RETRIES}): {str(e)}")
            if attempt < MAX_RETRIES - 1:
                wait_time = AI_CONFIG['retry_delay'] * (attempt + 2)
                logger.info(f"等待{wait_time}秒后重试...")
                time.sleep(wait_time)
                continue
                
        except requests.exceptions.ConnectionError as e:
            logger.warning(f"连接错误 (尝试 {attempt + 1}/{MAX_RETRIES}): {str(e)}")
            if attempt < MAX_RETRIES - 1:
                # Add jitter so concurrent workers do not retry in lockstep.
                base_wait = AI_CONFIG['retry_delay'] * (attempt + 1)
                random_delay = random.randint(5, 15)
                total_wait = base_wait + random_delay
                logger.info(f"等待{total_wait}秒后重试（包含随机延迟{random_delay}秒）...")
                time.sleep(total_wait)
                continue
                
        # NOTE(review): requests.exceptions.SSLError is a subclass of
        # ConnectionError, so this branch is unreachable as ordered — SSL
        # failures are handled by the ConnectionError handler above.
        except requests.exceptions.SSLError as e:
            logger.warning(f"SSL连接错误 (尝试 {attempt + 1}/{MAX_RETRIES}): {str(e)}")
            if attempt < MAX_RETRIES - 1:
                wait_time = AI_CONFIG['retry_delay'] * (attempt + 3)  # SSL errors get the longest backoff
                logger.info(f"等待{wait_time}秒后重试...")
                time.sleep(wait_time)
                continue
                
        except Exception as e:
            logger.error(f"AI请求异常 (尝试 {attempt + 1}/{MAX_RETRIES}): {str(e)}", exc_info=True)
            if attempt < MAX_RETRIES - 1:
                wait_time = AI_CONFIG['retry_delay'] * (attempt + 2)
                time.sleep(wait_time)
                continue
    
    # All attempts exhausted: return the fixed failure sentinel string.
    return "AI请求失败，请检查网络连接或稍后重试"

def smart_filter_data_unlimited(prompt, data):
    """Select records relevant to *prompt* by keyword matching (no size cap).

    Keywords are harvested from three sources: project-group names and editor
    names that appear in the data, plus a fixed list of work-content terms.
    Records matching any keyword are returned; when nothing matches (or no
    keyword is found) the full data set is returned. The result is sorted
    newest-first when possible.
    """
    try:
        logger.info(f"开始智能数据筛选，数据总量: {len(data)}")

        prompt_lower = prompt.lower()
        keywords = []

        # Project-group names present in the data that the prompt mentions.
        project_groups = {r.get('项目组', '').strip().lower()
                          for r in data if r.get('项目组', '').strip()}
        keywords.extend(('项目组', g) for g in project_groups if g in prompt_lower)

        # Editor names present in the data that the prompt mentions.
        persons = {r.get('编辑人', '').strip().lower()
                   for r in data if r.get('编辑人', '').strip()}
        keywords.extend(('编辑人', p) for p in persons if p in prompt_lower)

        # Fixed work-content vocabulary.
        work_keywords = ['开发', '测试', '设计', '会议', '需求', 'bug', '功能', '优化', '部署', '完成', '进行', '修复', '实现', '调试', '上线']
        keywords.extend(('工作内容', w) for w in work_keywords if w in prompt_lower)

        relevant_data = []
        if keywords:
            logger.info(f"找到关键词: {keywords}")

            # Map each keyword category to the record field it matches against.
            field_for = {'项目组': '项目组', '编辑人': '编辑人', '工作内容': '今日工作总结'}
            for record in data:
                if any(kw in record.get(field_for[kind], '').lower()
                       for kind, kw in keywords):
                    relevant_data.append(record)

        # Fall back to the full data set when nothing matched.
        if not relevant_data:
            logger.info("未找到特定匹配，返回全部数据")
            relevant_data = data

        # Best-effort newest-first ordering.
        try:
            relevant_data = sort_data_by_date(relevant_data, reverse=True)
        except Exception as e:
            logger.warning(f"日期排序失败: {str(e)}")

        logger.info(f"智能筛选完成，返回 {len(relevant_data)} 条记录")
        return relevant_data

    except Exception as e:
        logger.warning(f"智能筛选失败，返回全部数据: {str(e)}")
        return data

def process_all_data_in_batches(question, data, batch_size=50, max_summary_chars=500):
    """Answer *question* over *data* by batching records through the AI model.

    The data is first relevance-filtered (uncapped), then split into batches
    of *batch_size* records; each batch is analysed separately and, when more
    than one batch succeeded, a final summarisation pass merges the results.

    Fixes vs. the previous version:
    - BATCH_SIZE was an undefined global (NameError at runtime); it is now
      the keyword parameter ``batch_size`` with a default.
    - Summaries were truncated with MAX_CONTENT_LENGTH, which is the upload
      *file-size* limit, and the "..." branch after the slice was
      unreachable; a dedicated ``max_summary_chars`` parameter is used.
    - Failed batches were detected with the wrong sentinel ("AI服务暂时不可用")
      and silently included; make_ai_request actually returns a string
      starting with "AI请求失败".
    """
    logger.info(f"开始分批处理全部数据，总计 {len(data)} 条记录")

    # Relevance filtering first — no cap on the number of records returned.
    filtered_data = smart_filter_data_unlimited(question, data)
    logger.info(f"筛选后数据量: {len(filtered_data)} 条记录")

    batches = [filtered_data[i:i + batch_size] for i in range(0, len(filtered_data), batch_size)]
    batch_results = []

    logger.info(f"分为 {len(batches)} 批处理，每批最多 {batch_size} 条")

    for i, batch in enumerate(batches):
        logger.info(f"处理第 {i+1}/{len(batches)} 批")

        # Build a compact, human-readable context for the prompt.
        context = ""
        for j, record in enumerate(batch):
            summary = record.get('今日工作总结', '')
            if len(summary) > max_summary_chars:
                # Truncate long summaries to keep the prompt within budget.
                summary = summary[:max_summary_chars] + "..."
            context += f"{j+1}. 项目组：{record.get('项目组', '')}\n   编辑人：{record.get('编辑人', '')}\n   日期：{record.get('填写日期', '')}\n   工作内容：{summary}\n\n"

        prompt = f"""分析第{i+1}批工作日报数据：

问题: {question}

数据 (第{i+1}/{len(batches)}批):
{context}

请简洁分析这批数据。"""

        batch_result = make_ai_request(prompt)
        # make_ai_request returns a string starting with "AI请求失败" on failure.
        if batch_result and not batch_result.startswith("AI请求失败"):
            batch_results.append(f"第{i+1}批分析:\n{batch_result}")

        # Pause between batches to stay under the provider's rate limits.
        if i < len(batches) - 1:
            logger.info("批次间等待10秒...")
            time.sleep(10)

    # Merge per-batch answers into one final response.
    if len(batch_results) > 1:
        summary_prompt = f"""请基于以下各批次的分析结果，对问题进行综合回答：

原问题: {question}

各批次分析结果:
{chr(10).join(batch_results)}

请提供一个综合性的总结分析。"""

        final_result = make_ai_request(summary_prompt, max_tokens=3000)
        return f"【综合分析】（基于{len(filtered_data)}条相关记录，分{len(batches)}批处理）\n\n{final_result}"

    elif len(batch_results) == 1:
        return f"【分析结果】（基于{len(filtered_data)}条相关记录）\n\n{batch_results[0]}"

    else:
        return "处理失败，请检查网络连接或稍后重试"

def sort_data_by_date(data, reverse=False):
    """Return *data* sorted by the '填写日期' field.

    String dates are tried against several common formats; values that cannot
    be interpreted sort as 1900-01-01. On any sorting failure the input is
    returned unchanged.
    """
    fallback = datetime(1900, 1, 1)
    known_formats = ('%Y-%m-%d', '%Y/%m/%d', '%m/%d/%Y', '%d/%m/%Y')

    def sort_key(record):
        value = record['填写日期']
        if isinstance(value, str) and value.strip():
            text = value.strip()
            for fmt in known_formats:
                try:
                    return datetime.strptime(text, fmt)
                except ValueError:
                    pass
            return fallback
        if hasattr(value, 'date'):
            # Datetime-like object: usable as a sort key directly.
            return value
        return fallback

    try:
        return sorted(data, key=sort_key, reverse=reverse)
    except Exception as e:
        logger.warning(f"日期排序失败: {str(e)}")
        return data

# 路由定义
@app.route('/')
def index():
    """Serve the single-page frontend."""
    return render_template('index.html')

# 新增：上传 → 异步解析任务（解耦重活）
@app.route('/upload', methods=['POST'])
@limiter.limit("20/minute")  # per-IP upload rate limit (example value; tune as needed)
def upload_file():
    """Accept an Excel upload and enqueue background parsing.

    Saves the file under UPLOAD_FOLDER, then enqueues
    backend.tasks.parse_excel_and_store with a freshly generated business
    job_id. Returns both that job_id and the RQ job id so the client can
    poll /jobs/<job_id> for status.
    """
    logger.info("=== 文件上传请求开始（异步解析） ===")
    try:
        if 'file' not in request.files:
            return jsonify({'error': '没有选择文件'}), 400
        file = request.files['file']
        if file.filename == '':
            return jsonify({'error': '没有选择文件'}), 400
        if file and allowed_file(file.filename):
            # NOTE(review): secure_filename strips non-ASCII characters, so a
            # fully non-ASCII name may collapse to almost nothing, and uploads
            # with the same sanitised name overwrite each other — consider
            # prefixing the saved name with job_id. TODO confirm.
            filename = secure_filename(file.filename)
            file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
            logger.info(f"保存文件到: {file_path}")
            file.save(file_path)
            # Generate a business-level job id and enqueue the parse task.
            job_id = str(uuid.uuid4())
            from backend.tasks import parse_excel_and_store  # deferred import to avoid a circular dependency
            job = task_queue.enqueue(parse_excel_and_store, file_path, job_id, REDIS_CONFIG)
            logger.info(f"解析任务入队: {job.id} -> 业务ID: {job_id}")
            return jsonify({'message': '文件上传成功，已开始后台解析', 'job_id': job_id, 'rq_job_id': job.id, 'status': 'queued'})
        return jsonify({'error': '不支持的文件格式'}), 400
    except Exception as e:
        logger.error(f"文件上传失败: {str(e)}", exc_info=True)
        return jsonify({'error': f'文件上传失败: {str(e)}'}), 500

# 新增：查询解析任务状态和结果（只返回元数据，避免一次性返回大数据）
@app.route('/jobs/<job_id>', methods=['GET'])
def get_job_status(job_id):
    """Return parse-job metadata written by the worker into Redis.

    Only metadata (status, record count, sheet names, error) is returned,
    never the parsed records themselves, to keep responses small.
    """
    try:
        raw = redis_client.get(f"parsed:{job_id}:meta")
        meta = json.loads(raw.decode('utf-8')) if raw else {}
        response = {
            'job_id': job_id,
            'status': meta.get('status', 'pending'),
            'records_count': meta.get('records_count', 0),
            'sheets': meta.get('sheets', []),
            'error': meta.get('error')
        }
        return jsonify(response)
    except Exception as e:
        logger.error(f"查询任务状态失败: {str(e)}", exc_info=True)
        return jsonify({'error': f'查询任务状态失败: {str(e)}'}), 500

# 新增：对已解析数据发起 AI 分析（异步）
@app.route('/jobs/<job_id>/ai', methods=['POST'])
@limiter.limit("60/minute")
def ai_analysis_async(job_id):
    """Queue an asynchronous AI analysis task against previously parsed data."""
    try:
        payload = request.get_json(force=True) or {}
        question = payload.get('question', '').strip()
        if not question:
            return jsonify({'error': '请输入问题'}), 400
        from backend.tasks import ai_analysis_and_store
        queued = task_queue.enqueue(ai_analysis_and_store, job_id, question, AI_CONFIG, REDIS_CONFIG, REQUESTS_POOL)
        return jsonify({'job_id': job_id, 'ai_job_id': queued.id, 'status': 'queued'})
    except Exception as e:
        logger.error(f"AI分析任务入队失败: {str(e)}", exc_info=True)
        return jsonify({'error': f'AI分析任务入队失败: {str(e)}'}), 500

# 新增：查询 AI 分析任务结果
@app.route('/jobs/<job_id>/ai/<ai_job_id>', methods=['GET'])
def ai_result(job_id, ai_job_id):
    """Fetch the stored result of an asynchronous AI analysis task."""
    try:
        stored = redis_client.get(f"ai:{job_id}:{ai_job_id}")
        if not stored:
            # The worker has not written a result yet.
            return jsonify({'job_id': job_id, 'ai_job_id': ai_job_id, 'status': 'pending'})
        return jsonify(json.loads(stored.decode('utf-8')))
    except Exception as e:
        logger.error(f"获取AI结果失败: {str(e)}", exc_info=True)
        return jsonify({'error': f'获取AI结果失败: {str(e)}'}), 500

# 保留原有/summary 等接口（建议逐步迁移到 job_id 语义）
@app.route('/summary', methods=['GET'])
def get_summary():
    """Legacy endpoint: filter, group and paginate the in-process data store.

    Query parameters:
        start_date, end_date -- inclusive YYYY-MM-DD bounds
        project_group        -- exact (trimmed) project-group name
        page, page_size      -- 1-based pagination

    Fixes vs. the previous version:
    - non-integer page/page_size raised ValueError (surfacing as a 500), and
      page_size=0 caused a division error; both now rejected with a 400
    - malformed start_date/end_date were re-parsed per row inside a bare
      ``except:`` and silently filtered out every record; bounds are now
      parsed once up front and rejected with a 400
    - the bare ``except:`` is narrowed to the exceptions date handling raises
    """
    global parsed_data

    try:
        start_date = request.args.get('start_date')
        end_date = request.args.get('end_date')
        project_group_filter = request.args.get('project_group')

        # Validate pagination parameters explicitly.
        try:
            page = int(request.args.get('page', 1))
            page_size = int(request.args.get('page_size', 20))
        except (TypeError, ValueError):
            return jsonify({'error': '分页参数必须是整数'}), 400
        if page < 1 or page_size < 1:
            return jsonify({'error': '分页参数必须是正整数'}), 400

        if not parsed_data:
            # Nothing uploaded yet: empty payload with a well-formed pagination block.
            return jsonify({
                'data': {},
                'pagination': {
                    'current_page': 1,
                    'page_size': page_size,
                    'total_records': 0,
                    'total_pages': 0,
                    'has_prev': False,
                    'has_next': False,
                    'start_index': 0,
                    'end_index': 0
                }
            })

        filtered_data = parsed_data.copy()

        # Date-range filter: parse the bounds once, outside the loop.
        if start_date or end_date:
            try:
                start_bound = datetime.strptime(start_date, '%Y-%m-%d').date() if start_date else None
                end_bound = datetime.strptime(end_date, '%Y-%m-%d').date() if end_date else None
            except (TypeError, ValueError):
                return jsonify({'error': '日期格式应为 YYYY-MM-DD'}), 400

            temp_data = []
            for record in parsed_data:
                record_date = record.get('填写日期', '')
                if not record_date:
                    continue
                try:
                    if isinstance(record_date, str):
                        parsed_date = datetime.strptime(record_date, '%Y-%m-%d').date()
                    else:
                        parsed_date = record_date
                    if start_bound and parsed_date < start_bound:
                        continue
                    if end_bound and parsed_date > end_bound:
                        continue
                    temp_data.append(record)
                except (TypeError, ValueError):
                    # Unparseable/incomparable record dates are skipped, as before.
                    continue

            filtered_data = temp_data
        else:
            # No date filter: show newest records first.
            filtered_data = sort_data_by_date(filtered_data, reverse=True)

        # Project-group filter (exact match on the trimmed name).
        if project_group_filter:
            wanted = project_group_filter.strip()
            filtered_data = [record for record in filtered_data
                             if str(record.get('项目组', '')).strip() == wanted]

        # Pagination (page_size >= 1 is guaranteed above).
        total_records = len(filtered_data)
        total_pages = max(1, (total_records + page_size - 1) // page_size)
        start_index = (page - 1) * page_size
        end_index = min(start_index + page_size, total_records)

        current_page_data = filtered_data[start_index:end_index]

        # Group the current page's records by project group...
        grouped_data = {}
        for record in current_page_data:
            project_group = str(record.get('项目组', '未知项目组'))
            grouped_data.setdefault(project_group, []).append(record)

        # ...and order each group newest-first.
        for project_group in grouped_data:
            grouped_data[project_group] = sort_data_by_date(grouped_data[project_group], reverse=True)

        return jsonify({
            'data': grouped_data,
            'pagination': {
                'current_page': page,
                'page_size': page_size,
                'total_records': total_records,
                'total_pages': total_pages,
                'has_prev': page > 1,
                'has_next': page < total_pages,
                'start_index': start_index + 1 if total_records > 0 else 0,
                'end_index': end_index
            }
        })

    except Exception as e:
        logger.error(f"数据筛选失败: {str(e)}", exc_info=True)
        return jsonify({'error': f'数据筛选失败: {str(e)}'}), 500

@app.route('/ai_analysis', methods=['POST'])
def ai_analysis():
    """Legacy endpoint: run batched AI analysis over the in-process data store.

    Fixes vs. the previous version: request.get_json() raised for a missing
    or invalid JSON body (surfacing as a 500); silent=True turns that into
    the intended 400 below. The question is also stripped so whitespace-only
    input is rejected.
    """
    global parsed_data

    logger.info("=== AI分析请求开始 ===")

    try:
        payload = request.get_json(silent=True) or {}
        question = (payload.get('question') or '').strip()

        if not question:
            return jsonify({'error': '请输入问题'}), 400

        if not parsed_data:
            return jsonify({'error': '请先上传Excel文件'}), 400

        logger.info(f"开始分析 {len(parsed_data)} 条记录")

        # Batch the entire data set through the AI model.
        ai_response = process_all_data_in_batches(question, parsed_data)

        return jsonify({
            'question': question,
            'answer': ai_response,
            'data_count': len(parsed_data)
        })

    except Exception as e:
        logger.error(f'AI分析失败: {str(e)}', exc_info=True)
        return jsonify({'error': f'AI分析失败: {str(e)}'}), 500

@app.route('/data', methods=['GET'])
def get_data():
    """Return every parsed record (legacy, pre-job-id endpoint)."""
    global parsed_data
    try:
        payload = {'total': len(parsed_data), 'data': parsed_data}
        return jsonify(payload)
    except Exception as e:
        return jsonify({'error': f'获取数据失败: {str(e)}'}), 500

@app.route('/project_groups', methods=['GET'])
def get_project_groups():
    """Return the sorted, de-duplicated list of project-group names."""
    global parsed_data
    try:
        groups = {str(record.get('项目组', '')).strip() for record in parsed_data}
        groups.discard('')  # blank names are not real groups
        return jsonify({
            'project_groups': sorted(groups)
        })
    except Exception as e:
        return jsonify({'error': f'获取项目组失败: {str(e)}'}), 500

if __name__ == '__main__':
    logger.info("启动智能工作日报系统...")
    # NOTE(review): debug=True combined with host='0.0.0.0' exposes the
    # Werkzeug interactive debugger to the whole network — disable debug (or
    # bind to localhost) for any shared or production deployment.
    app.run(debug=True, host='0.0.0.0', port=5000)