from fastapi import FastAPI, File, UploadFile, HTTPException
from fastapi.responses import JSONResponse
from pydantic import BaseModel
from typing import Optional, Dict, Any, List
import os
import tempfile
import shutil
from datetime import datetime
import re
import logging

# Import the existing payslip-analysis helpers
from payslip_analyzer import PayslipAnalyzer, classify_payslip_type, extract_payslip_info

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# FastAPI application instance; title/description/version appear in the
# auto-generated OpenAPI docs (/docs).
app = FastAPI(
    title="工资单分析API",
    description="基于FastAPI的工资单文件分析服务",
    version="1.0.0"
)

# Response model: the JSON schema returned by /analyze-payslip.
# Field names are deliberately Chinese — they are the public JSON keys
# of the API contract, so they must not be renamed.
class PayslipAnalysisResponse(BaseModel):
    """Structured analysis result for one uploaded payslip."""
    文件类型: str        # document type; always "工资单" in analyze_payslip
    主体公司名称: str    # employer company name, "" when not found
    起始日期: str        # earliest pay-period month as "YYYY年MM月", "" if unknown
    结束日期: str        # latest pay-period month as "YYYY年MM月", "" if unknown
    平均薪资: str        # salary figure formatted "X.XX元", "" if unknown
    文件内容: str        # extracted text, truncated to 1000 chars by the endpoint
    文件有效性说明: str  # human-readable validity assessment
    是否可以作为证据: str  # "是" or "否"

# Supported upload formats (lower-cased extensions, leading dot included).
SUPPORTED_EXTENSIONS = {
    '.pdf', '.docx', '.png', '.jpg', '.jpeg', '.bmp', '.tiff', '.webp'
}

# API key for the upstream analysis service.
# SECURITY: a live-looking key was hard-coded here. It is now read from
# the PAYSLIP_API_KEY environment variable; the embedded fallback keeps
# backward compatibility but should be rotated and removed.
API_KEY = os.getenv("PAYSLIP_API_KEY", "sk-20856422ed6644e3827b9d5403c9542a")

@app.get("/")
async def root():
    """Root endpoint: plain liveness message."""
    payload = {"message": "工资单分析API服务正在运行"}
    return payload

@app.get("/health")
async def health_check():
    """Health probe: reports status plus the current server timestamp."""
    now = datetime.now().isoformat()
    return {"status": "healthy", "timestamp": now}

def validate_file_format(filename: Optional[str]) -> bool:
    """Return True if *filename* has a supported payslip extension.

    Defensive against None/empty names: Starlette's UploadFile.filename
    is optional, and a missing name previously raised AttributeError
    here (surfacing as an HTTP 500 instead of a clean 400). A missing
    name is now simply treated as unsupported.
    """
    if not filename:
        return False
    ext = os.path.splitext(filename.lower())[1]
    return ext in SUPPORTED_EXTENSIONS

def extract_company_name(text: str) -> str:
    """Extract a Chinese company name from raw payslip text.

    Tries each regex in order and returns the first captured name
    (stripped); returns an empty string when nothing matches.
    """
    patterns = (
        r"([\u4e00-\u9fa5]+(?:有限公司|股份有限公司|集团|公司))",
        r"公司[：:]([\u4e00-\u9fa5]+(?:有限公司|股份有限公司|集团|公司))",
        r"单位[：:]([\u4e00-\u9fa5]+(?:有限公司|股份有限公司|集团|公司))",
    )
    for candidate in patterns:
        hit = re.search(candidate, text)
        if hit is not None:
            return hit.group(1).strip()
    return ""

def extract_date_range(text: str) -> tuple:
    """Extract the earliest and latest pay-period months from *text*.

    Recognizes "YYYY年M月", "YYYY-M", "YYYY.M" and "YYYY/M", normalizes
    each hit to "YYYY年MM月" and returns (start, end). A single month is
    returned as both endpoints; no valid month yields ("", "").

    Fix: months are now validated to the 1-12 range, so stray numbers
    such as "2023-45" (e.g. an order number) no longer produce
    impossible dates like "2023年45月".
    """
    date_patterns = [
        r"(\d{4})年(\d{1,2})月",
        r"(\d{4})-(\d{1,2})",
        r"(\d{4})\.(\d{1,2})",
        r"(\d{4})/(\d{1,2})",
    ]

    dates = set()
    for pattern in date_patterns:
        for year, month in re.findall(pattern, text):
            # Skip impossible months picked up from non-date numbers.
            if 1 <= int(month) <= 12:
                dates.add(f"{year}年{month.zfill(2)}月")

    if not dates:
        return "", ""
    # Zero-padded months make lexicographic order equal chronological order.
    ordered = sorted(dates)
    return ordered[0], ordered[-1]

def calculate_average_salary(extracted_info: Dict[str, Any]) -> str:
    """Return the best available salary figure formatted as "X.XX元".

    Checks 实发工资 (net pay), 应发工资 (gross pay) and 基本工资 (base
    pay) in that priority order. Accepts int/float values and — new —
    numeric strings (tolerating thousands separators), since text
    extraction typically yields strings that the previous isinstance
    check silently dropped. Falsy values (0, "", None) are skipped, as
    before. Returns "" when no usable figure exists.

    NOTE(review): despite the name, this returns a single figure rather
    than an average across months — confirm intended semantics.
    """
    for field in ("实发工资", "应发工资", "基本工资"):
        value = extracted_info.get(field)
        if not value:
            continue
        if isinstance(value, (int, float)):
            return f"{value:.2f}元"
        if isinstance(value, str):
            try:
                # Tolerate OCR-style output such as "5,000.00".
                return f"{float(value.replace(',', '')):.2f}元"
            except ValueError:
                continue
    return ""

def assess_evidence_validity(text: str, extracted_info: Dict[str, Any]) -> tuple:
    """Score the payslip's completeness and judge its evidentiary value.

    Awards 20 points each for: employee name, net pay, gross pay, a
    recognizable company name, and a recognizable date. 80+ counts as
    fully valid, 60-79 as acceptable with listed caveats, below 60 is
    rejected.

    Returns a (description, "是"/"否") tuple.
    """
    score = 0
    problems = []

    # Core fields extracted upstream by extract_payslip_info.
    for required in ("员工姓名", "实发工资", "应发工资"):
        if extracted_info.get(required):
            score += 20
        else:
            problems.append(f"缺少{required}")

    # Employer identity must be detectable in the raw text.
    if extract_company_name(text):
        score += 20
    else:
        problems.append("缺少公司名称")

    # Pay-period date; only the start date matters for scoring.
    first_month, _ = extract_date_range(text)
    if first_month:
        score += 20
    else:
        problems.append("缺少日期信息")

    joined = "; ".join(problems)
    if score >= 80:
        return "文件信息完整，格式规范，可作为有效证据", "是"
    if score >= 60:
        return f"文件基本完整，但存在以下问题：{joined}", "是"
    return f"文件信息不完整，存在以下问题：{joined}，建议补充相关信息", "否"

@app.post("/analyze-payslip", response_model=PayslipAnalysisResponse)
async def analyze_payslip(file: UploadFile = File(...)):
    """Analyze an uploaded payslip file.

    Validates format and size, spills the upload to a temporary file,
    runs the PayslipAnalyzer pipeline over it, and returns a populated
    PayslipAnalysisResponse. Raises HTTP 400 for unsupported formats,
    oversized uploads, or unreadable content; HTTP 500 for unexpected
    analysis failures.
    """
    try:
        # Validate the file extension.
        # NOTE(review): UploadFile.filename can be None in Starlette; that
        # would raise AttributeError inside validate_file_format and
        # surface as a 500 instead of a 400 — confirm/guard.
        if not validate_file_format(file.filename):
            raise HTTPException(
                status_code=400,
                detail=f"不支持的文件格式。支持的格式：{', '.join(SUPPORTED_EXTENSIONS)}"
            )
        
        # Enforce the 10MB upload limit. file.size may be None (e.g. no
        # Content-Length), in which case the check is skipped.
        if file.size and file.size > 10 * 1024 * 1024:
            raise HTTPException(
                status_code=400,
                detail="文件大小超过限制（10MB）"
            )
        
        # Spill the upload to a named temp file — the analyzer reads from
        # a filesystem path. Keep the original extension so downstream
        # processing can dispatch on it.
        with tempfile.NamedTemporaryFile(delete=False, suffix=os.path.splitext(file.filename)[1]) as temp_file:
            shutil.copyfileobj(file.file, temp_file)
            temp_file_path = temp_file.name
        
        try:
            # Build the analyzer with the configured API key.
            analyzer = PayslipAnalyzer(API_KEY)
            
            # Extract raw text from the file (format handling is inside
            # PayslipAnalyzer).
            text = analyzer.extract_text(temp_file_path)
            
            if not text.strip():
                raise HTTPException(
                    status_code=400,
                    detail="无法从文件中提取有效文本内容"
                )
            
            # Classify the payslip type (used only for the log line below).
            payslip_type = classify_payslip_type(text)
            
            # Pull structured fields (names, salary figures) from the text.
            extracted_info = extract_payslip_info(text)
            
            # Employer name via local regex heuristics.
            company_name = extract_company_name(text)
            
            # Earliest/latest pay-period months found in the text.
            start_date, end_date = extract_date_range(text)
            
            # Representative salary figure formatted as "X.XX元".
            average_salary = calculate_average_salary(extracted_info)
            
            # Completeness score -> human-readable verdict + yes/no flag.
            validity_desc, can_be_evidence = assess_evidence_validity(text, extracted_info)
            
            # Assemble the response model. Field names are the public
            # Chinese-keyed JSON contract.
            response = PayslipAnalysisResponse(
                文件类型="工资单",
                主体公司名称=company_name,
                起始日期=start_date,
                结束日期=end_date,
                平均薪资=average_salary,
                文件内容=text[:1000] + "..." if len(text) > 1000 else text,  # cap echoed content at 1000 chars
                文件有效性说明=validity_desc,
                是否可以作为证据=can_be_evidence
            )
            
            logger.info(f"成功分析文件: {file.filename}, 类型: {payslip_type}")
            return response
            
        finally:
            # Always remove the temp file, even when analysis raised.
            if os.path.exists(temp_file_path):
                os.unlink(temp_file_path)
    
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"分析文件时发生错误: {str(e)}")
        raise HTTPException(
            status_code=500,
            detail=f"文件分析失败: {str(e)}"
        )

@app.exception_handler(Exception)
async def global_exception_handler(request, exc):
    """Last-resort handler: log the full traceback, return a generic 500.

    The client only ever sees a generic message so internal details are
    not leaked; the stack trace goes to the server log.
    """
    # exc_info preserves the traceback (the previous f-string call
    # discarded it); lazy %-args avoid formatting when logging is off.
    logger.error("未处理的异常: %s", exc, exc_info=exc)
    return JSONResponse(
        status_code=500,
        content={"detail": "服务器内部错误"}
    )

if __name__ == "__main__":
    import uvicorn

    # Development entry point: listen on all interfaces, port 8002.
    uvicorn.run(app, host="0.0.0.0", port=8002)