import os
import time
from datetime import timedelta
from typing import Dict, Any, Generator, List, Optional
import easyocr
from openai import OpenAI, APIError
from PyPDF2 import PdfReader
from docx import Document
import numpy as np
import cv2
import re


def classify_attendance_type(text: str) -> str:
    """Classify an attendance record by scanning its text for keywords.

    Categories are checked in declaration order (record form first, then
    statistics period, then management purpose); the first category with
    any keyword present in *text* wins.

    Args:
        text: raw attendance-record text (typically OCR or parser output).

    Returns:
        The matched category label, or a fallback label when nothing matches.
    """
    categories = {
        # By record form
        "打卡记录": ["打卡", "刷卡", "指纹", "人脸识别", "上班打卡", "下班打卡", "考勤机"],
        "签到表": ["签到", "签退", "到岗", "离岗", "出勤表", "签名"],
        "工时统计表": ["工时", "工作时长", "累计时间", "总工时", "有效工时"],
        "加班记录": ["加班", "延时", "超时", "加班费", "加班申请", "加班时长"],
        "请假记录": ["请假", "病假", "事假", "年假", "婚假", "产假", "丧假", "调休"],
        "出差记录": ["出差", "外勤", "外出", "差旅", "出差申请"],
        "值班记录": ["值班", "夜班", "轮班", "倒班", "班次"],
        "迟到早退记录": ["迟到", "早退", "缺勤", "旷工", "异常"],

        # By statistics period
        "日考勤记录": ["日期", "当日", "今日", "本日"],
        "周考勤汇总": ["本周", "周汇总", "一周", "星期"],
        "月考勤汇总": ["本月", "月汇总", "月度", "当月"],
        "年度考勤统计": ["年度", "全年", "年终", "年汇总"],

        # By management purpose
        "薪资计算依据": ["薪资", "工资", "计薪", "绩效", "奖金"],
        "绩效考核记录": ["绩效", "考核", "评估", "KPI", "目标完成"],
        "违规处罚记录": ["违规", "处罚", "警告", "扣分", "违纪"],
        "调岗调班记录": ["调岗", "调班", "换班", "岗位调整"]
    }

    # First category whose keyword list intersects the text, else the fallback.
    matched = next(
        (label for label, hints in categories.items()
         if any(hint in text for hint in hints)),
        None,
    )
    return matched if matched is not None else "无法识别/可能为非标准考勤记录"


def extract_attendance_info(text: str) -> Dict[str, Any]:
    """Extract structured key fields from attendance-record text via regex.

    Args:
        text: raw attendance-record text (typically OCR or parser output).

    Returns:
        A dict with fixed Chinese keys. Fields not found stay at their
        defaults: ``None`` for scalars, ``[]`` for the clock-in/out lists.
    """
    info: Dict[str, Any] = {
        "员工姓名": None,
        "员工工号": None,
        "部门": None,
        "日期范围": None,
        "上班时间": [],
        "下班时间": [],
        "工作时长": None,
        "加班时长": None,
        "迟到次数": None,
        "早退次数": None,
        "请假天数": None,
        "出勤天数": None
    }

    def first_group(patterns):
        """Return group(1) of the first pattern matching *text*, else None."""
        for pat in patterns:
            found = re.search(pat, text)
            if found:
                return found.group(1)
        return None

    # Employee name
    name = first_group([
        r"姓名[：:：]\s*([\u4e00-\u9fa5]{2,4})",
        r"员工[：:：]\s*([\u4e00-\u9fa5]{2,4})",
        r"([\u4e00-\u9fa5]{2,4})\s*的考勤"
    ])
    if name is not None:
        info["员工姓名"] = name

    # Employee ID
    emp_id = first_group([
        r"工号[：:：]\s*(\w+)",
        r"员工号[：:：]\s*(\w+)",
        r"编号[：:：]\s*(\w+)"
    ])
    if emp_id is not None:
        info["员工工号"] = emp_id

    # Department
    dept = first_group([
        r"部门[：:：]\s*([\u4e00-\u9fa5]+部)",
        r"科室[：:：]\s*([\u4e00-\u9fa5]+科)",
        r"([\u4e00-\u9fa5]+部)[：:：]"
    ])
    if dept is not None:
        info["部门"] = dept

    # Time fields: clock-in/out keep every occurrence; durations keep the
    # first occurrence parsed as a float.
    time_specs = {
        "上班时间": [r"上班[：:：]\s*(\d{1,2}[：:]\d{2})", r"签到[：:：]\s*(\d{1,2}[：:]\d{2})"],
        "下班时间": [r"下班[：:：]\s*(\d{1,2}[：:]\d{2})", r"签退[：:：]\s*(\d{1,2}[：:]\d{2})"],
        "工作时长": [r"工作时长[：:：]\s*(\d+\.?\d*)小时", r"工时[：:：]\s*(\d+\.?\d*)h"],
        "加班时长": [r"加班[：:：]\s*(\d+\.?\d*)小时", r"加班时长[：:：]\s*(\d+\.?\d*)h"]
    }
    for field, pats in time_specs.items():
        for pat in pats:
            hits = re.findall(pat, text)
            if not hits:
                continue
            if field in ("上班时间", "下班时间"):
                info[field] = hits
            else:
                try:
                    info[field] = float(hits[0])
                except ValueError:
                    pass  # malformed number: leave the default in place
            break  # only the first matching pattern per field counts

    # Count fields: first matching pattern, parsed as int.
    count_specs = {
        "迟到次数": [r"迟到[：:：]\s*(\d+)次", r"迟到\s*(\d+)"],
        "早退次数": [r"早退[：:：]\s*(\d+)次", r"早退\s*(\d+)"],
        "请假天数": [r"请假[：:：]\s*(\d+)天", r"请假\s*(\d+)"],
        "出勤天数": [r"出勤[：:：]\s*(\d+)天", r"出勤\s*(\d+)"]
    }
    for field, pats in count_specs.items():
        for pat in pats:
            found = re.search(pat, text)
            if found is None:
                continue
            try:
                info[field] = int(found.group(1))
            except ValueError:
                pass  # malformed number: leave the default in place
            break  # only the first matching pattern per field counts

    return info


class AttendanceAnalyzer:
    """Extract text from attendance records (PDF/DOCX/images) and stream an
    LLM-based compliance analysis via the DeepSeek chat API.
    """

    def __init__(self, api_key: str):
        """
        Args:
            api_key: DeepSeek API key (OpenAI-compatible endpoint).
        """
        self.client = OpenAI(
            api_key=api_key,
            base_url="https://api.deepseek.com"
        )
        # easyocr model loading is expensive; build the reader lazily and
        # reuse it for every image handled by this instance. (Previously a
        # fresh Reader was constructed per single-image call.)
        self._ocr_reader: Optional[Any] = None

    def _get_reader(self):
        """Return the shared easyocr reader, creating it on first use."""
        if self._ocr_reader is None:
            self._ocr_reader = easyocr.Reader(['ch_sim', 'en'])
        return self._ocr_reader

    def _extract_from_image(self, file_path: str, reader=None) -> str:
        """OCR a single image file and return the recognized text.

        Args:
            file_path: path to the image file.
            reader: optional pre-built easyocr reader to reuse; defaults to
                the instance's shared reader.

        Raises:
            RuntimeError: wrapping any failure (missing file, decode
                failure, OCR error), with the original exception chained.
        """
        try:
            if not os.path.exists(file_path):
                raise FileNotFoundError(f"文件不存在: {file_path}")
            # np.fromfile + imdecode instead of cv2.imread so that paths
            # containing non-ASCII characters are handled (notably Windows).
            img_array = np.fromfile(file_path, dtype=np.uint8)
            image = cv2.imdecode(img_array, cv2.IMREAD_COLOR)
            if image is None:
                raise ValueError("无法解析图像，cv2.imdecode 失败")
            reader = reader or self._get_reader()
            result = reader.readtext(image, detail=0)
            return "\n".join(result).strip()
        except Exception as e:
            raise RuntimeError(f"图片OCR识别失败: {str(e)}") from e

    def extract_text(self, file_path: str) -> str:
        """Dispatch text extraction by file extension (PDF, DOCX, or image).

        Raises:
            FileNotFoundError: if *file_path* does not exist.
            ValueError: for unsupported extensions.
            RuntimeError: propagated from the format-specific extractor.
        """
        if not os.path.exists(file_path):
            raise FileNotFoundError(f"文件不存在: {file_path}")
        lowered = file_path.lower()
        if lowered.endswith('.pdf'):
            return self._extract_from_pdf(file_path)
        elif lowered.endswith('.docx'):
            return self._extract_from_docx(file_path)
        elif lowered.endswith(('.png', '.jpg', '.jpeg', '.bmp', '.tiff', '.webp')):
            return self._extract_from_image(file_path)
        else:
            raise ValueError("不支持的文件格式，请提供PDF、DOCX或图片文件")

    def extract_texts_from_multiple_images(self, image_paths: List[str]) -> str:
        """OCR several images and join their texts with blank lines.

        Best-effort: a failure on one image is reported to stdout and the
        image is skipped, so one bad file does not abort the batch.
        """
        reader = self._get_reader()
        all_text = []
        for path in image_paths:
            try:
                all_text.append(self._extract_from_image(path, reader))
            except Exception as e:
                print(f"\n⚠️ 图片处理失败 [{path}]: {str(e)}")
        return "\n\n".join(all_text)

    def _extract_from_pdf(self, file_path: str) -> str:
        """Concatenate the extractable text of every PDF page.

        Pages with no extractable text contribute an empty string.
        """
        text = ""
        try:
            with open(file_path, 'rb') as file:
                reader = PdfReader(file)
                for page in reader.pages:
                    # extract_text() may return None for image-only pages.
                    text += page.extract_text() or ""
        except Exception as e:
            raise RuntimeError(f"PDF文件读取失败: {str(e)}") from e
        return text

    def _extract_from_docx(self, file_path: str) -> str:
        """Join the non-empty paragraphs of a DOCX file with newlines."""
        try:
            doc = Document(file_path)
            return "\n".join([para.text for para in doc.paragraphs if para.text])
        except Exception as e:
            raise RuntimeError(f"DOCX文件读取失败: {str(e)}") from e

    def analyze_attendance_stream(
        self,
        text: str,
        attendance_type: str,
        extracted_info: Dict[str, Any]
    ) -> Generator[str, None, Dict[str, Any]]:
        """Stream the LLM analysis of an attendance record.

        Yields:
            Response text chunks as they arrive from the API.

        Returns:
            (via StopIteration.value) a dict with a ``"metadata"`` key
            holding total token usage and the full concatenated response.
            Callers must drive the generator with ``next()`` and catch
            StopIteration to observe this value.

        Raises:
            ValueError: if *text* is blank.
            RuntimeError: on API or other failures, with the cause chained.
        """
        if not text.strip():
            raise ValueError("考勤记录文本内容为空")

        # Summarize only the fields that were actually extracted.
        info_summary = "\n".join([
            f"- {k}: {v}" for k, v in extracted_info.items()
            if v is not None and v != [] and v != {}
        ])

        system_prompt = f"""你是一位专业的人力资源专家和劳动法律师，负责分析考勤记录的合规性和合理性。
该考勤记录初步识别为：{attendance_type}

已提取的关键信息：
{info_summary}

请严格按照以下要求进行分析：
1. 首先确认考勤记录类型是否准确
2. 给出总体评价（规范/基本规范/存在问题/严重不规范）
3. 分析考勤记录的完整性和准确性
4. 检查以下关键项目：
   - 员工基本信息是否完整（姓名、工号、部门等）
   - 考勤时间记录是否准确（上下班时间、工作时长等）
   - 加班记录是否符合劳动法规定（加班时长、加班审批等）
   - 请假记录是否完整（请假类型、时长、审批等）
   - 异常考勤处理是否合规（迟到、早退、旷工等）
   - 工时统计是否符合标准工时制度
   - 休息休假安排是否符合法律规定
   - 考勤数据是否存在异常或造假可能
5. 指出存在的问题和风险
6. 如发现违法违规情况，说明相关法律依据
7. 针对员工权益保护给出专业建议"""

        try:
            stream = self.client.chat.completions.create(
                model="deepseek-chat",
                messages=[
                    {"role": "system", "content": system_prompt},
                    # Cap the record text to keep the request within limits.
                    {"role": "user", "content": f"请分析以下考勤记录：\n{text[:15000]}"}
                ],
                temperature=0.3,
                max_tokens=2000,
                stream=True
            )

            collected_content = []
            total_tokens = 0
            for chunk in stream:
                if not chunk.choices:
                    continue
                delta = chunk.choices[0].delta
                if delta and delta.content:
                    collected_content.append(delta.content)
                    yield delta.content
                # Usage arrives (if at all) on the final chunk(s).
                if hasattr(chunk, 'usage') and chunk.usage:
                    total_tokens = chunk.usage.total_tokens

            return {"metadata": {"total_tokens": total_tokens, "complete_response": "".join(collected_content)}}

        except APIError as e:
            raise RuntimeError(f"API请求失败: {str(e)}") from e
        except Exception as e:
            raise RuntimeError(f"分析过程中出错: {str(e)}") from e


def main():
    """Interactive CLI entry point.

    Prompts for one PDF/DOCX path or a comma-separated list of image
    paths, extracts the text, classifies the record, streams the LLM
    analysis to stdout, and saves the result next to the first input file.
    """
    # Image extensions accepted by the OCR path (hoisted: was repeated
    # three times inline below).
    image_exts = ('.jpg', '.jpeg', '.png', '.bmp', '.tiff', '.webp')

    # Prefer the environment variable; the inline key remains only as a
    # backward-compatible fallback. NOTE(review): secrets should not be
    # committed to source control — rotate this key.
    api_key = os.environ.get("DEEPSEEK_API_KEY") or "sk-20856422ed6644e3827b9d5403c9542a"
    analyzer = AttendanceAnalyzer(api_key)

    print("考勤记录分析工具（流式输出版）")
    print("=" * 40)
    file_input = input("请输入考勤记录路径（多个图片用英文逗号分隔，或一个PDF/DOCX）: ").strip()
    file_paths = [p.strip() for p in file_input.split(',') if p.strip()]

    if not file_paths:
        print("❌ 未输入有效路径")
        return

    # Decide the processing mode from the extension(s).
    if len(file_paths) > 1:
        if all(p.lower().endswith(image_exts) for p in file_paths):
            mode = "multi_image"
        else:
            print("❌ 当前仅支持：多张图片 或 单个 PDF/DOCX。请检查输入。")
            return
    elif file_paths[0].lower().endswith(('.pdf', '.docx')):
        mode = "single_document"
    elif file_paths[0].lower().endswith(image_exts):
        mode = "single_image"
    else:
        print("❌ 当前仅支持：多张图片 或 单个 PDF/DOCX。请检查输入。")
        return

    try:
        # Text extraction
        if mode == "multi_image":
            text = analyzer.extract_texts_from_multiple_images(file_paths)
        else:
            text = analyzer.extract_text(file_paths[0])

        if not text.strip():
            print("❌ 没有提取到有效文本，终止分析")
            return

        # Preliminary classification and key-field extraction
        attendance_type = classify_attendance_type(text)
        extracted_info = extract_attendance_info(text)

        print(f"\n📌 初步识别的考勤记录类型：{attendance_type}")
        print("\n📋 提取的关键信息：")
        for key, value in extracted_info.items():
            if value is not None and value != [] and value != {}:
                print(f"   {key}: {value}")

        print("\n正在分析考勤记录，请稍候...\n")
        print("=" * 40)
        print("实时分析结果:")
        print("=" * 40)

        start_time = time.time()
        full_response = []
        metadata = {}

        # BUG FIX: a plain `for` loop discards a generator's `return`
        # value, so `metadata` was never populated and the token stats
        # below were dead code. Drive the generator with next() and catch
        # StopIteration, whose .value carries the returned dict.
        stream = analyzer.analyze_attendance_stream(text, attendance_type, extracted_info)
        try:
            while True:
                chunk = next(stream)
                print(chunk, end="", flush=True)
                full_response.append(chunk)
        except StopIteration as stop:
            if isinstance(stop.value, dict):
                metadata = stop.value.get("metadata", {})
        elapsed = time.time() - start_time

        print("\n\n" + "=" * 40)
        print("分析完成!")
        if metadata.get('total_tokens', 0) > 0:
            print(f"\n总Tokens: {metadata['total_tokens']}")
            print(f"总耗时: {timedelta(seconds=elapsed)}")
            if elapsed > 0:  # guard against division by zero on instant runs
                print(f"处理速度: {metadata['total_tokens']/elapsed:.2f} tokens/秒")

        # Save the result next to the first input file.
        save_path = os.path.join(os.path.dirname(file_paths[0]), "attendance_analysis_result.txt")
        with open(save_path, 'w', encoding='utf-8') as f:
            f.write(f"考勤记录类型：{attendance_type}\n\n")
            f.write(f"提取的关键信息：\n")
            for key, value in extracted_info.items():
                if value is not None and value != [] and value != {}:
                    f.write(f"- {key}: {value}\n")
            f.write(f"\n分析结果：\n")
            f.write("".join(full_response))
        print(f"\n✅ 分析结果已保存到: {save_path}")

    except KeyboardInterrupt:
        print("\n用户中断操作")
    except Exception as e:
        print(f"\n程序发生错误: {str(e)}")


if __name__ == "__main__":
    main()