#!/usr/bin/env python3
# -*- coding: utf-8 -*-

#  Copyright (C) 2020 ShenZhen Powersmart Information Technology Co.,Ltd
#  All Rights Reserved.
#  本软件为深圳博沃智慧开发研制。未经本公司正式书面同意，其他任何个人、团体不得使用、
#  复制、修改或发布本软件.

# @Time : 2025/9/10 09:19
# @File : extract_services.py
# @Project: 
# @Author : wanghongtao
# @Software: PyCharm
import os
import re
import time
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Tuple

import jieba
import jieba.analyse
import pandas as pd

from core.config import settings
from core.logging_config import get_logger
from services.ai_service_factory import (
    AIService,
    ServiceConfig,
    ServiceRequest,
    ServiceResponse,
    ServiceStatus,
    ServiceType,
)

# 日志
logger = get_logger("extract_services")

@dataclass
class ExtractedInfo:
    """Structured location information extracted from a petition text."""
    city: Optional[str] = None    # city (市)
    county: Optional[str] = None  # district / county (区县)
    street: Optional[str] = None  # street / town (街道)
    lat: Optional[str] = None     # latitude
    long: Optional[str] = None    # longitude
    # Address key points.  ``default_factory`` avoids the shared-mutable-default
    # pitfall the original worked around with a None default + __post_init__.
    key_points: List[str] = field(default_factory=list)

    def __post_init__(self):
        # Preserve old behavior: callers that explicitly pass None get [].
        if self.key_points is None:
            self.key_points = []


class InformationExtractionService(AIService):
    """Information extraction service.

    Extracts district (区县) and street (街道) names from petition/complaint
    records, using a local Excel table of known administrative divisions
    (configured via ``settings.local_data``) as reference data.
    """

    def __init__(self, config: ServiceConfig = None):
        """Create the service, building a default ``ServiceConfig`` when none is given."""
        if config is None:
            config = ServiceConfig(
                service_type=ServiceType.EXTRACT_INFO,
                name="信息抽取服务",
                description="从信访内容中抽取街道、区县信息",
                version="1.0.0"
            )
        super().__init__(config)
        self.config = config
        # Known street names, known district names, and the
        # district -> [streets] mapping loaded from the local Excel file.
        self._jd_list, self._qx_list, self._local_qx_jd = self._load_location_patterns()
        # These were referenced by _extract_location / _extract_pollution_type
        # but never initialized (AttributeError at call time).  Empty defaults
        # make those methods safe no-ops until real patterns are configured.
        self._location_patterns: List[str] = []
        self._pollution_keywords: Dict[str, List[str]] = {}

    async def initialize(self) -> bool:
        """Initialize the service: warm up jieba and load the optional user dict.

        NOTE(review): the original class defined ``initialize`` twice; the
        second definition silently shadowed the first, so ``self.status`` was
        never set to READY.  The two bodies are merged here.

        Returns:
            True on success, False when initialization fails.
        """
        try:
            # Warm up the tokenizer so the first request does not pay the cost.
            jieba.initialize()
            # Load a custom dictionary when one is available.
            try:
                jieba.load_userdict("custom_dict.txt")
            except FileNotFoundError:
                logger.info("未找到自定义词典，使用默认配置")
            self.status = ServiceStatus.READY
            logger.info("信息抽取服务初始化成功")
            return True
        except Exception as e:
            logger.error(f"信息抽取服务初始化失败: {str(e)}")
            return False

    def _load_location_patterns(self) -> Tuple[List[str], List[str], Dict[str, List[str]]]:
        """Load administrative-division reference data from the local Excel file.

        The file is expected to have one row per street with columns
        ``ts_qxs`` (district) and ``ts_jd`` (street) — TODO confirm schema
        against the actual data file.

        Returns:
            ``(jd_list, qx_list, local_qx_jd)``: all street names, all district
            names, and a district -> street-list mapping.  All three are empty
            when the file does not exist.
        """
        excel_path = settings.local_data
        if not os.path.exists(excel_path):
            logger.error(f"本地数据文件不存在: {excel_path}")
            return [], [], {}

        local_data = pd.read_excel(excel_path)
        local_qx_jd: Dict[str, List[str]] = {}
        qx_list: List[str] = []
        for _, row in local_data.iterrows():
            qxmc = row["ts_qxs"]
            # Normalize street names to str so substring checks never see NaN.
            jd = str(row["ts_jd"])
            streets = local_qx_jd.get(qxmc)
            if streets is not None:
                streets.append(jd)
            else:
                local_qx_jd[qxmc] = [jd]
            # Record every district once.  (The original appended a district
            # only from its *second* row, so single-street districts were
            # missing from qx_list and could never be recognized.)
            if qxmc not in qx_list:
                qx_list.append(qxmc)
        jd_list = [str(x) for x in local_data["ts_jd"]]
        return jd_list, qx_list, local_qx_jd

    def recog_local_qxjd_no_record(self, dd):
        """Recognize district/street by scanning a record's free-text fields
        against the standard district and street name lists.

        Args:
            dd: dict-like record with text fields ``xfnr``, ``ts_gk``,
                ``ts_dz``, ``ts_xzq``, ``ts_dfnr``.

        Returns:
            ``(district, street)``; either element is ``""`` when not found.
        """
        try:
            # Concatenate every field that may mention a location.
            # (The original wrapped this in an always-true `if not jd:` with a
            # dead else-branch; that scaffolding is removed.)
            xfnr = (str(dd["xfnr"]) + str(dd["ts_gk"]) + str(dd["ts_dz"])
                    + str(dd["ts_xzq"]) + str(dd["ts_dfnr"]))
            # Pass 1: find a known district name, then one of its streets.
            for temp_qx in self._qx_list:
                if temp_qx in xfnr:
                    for temp_jd in self._local_qx_jd.get(temp_qx, []):
                        if temp_jd in xfnr:
                            return temp_qx, temp_jd
                    return temp_qx, ""
            # Pass 2: find any known street and map it back to its district.
            for new_jd in self._jd_list:
                if new_jd in xfnr:
                    for new_qx, temp_jd_list in self._local_qx_jd.items():
                        if new_jd in temp_jd_list:
                            return new_qx, new_jd
            return "", ""
        except Exception as e:
            # Use the module logger for consistency with the rest of the file.
            logger.error(f"获取本地街道信息异常：{e}")
            return "", ""

    async def process(self, request: ServiceRequest) -> ServiceResponse:
        """Handle an information-extraction request.

        Reads the petition text from ``request.data["xfnr"]``, extracts
        location info, and returns it with a confidence score.

        Returns:
            A ``ServiceResponse``; ``success=False`` with an error message on
            empty input or on any extraction failure.
        """
        start_time = time.time()

        try:
            text = request.data.get("xfnr", "")
            if not text:
                return ServiceResponse(
                    request_id=request.request_id,
                    service_type=request.service_type,
                    success=False,
                    error_message="文本内容不能为空"
                )

            # Run the actual extraction.
            extracted_info = self._extract_information(text)

            return ServiceResponse(
                request_id=request.request_id,
                service_type=request.service_type,
                success=True,
                data={
                    "extracted_info": extracted_info.__dict__,
                    "confidence_score": self._calculate_confidence(extracted_info),
                    "processing_details": {
                        "text_length": len(text),
                        "extraction_methods": ["pattern_matching", "keyword_extraction", "entity_recognition"]
                    }
                },
                processing_time=time.time() - start_time
            )

        except Exception as e:
            logger.error(f"信息抽取失败: {str(e)}")
            return ServiceResponse(
                request_id=request.request_id,
                service_type=request.service_type,
                success=False,
                error_message=str(e),
                processing_time=time.time() - start_time
            )

    def get_qxjd_by_record(self, dd):
        """Read district/street directly from the record's own fields,
        validating them against the known division lists.

        Args:
            dd: dict-like record with ``ts_qxs`` (district) and ``ts_jd`` (street).

        Returns:
            ``("", "")`` when the district is unknown, ``(district, "")`` when
            only the street fails validation, else ``(district, street)``.
        """
        try:
            qx = dd["ts_qxs"]
            jd = dd["ts_jd"]
            if qx not in self._qx_list:
                return "", ""
            # .get(..., []) guards against a district missing from the mapping
            # (the original would raise TypeError and fall into the except).
            if jd in self._local_qx_jd.get(qx, []):
                return qx, jd
            return qx, ""
        except Exception as e:
            # Use the module logger for consistency with the rest of the file.
            logger.error(f"获取街道信息异常：{e}")
            return "", ""

    def wtsd_location_recog(self, dd):
        """Derive district/street from the record's address field (``ts_dz``)
        via jionlp location parsing, when the street field is empty.

        Returns:
            ``(district, street)``; falls back to the record's own values when
            the parsed street is not in the local reference data.
        """
        import jionlp as jio
        qx = dd["ts_qxs"]
        jd = dd["ts_jd"]
        xxdz = dd["ts_dz"]

        if not jd and xxdz:
            wtsd = str(xxdz)
            location_dict = jio.parse_location(wtsd, town_village=True)
            jd = location_dict.get("town")
            # Map the parsed street back to its district using local data.
            # (The parsed "county" was assigned but never used originally;
            # that dead assignment is removed.)
            if jd in self._jd_list:
                for new_qx, streets in self._local_qx_jd.items():
                    if jd in streets:
                        return new_qx, jd
        return qx, jd

    def _extract_information(self, text: str) -> ExtractedInfo:
        """Extract structured location info from free text.

        NOTE(review): the original passed the raw string to
        ``get_qxjd_by_record`` (which indexes its argument like a record dict),
        so extraction always raised and returned ("", "").  Here the text is
        matched directly against the loaded district/street name lists.
        """
        info = ExtractedInfo()
        county, street = "", ""
        # Pass 1: a known district mentioned in the text, then one of its streets.
        for qx in self._qx_list:
            if qx in text:
                county = qx
                for jd in self._local_qx_jd.get(qx, []):
                    if jd in text:
                        street = jd
                        break
                break
        # Pass 2: no district found — find any known street and map it back.
        if not county:
            for jd in self._jd_list:
                if jd in text:
                    for qx, streets in self._local_qx_jd.items():
                        if jd in streets:
                            county, street = qx, jd
                            break
                    if county:
                        break
        info.county, info.street = county, street
        return info

    def _calculate_confidence(self, info: ExtractedInfo) -> float:
        """Heuristic confidence score for an extraction result.

        NOTE(review): ``process`` calls this but no definition was visible in
        the reviewed chunk; this simple heuristic prevents an AttributeError.
        A later definition in the class, if one exists, overrides this one.
        """
        score = 0.0
        if info.county:
            score += 0.5
        if info.street:
            score += 0.4
        if info.key_points:
            score += 0.1
        return round(score, 2)

    def _extract_complainant_name(self, text: str) -> Optional[str]:
        """Extract the complainant's name with regex patterns.

        Returns:
            The first 2-4 character all-Chinese candidate, else None.
        """
        patterns = [
            r'我叫([^\s，。！？]{2,4})',
            r'投诉人[：:]\s*([^\s，。！？]{2,4})',
            r'([^\s，。！？]{2,4})投诉',
            r'市民([^\s，。！？]{2,4})反映'
        ]

        for pattern in patterns:
            match = re.search(pattern, text)
            if match:
                name = match.group(1)
                # Keep only pure Chinese-character candidates.
                if re.match(r'^[\u4e00-\u9fa5]{2,4}$', name):
                    return name

        return None

    def _extract_respondent_name(self, text: str) -> Optional[str]:
        """Extract the respondent (the complained-about party) name.

        Returns:
            The first regex match (2-10 chars), else None.
        """
        patterns = [
            r'投诉([^\s，。！？]{2,10})',
            r'([^\s，。！？]{2,10})公司',
            r'([^\s，。！？]{2,10})工厂',
            r'([^\s，。！？]{2,10})企业',
            r'被投诉方[：:]\s*([^\s，。！？]{2,10})'
        ]

        for pattern in patterns:
            match = re.search(pattern, text)
            if match:
                return match.group(1).strip()

        return None

    def _extract_location(self, text: str) -> Optional[str]:
        """Extract location text via the configured location patterns.

        Note: ``self._location_patterns`` defaults to empty (see ``__init__``),
        in which case this returns None.
        """
        for pattern in self._location_patterns:
            matches = re.findall(pattern, text)
            if matches:
                # Join at most the first three matched location words.
                location = " ".join(matches[:3])
                return location if len(location) >= 2 else None

        return None

    def _extract_pollution_type(self, text: str) -> Optional[str]:
        """Return the pollution type whose keywords occur most often in *text*.

        Note: ``self._pollution_keywords`` defaults to empty (see ``__init__``),
        in which case this returns None.
        """
        max_matches = 0
        detected_type = None

        for pollution_type, keywords in self._pollution_keywords.items():
            matches = sum(1 for keyword in keywords if keyword in text)
            if matches > max_matches:
                max_matches = matches
                detected_type = pollution_type

        return detected_type

    async def health_check(self) -> Dict[str, Any]:
        """Report service health and capability metadata."""
        return {
            "healthy": True,
            "service_type": "information_extraction",
            "capabilities": {
                "supported_fields": ["complainant_name", "respondent_name", "location", "pollution_type", "time_period",
                                     "key_points", "severity_level"],
                "max_text_length": 10000,
                "processing_languages": ["zh-CN"]
            }
        }

    def get_capabilities(self) -> Dict[str, Any]:
        """Describe the service's input/output contract and feature set."""
        return {
            "description": "从信访文本中提取区县、街道等",
            "input_format": {"text": "string"},
            "output_format": {
                "extracted_info": "object",
                "confidence_score": "float",
                "processing_details": "object"
            },
            "supported_features": [
                "entity_recognition",
                "keyword_extraction",
                "location_detection",
                "severity_assessment"
            ]
        }

