#!/usr/bin/env python3
# -*- coding: utf-8 -*-

#  Copyright (C) 2020 ShenZhen Powersmart Information Technology Co.,Ltd
#  All Rights Reserved.
#  本软件为深圳博沃智慧开发研制。未经本公司正式书面同意，其他任何个人、团体不得使用、
#  复制、修改或发布本软件.

# @Time : 2025/9/12 15:16
# @File : duplicate_detection_service.py
# @Project:
# @Author : wanghongtao
# @Software: PyCharm

import re
import numpy as np
import pandas as pd
import os
import json
from typing import List, Dict, Tuple, Any, Optional
from datetime import datetime
from tqdm import tqdm
import jionlp as jio
from collections import defaultdict
from sqlalchemy.orm import Session
from sqlalchemy import and_, or_

from core.database import SessionLocal
from core.logging_config import get_logger
from models.petition_record import PetitionRecord, RegionalAnalysisResult
from models.analysis_task import AnalysisTask, TaskStatus

logger = get_logger("duplicate_detection")


class UnionFind:
    """
    Disjoint-set (union-find) structure used to merge lists that share elements.

    Approach:
    1. Map each element to the indices of every list it occurs in.
    2. Union the indices of all lists that share any element.
    3. Collect each resulting group's elements, deduplicated, as the answer.

    Example:
        input:  [[1, 2, 3], [2, 3, 4], [3, 4, 5], [6, 7, 8]]
        output: [[1, 2, 3, 4, 5], [6, 7, 8]]
    """

    def __init__(self, size: int):
        # Every node starts out as its own root with rank 0.
        self.parent = list(range(size))
        self.rank = [0] * size

    def find(self, x: int) -> int:
        """Return the representative of x's set, compressing the path."""
        root = x
        while self.parent[root] != root:
            root = self.parent[root]
        # Second pass: point every node on the walked path straight at the root.
        while self.parent[x] != root:
            self.parent[x], x = root, self.parent[x]
        return root

    def union(self, x: int, y: int) -> None:
        """Merge the sets containing x and y, using union by rank."""
        rx, ry = self.find(x), self.find(y)
        if rx == ry:
            return
        # Attach the shallower tree under the deeper one.
        if self.rank[rx] < self.rank[ry]:
            self.parent[rx] = ry
        else:
            self.parent[ry] = rx
            if self.rank[rx] == self.rank[ry]:
                self.rank[rx] += 1


class DuplicateDetectionService:
    """重复投诉检测服务"""

    def __init__(self, district_file_path: str = None):
        """
        Create the duplicate-detection service and load district reference data.

        Args:
            district_file_path: path to the Ningbo district Excel file; when
                None, a default path under the project's data/ directory is used.
        """
        if district_file_path is None:
            project_root = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
            district_file_path = os.path.join(project_root, "data", "宁波信访项目的数据结构-污染类型.xlsx")
        self.district_file_path = district_file_path

        self.local_data = None   # raw DataFrame loaded from the Excel file
        self.local_qx_jd = {}    # district name -> list of its street names
        self.ds_dict = {}
        self.qx_list = []        # known district names
        self.jd_list = []        # known street names

        # Populate the lookup tables declared above.
        self._load_district_data()

    def _load_district_data(self) -> None:
        """
        Load the Ningbo district/street reference data from the Excel file.

        Populates:
            self.local_data:  raw DataFrame (stays/becomes None on failure)
            self.local_qx_jd: district name -> list of street names
            self.qx_list:     unique district names, in file order
            self.jd_list:     unique street names, in file order

        Missing files are logged and tolerated; the service then works with
        empty lookup tables.
        """
        try:
            if not os.path.exists(self.district_file_path):
                logger.warning(f"区划文件不存在: {self.district_file_path}")
                return

            self.local_data = pd.read_excel(self.district_file_path)

            for _, row in self.local_data.iterrows():
                qxmc = str(row.get("区县", "")).strip()
                jdmc = str(row.get("乡镇名称", "")).strip()

                # Skip rows without a usable district name ("nan" comes from NaN cells).
                if not qxmc or qxmc == "nan":
                    continue

                # Register the district once, then attach the street (if any).
                # setdefault unifies the previously duplicated add/append branches.
                streets = self.local_qx_jd.setdefault(qxmc, [])
                if qxmc not in self.qx_list:
                    self.qx_list.append(qxmc)
                if jdmc and jdmc != "nan":
                    streets.append(jdmc)

            # Collect all street names, deduplicated while preserving file order
            # (the previous version kept duplicates, inflating the logged count
            # and slowing the text scans that iterate jd_list).
            if "乡镇名称" in self.local_data.columns:
                seen = set()
                self.jd_list = []
                for jd in self.local_data["乡镇名称"]:
                    name = str(jd).strip()
                    if name and name != "nan" and name not in seen:
                        seen.add(name)
                        self.jd_list.append(name)

            logger.info(f"成功加载区划数据: {len(self.qx_list)}个区县，{len(self.jd_list)}个街道")

        except Exception as e:
            logger.error(f"加载区划数据失败: {e}")
            self.local_data = None

    def get_regional_analysis_results(self, task_id: int = None) -> pd.DataFrame:
        """
        Fetch address-analysis results from the database as a DataFrame.

        Args:
            task_id: analysis task id; results of all tasks when None

        Returns:
            One row per analysis result; empty DataFrame when nothing matches
            or the query fails.
        """
        db = SessionLocal()
        try:
            query = db.query(RegionalAnalysisResult)
            if task_id:
                # Restrict to a single task when an id was supplied.
                query = query.filter(RegionalAnalysisResult.task_id == task_id)
            # Skip soft-deleted rows.
            query = query.filter(RegionalAnalysisResult.is_deleted == False)

            records = query.all()
            if not records:
                logger.warning(f"未找到地址分析结果，task_id={task_id}")
                return pd.DataFrame()

            # Map ORM attributes onto the Chinese column names used downstream.
            rows = [
                {
                    '事项编号': r.petition_record_id,
                    '任务ID': r.task_id,
                    '初重件标志': r.ccjbz,
                    '姓名': r.tsr,
                    '诉求内容': r.xfnr,
                    '概况': r.ts_gk,
                    '问题属地': r.ts_xzq,
                    '去向单位': r.ts_qxdw,
                    '答复内容': r.ts_dfnr,
                    '备注': r.ts_bz,
                    '投诉对象': r.dzxx_tsdx,
                    '投诉地址': r.dzxx_tsdz,
                    '地址信息_行政区': r.dzxx_ds,
                    '地址信息_街道': r.dzxx_jd,
                    '地址信息_经度': r.dzxx_lng,
                    '地址信息_纬度': r.dzxx_lat,
                    '重复编号': r.cfxs_bh,
                    '概率': r.cfxs_gl,
                }
                for r in records
            ]

            df = pd.DataFrame(rows)
            logger.info(f"获取到 {len(df)} 条地址分析结果")
            return df

        except Exception as e:
            logger.error(f"获取地址分析结果失败: {e}")
            return pd.DataFrame()
        finally:
            db.close()

    def get_district_by_record(self, row: pd.Series) -> Tuple[str, str]:
        """
        Map a record's raw district/street fields onto the standard lists.

        Args:
            row: data row exposing "区县市" and "乡镇街道"

        Returns:
            (district, street); street is "" when it is not a known street of
            that district, and both are "" for an unknown district.
        """
        try:
            district = str(row.get("区县市", "")).strip()
            street = str(row.get("乡镇街道", "")).strip()

            if district not in self.qx_list:
                return "", ""
            # Keep the street only when it belongs to this district.
            known_streets = self.local_qx_jd.get(district, [])
            return (district, street) if street in known_streets else (district, "")
        except Exception as e:
            logger.error(f"获取街道信息异常：{e}")
            return "", ""

    def recognize_local_district(self, row: pd.Series) -> Tuple[str, str]:
        """
        Recognize the standard district/street for a record.

        When the standardized street (新街道) is still empty, fall back to
        scanning the record's free-text fields for known district and street
        names; otherwise return the already-resolved pair unchanged.

        Args:
            row: data row (expects 新区县/新街道 plus the text fields)

        Returns:
            (district, street) — possibly empty strings when nothing matches.
        """
        try:
            qx = str(row.get("新区县", "")).strip()
            jd = str(row.get("新街道", "")).strip()

            # Already resolved — nothing to do.
            if jd:
                return qx, jd

            # Concatenate all descriptive text fields for scanning.
            merged_text = "".join(
                str(row.get(field, ""))
                for field in ("诉求内容", "概况", "投诉地址", "问题属地", "答复内容")
            )

            # Try a district first, then one of its streets.
            for candidate_qx in self.qx_list:
                if candidate_qx not in merged_text:
                    continue
                for candidate_jd in self.local_qx_jd.get(candidate_qx, []):
                    if candidate_jd in merged_text:
                        return candidate_qx, candidate_jd
                # District found but no street of it mentioned.
                return candidate_qx, ""

            # No district mentioned: look for a street and derive its district.
            for candidate_jd in self.jd_list:
                if candidate_jd in merged_text:
                    for candidate_qx, streets in self.local_qx_jd.items():
                        if candidate_jd in streets:
                            return candidate_qx, candidate_jd

            return qx, jd

        except Exception as e:
            logger.error(f"获取本地街道信息异常：{e}")
            return "", ""

    def extract_duplicate_numbers(self, text: str) -> List[str]:
        """
        Extract tracking codes of the form NB/LD/SJ/WX followed by at least
        ten alphanumeric characters.

        Args:
            text: input string (None / NaN tolerated)

        Returns:
            All extracted code strings, in order of appearance.
        """
        if not text or pd.isna(text):
            return []

        # Prefix NB/LD/SJ/WX plus 10+ letters or digits.
        matcher = re.compile(r'(NB|LD|SJ|WX)[a-zA-Z0-9]{10,}')
        return [hit.group(0) for hit in matcher.finditer(str(text))]

    def get_duplicate_number(self, row: pd.Series) -> Optional[str]:
        """
        Extract the first duplicate-tracking code mentioned in a record.

        Searches 诉求内容, 概况 and 答复内容, in that order, and returns the
        first code found by extract_duplicate_numbers.

        Args:
            row: data row containing the three text fields

        Returns:
            The first extracted code, or None when no field contains one.
        """
        try:
            # Iterate the fields directly instead of indexing a freshly-built
            # list by position (the old range(3) loop with a redundant bounds
            # check obscured the simple field-priority order).
            for field in ('诉求内容', '概况', '答复内容'):
                codes = self.extract_duplicate_numbers(row.get(field, ""))
                if codes:
                    return codes[0]
            return None
        except Exception as e:
            logger.error(f"提取重复编号异常：{e}")
            return None

    def get_duplicates_by_number(self, data: pd.DataFrame, result_ls: List = None, xsd_dict: Dict = None) -> Tuple[List, Dict]:
        """
        Locate duplicate leads via tracking codes quoted in the request,
        summary or reply text; such matches are assigned probability 100%.

        Args:
            data: input DataFrame (needs 事项编号 and 重复编号 columns)
            result_ls: accumulated groups of related ids (created when None)
            xsd_dict: accumulated id -> probability map (created when None)

        Returns:
            (result_ls, xsd_dict) with the new findings appended.
        """
        if result_ls is None:
            result_ls = []
        if xsd_dict is None:
            xsd_dict = {}

        try:
            # Pair each record id with the tracking code it quotes.
            rows_with_code = data.loc[data['重复编号'].notna(), ['事项编号', '重复编号']].values
            pairs = [[str(v) for v in pair if pd.notna(v)] for pair in rows_with_code]
            result_ls.extend(pairs)

            # An explicitly quoted code is a certain duplicate: probability 1.0
            # (never overwriting an already-assigned probability).
            mentioned = {str(v) for pair in pairs for v in pair if pd.notna(v)}
            for key in mentioned:
                xsd_dict.setdefault(key, 1.0)

            logger.info(f"通过重复编号找到 {len(mentioned)} 个重复线索")
            return result_ls, xsd_dict

        except Exception as e:
            logger.error(f"通过重复编号获取重复线索失败：{e}")
            return result_ls, xsd_dict

    def get_duplicates_by_content(self, data: pd.DataFrame, result_ls: List = None, xsd_dict: Dict = None) -> Tuple[List, Dict]:
        """
        Group records whose request text (诉求内容) is exactly identical;
        such matches are assigned probability 99%.

        Args:
            data: input DataFrame (needs 事项编号 and 诉求内容 columns)
            result_ls: accumulated groups of related ids (created when None)
            xsd_dict: accumulated id -> probability map (created when None)

        Returns:
            (result_ls, xsd_dict) with the new findings appended.
        """
        if result_ls is None:
            result_ls = []
        if xsd_dict is None:
            xsd_dict = {}

        try:
            matched_ids = []
            # Texts that occur more than once, each listed a single time.
            repeated_texts = data.loc[data['诉求内容'].duplicated(), '诉求内容'].drop_duplicates()

            for content in repeated_texts:
                # Skip empty/NaN texts — identical emptiness is not a duplicate.
                if pd.isna(content) or not content:
                    continue
                same_text = data['诉求内容'] == content
                if sum(same_text) > 1:
                    ids = data.loc[same_text, '事项编号'].tolist()
                    result_ls.append(ids)
                    matched_ids.extend(ids)

            # Identical wording is near-certain duplication: probability 0.99
            # (never overwriting an already-assigned probability).
            for record_id in matched_ids:
                if record_id not in xsd_dict:
                    xsd_dict[record_id] = 0.99

            logger.info(f"通过相同内容找到 {len(set(matched_ids))} 个重复线索")
            return result_ls, xsd_dict

        except Exception as e:
            logger.error(f"通过相同内容获取重复线索失败：{e}")
            return result_ls, xsd_dict

    def get_same_name(self, temp_data: pd.DataFrame, temp_dxmc: str, temp_result: List) -> Tuple[List, List]:
        """
        获取相同企业名称的记录

        Args:
            temp_data: 数据DataFrame
            temp_dxmc: 企业名称
            temp_result: 结果列表

        Returns:
            (结果列表, 企业名称列表)
        """
        try:
            temp_result = []
            temp_dxmc_list = []

            for index, row in temp_data.iterrows():
                xh = str(row.get("事项编号", ""))
                t_dxmc = str(row.get("投诉对象", ""))

                if temp_dxmc in t_dxmc:
                    temp_result.append(xh)
                    if t_dxmc not in temp_dxmc_list:
                        temp_dxmc_list.append(t_dxmc)

            return temp_result, temp_dxmc_list

        except Exception as e:
            logger.error(f"获取相同企业名称异常：{e}")
            return [], []

    def get_company_duplicates(self, temp_data: pd.DataFrame, end_words: List[str], result_ls: List = None) -> List:
        """
        Find duplicate groups by normalized company name in 投诉对象.

        A name is normalized by stripping a corporate suffix ("公司", "集团",
        ...), the city prefix "宁波" and district names; every record whose
        target contains the remaining fragment is then grouped together.

        Args:
            temp_data: data rows to scan (needs 事项编号 and 投诉对象)
            end_words: corporate suffix words, expected longest-first
            result_ls: accumulated groups of related ids (created when None)

        Returns:
            result_ls with the new groups appended.
        """
        if result_ls is None:
            result_ls = []

        try:
            # Company-name frequencies; iteration proceeds by descending count.
            dxmc_obj = temp_data["投诉对象"].value_counts()

            # District names plus their short forms (last char dropped), used
            # to scrub location info out of company names.
            new_qx_list = []
            for qx in self.qx_list:
                new_qx_list.append(qx)
                if len(qx) > 1:
                    new_qx_list.append(qx[:-1])

            # Names already claimed by an earlier group; a set makes the
            # per-name membership test O(1) instead of O(n) list scans.
            seen_names = set()

            for dxmc in tqdm(dxmc_obj.index, total=len(dxmc_obj), desc="企业名称去重"):
                if pd.isna(dxmc) or not dxmc or dxmc in seen_names:
                    continue

                dxmc_str = str(dxmc).strip()

                for word in end_words:
                    if word in dxmc_str:
                        temp_dxmc = dxmc_str.replace(word, "").replace("宁波", "")

                        # Strip district names / short forms.
                        for qx in new_qx_list:
                            temp_dxmc = temp_dxmc.replace(qx, "")

                        # Ignore fragments too short or too generic to identify
                        # a company.
                        if temp_dxmc and len(temp_dxmc) > 3 and temp_dxmc not in ["工地", "建设工地"]:
                            # get_same_name rebuilds the id list from scratch,
                            # so the old precomputed list was dead work — pass
                            # an empty placeholder instead.
                            temp_result, temp_dxcm_list = self.get_same_name(temp_data, temp_dxmc, [])
                            seen_names.update(temp_dxcm_list)

                            if len(temp_result) > 1:
                                result_ls.append(temp_result)
                            break

            logger.info(f"通过企业名称找到 {len(result_ls)} 个重复组")
            return result_ls

        except Exception as e:
            logger.error(f"通过企业名称获取重复线索失败：{e}")
            return result_ls

    def merge_lists(self, list_of_lists: List[List]) -> List[List]:
        """
        Merge all lists that share at least one element (transitively).

        Elements are compared as strings; groups that end up with a single
        element are dropped from the result.

        Args:
            list_of_lists: groups of (possibly overlapping) elements

        Returns:
            Merged, deduplicated groups of size > 1 (elements as strings).
        """
        try:
            count = len(list_of_lists)
            if not count:
                return []

            # Which input lists does each element occur in?
            occurrences = defaultdict(list)
            for pos, group in enumerate(list_of_lists):
                for element in set(group):  # set() drops in-list duplicates
                    if pd.notna(element) and element:
                        occurrences[str(element)].append(pos)

            # Union every pair of lists sharing an element.
            uf = UnionFind(count)
            for positions in occurrences.values():
                if len(positions) > 1:
                    anchor = positions[0]
                    for other in positions[1:]:
                        uf.union(anchor, other)

            # Gather each connected component's elements, deduplicated via sets.
            components = defaultdict(set)
            for pos in range(count):
                members = (str(e) for e in list_of_lists[pos] if pd.notna(e) and e)
                components[uf.find(pos)].update(members)

            # Only multi-element groups represent actual duplication.
            return [list(group) for group in components.values() if len(group) > 1]

        except Exception as e:
            logger.error(f"合并列表失败：{e}")
            return list_of_lists

    def process_address_standardization(self, data: pd.DataFrame) -> pd.DataFrame:
        """
        Standardize every record's district/street into 新区县/新街道.

        Pass 1 uses the structured 区县市/乡镇街道 columns when both exist;
        pass 2 falls back to free-text recognition for rows still unresolved.

        Args:
            data: raw records

        Returns:
            The same DataFrame with 新区县/新街道 filled in.
        """
        try:
            # Start from empty standardized columns.
            for col in ('新区县', '新街道'):
                data[col] = ""

            # Pass 1: structured columns, when the source provides both.
            if {"区县市", "乡镇街道"}.issubset(data.columns):
                tqdm.pandas(desc="地址标准化1")
                data[['新区县', '新街道']] = data.progress_apply(
                    self.get_district_by_record, axis=1, result_type="expand"
                )

            # Pass 2: free-text recognition keeps resolved rows and fills gaps.
            tqdm.pandas(desc="地址标准化2")
            data[['新区县', '新街道']] = data.progress_apply(
                self.recognize_local_district, axis=1, result_type="expand"
            )

            logger.info(f"地址标准化完成，处理了 {len(data)} 条记录")
            return data

        except Exception as e:
            logger.error(f"地址标准化失败：{e}")
            return data

    def detect_duplicates(self, task_id: int = None) -> pd.DataFrame:
        """
        Run the full duplicate-complaint detection pipeline.

        Pipeline: fetch address-analysis results -> normalize NaN to "" ->
        standardize districts/streets -> extract quoted tracking codes ->
        collect duplicate groups by code (p=1.0), by identical request text
        (p=0.99) and by company name -> merge overlapping groups with
        union-find -> write 重复编号 / 重复概率 back onto the DataFrame.

        Args:
            task_id: analysis task id; all data is analyzed when None

        Returns:
            DataFrame with duplicate info added (empty on failure or no data).
        """
        try:
            logger.info("开始重复投诉检测")

            # 1. Fetch the address-analysis results from the database.
            data = self.get_regional_analysis_results(task_id)
            if data.empty:
                logger.warning("没有找到地址分析结果，无法进行重复检测")
                return pd.DataFrame()

            logger.info(f"获取到 {len(data)} 条记录进行重复检测")

            # 2. Preprocessing: normalize NaN to empty strings so downstream
            #    text concatenation/comparison never sees NaN.
            data = data.replace(np.nan, '')

            # 3. Standardize district/street into 新区县/新街道.
            data = self.process_address_standardization(data)

            # 4. Extract the first quoted duplicate-tracking code per record.
            tqdm.pandas(desc="提取重复编号")
            data['重复编号'] = data.progress_apply(self.get_duplicate_number, axis=1)

            # 5. Duplicate detection.

            # 5.1 Via explicit tracking codes (probability 100%).
            result_ls = []
            xsd_dict = {}
            result_ls, xsd_dict = self.get_duplicates_by_number(data, result_ls, xsd_dict)

            # 5.2 Via identical request text (probability 99%).
            result_ls, xsd_dict = self.get_duplicates_by_content(data, result_ls, xsd_dict)

            # 5.3 Via company names (probability 95%).
            if "投诉对象" in data.columns:
                # Corporate suffix words, sorted longest-first so the most
                # specific suffix is stripped during normalization.
                end_words = ["公司", "有限公司", "酒店", "公园", "乐园", "游乐园", "城游乐园", "集团"]
                end_words = sorted(end_words, key=len, reverse=True)

                # Scan district by district, only rows with a resolved address.
                valid_data = data[(data['新区县'] != '') & (data['新街道'] != '')]
                for qx in self.qx_list:
                    mask = valid_data['新区县'] == qx
                    temp_pd = valid_data.loc[mask]
                    if not temp_pd.empty:
                        result_ls = self.get_company_duplicates(temp_pd, end_words, result_ls)

            # 6. Merge overlapping duplicate groups (union-find).
            merged_results = self.merge_lists(result_ls)
            logger.info(f"合并后得到 {len(merged_results)} 个重复组")

            # 7. Build the record-id -> duplicate-group mapping.
            duplicates_dict = {}
            for group in merged_results:
                for item in group:
                    if item in duplicates_dict:
                        # Merge with an existing group (defensive: merge_lists
                        # normally leaves the groups disjoint already).
                        existing_group = set(duplicates_dict[item])
                        existing_group.update(group)
                        duplicates_dict[item] = list(existing_group)
                    else:
                        duplicates_dict[item] = group.copy()

            # 8. Write each record's duplicate group back as a comma-joined
            #    string (empty for records without duplicates).
            data['重复编号'] = data['事项编号'].apply(
                lambda x: ','.join(duplicates_dict.get(str(x), [])) if pd.notna(x) else ''
            )

            # 9. Attach the per-record duplicate probability.
            def get_probability(item_id):
                # Records never flagged as duplicates default to 0.0.
                item_id = str(item_id)
                return xsd_dict.get(item_id, 0.0)

            data['重复概率'] = data['事项编号'].apply(get_probability)

            logger.info(f"重复投诉检测完成，共检测到 {len(merged_results)} 个重复组")
            return data

        except Exception as e:
            logger.error(f"重复投诉检测失败：{e}")
            return pd.DataFrame()

    def save_results(self, data: pd.DataFrame, output_path: str = None) -> str:
        """
        Persist the detection results locally as Excel plus JSON.

        Args:
            data: result DataFrame
            output_path: target Excel path; when None, a timestamped file
                under the project's results/ directory is used

        Returns:
            The Excel file path.

        Raises:
            Exception: re-raised after logging when saving fails.
        """
        try:
            if output_path is None:
                timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
                output_path = os.path.join(
                    os.path.dirname(os.path.dirname(os.path.dirname(__file__))),
                    "results", f"duplicate_detection_result_{timestamp}.xlsx"
                )

            # Ensure the output directory exists (a bare filename has an empty
            # dirname, which os.makedirs would reject).
            out_dir = os.path.dirname(output_path)
            if out_dir:
                os.makedirs(out_dir, exist_ok=True)

            # Save to Excel.
            data.to_excel(output_path, index=False)

            # Derive the JSON path from the extension instead of a naive
            # str.replace: for a custom path without ".xlsx" the old code made
            # json_path == output_path and overwrote the Excel file.
            json_path = os.path.splitext(output_path)[0] + '.json'

            # Convert rows into JSON-serializable dicts.
            json_data = []
            for _, row in data.iterrows():
                json_row = {}
                for col in data.columns:
                    value = row[col]
                    if pd.isna(value):
                        json_row[col] = None
                    elif isinstance(value, (int, float, str, bool)):
                        json_row[col] = value
                    else:
                        # Fall back to the string form (e.g. numpy scalars,
                        # timestamps).
                        json_row[col] = str(value)
                json_data.append(json_row)

            with open(json_path, 'w', encoding='utf-8') as f:
                json.dump(json_data, f, ensure_ascii=False, indent=2)

            logger.info(f"结果已保存到: {output_path} 和 {json_path}")
            return output_path

        except Exception as e:
            logger.error(f"保存结果失败：{e}")
            raise

    def get_summary_statistics(self, data: pd.DataFrame) -> Dict[str, Any]:
        """
        获取检测结果的统计摘要

        Args:
            data: 结果数据DataFrame

        Returns:
            统计摘要字典
        """
        try:
            if data.empty:
                return {"total_records": 0, "duplicates_found": 0, "duplicate_groups": 0}

            total_records = len(data)

            # 统计有重复编号的记录
            has_duplicates = data['检测到的重复编号'].str.len() > 0
            duplicates_found = has_duplicates.sum()

            # 统计重复组数量
            duplicate_groups = set()
            for duplicates in data.loc[has_duplicates, '检测到的重复编号']:
                if duplicates:
                    duplicate_groups.add(duplicates)

            # 按概率统计
            probability_stats = data['重复概率'].value_counts().to_dict()

            # 按区县统计
            district_stats = data['新区县'].value_counts().head(10).to_dict()

            summary = {
                "total_records": total_records,
                "duplicates_found": int(duplicates_found),
                "duplicate_groups": len(duplicate_groups),
                "duplicate_rate": round(duplicates_found / total_records * 100, 2) if total_records > 0 else 0,
                "probability_distribution": probability_stats,
                "top_districts": district_stats,
                "detection_time": datetime.now().isoformat()
            }

            return summary

        except Exception as e:
            logger.error(f"生成统计摘要失败：{e}")
            return {"error": str(e)}


def main():
    """Entry point for manually exercising the detection pipeline."""
    service = DuplicateDetectionService()

    # Run the full detection pipeline over all available data.
    detected = service.detect_duplicates()

    if detected.empty:
        print("未找到数据或检测失败")
        return

    # Persist the results, then report a short summary.
    output_file = service.save_results(detected)
    summary = service.get_summary_statistics(detected)

    print("重复投诉检测完成！")
    print(f"结果保存到: {output_file}")
    print(f"统计摘要: {json.dumps(summary, ensure_ascii=False, indent=2)}")


if __name__ == "__main__":
    main()