import pandas as pd
import numpy as np
import re
from datetime import datetime, timedelta
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import networkx as nx
from itertools import combinations

# 设置随机种子以确保结果可重现
np.random.seed(42)

""" 
这段代码实现了完整的信访件重复性分析流程：

1、生成虚拟数据：创建了100条虚拟信访记录，其中包含已知的重复件用于测试
2、数据清洗与标准化：统一文本格式，提取关键信息
3、精确匹配：通过复合键(被举报对象+地点+金额+相关人员)识别完全一致的重复件
4、内容相似度分析：使用TF-IDF和余弦相似度识别内容相似但表述不同的重复件
5、结果整合与评估：合并两种方法的检测结果，并计算精确率、召回率等指标
6、生成报告：输出包含重复组详细信息的Excel报告 
"""

# ==================== 1. Mock data generation ====================
def generate_mock_data(num_records=100, num_duplicate_groups=10):
    """Generate a synthetic petition-record dataset for duplicate detection.

    Builds ``num_duplicate_groups`` "same event, different wording" templates
    and emits 2-5 records per template (labelled ``is_duplicate=True``), then
    fills the remainder with unique records and shuffles the result.

    Args:
        num_records: Total number of records to produce.
        num_duplicate_groups: Number of duplicate-event templates (default 10,
            matching the original behavior).

    Returns:
        A shuffled ``pandas.DataFrame`` with exactly ``num_records`` rows and
        columns: id, reported_entity, location, related_person,
        problem_content, amount, date, is_duplicate.
    """

    # Value pools the synthetic records are assembled from.
    reported_entities = [
        "XX科技有限公司", "YY房地产开发公司", "ZZ物业管理有限公司", 
        "AA食品加工厂", "BB建筑有限公司", "CC商贸公司"
    ]
    
    locations = [
        "北京市海淀区", "上海市浦东新区", "广州市天河区", 
        "深圳市南山区", "成都市武侯区", "武汉市江汉区"
    ]
    
    persons = [
        "张三", "李四", "王五", "赵六", "钱七", "孙八", "周九", "吴十"
    ]
    
    problems = [
        "拖欠农民工工资", "违规排放污染物", "违规施工噪音扰民",
        "产品质量不合格", "虚假宣传误导消费者", "违规占用公共用地"
    ]
    
    # Templates for duplicate groups: each describes one underlying event
    # that will be reported several times with different phrasings.
    duplicate_templates = []
    for _ in range(num_duplicate_groups):
        entity = np.random.choice(reported_entities)
        location = np.random.choice(locations)
        person = np.random.choice(persons)
        problem = np.random.choice(problems)
        amount = str(np.random.randint(10, 100) * 1000)
        
        # Several wordings of the same complaint.
        content_variations = [
            f"{entity}在{location}的项目中{problem}，涉及金额{amount}元，负责人{person}",
            f"举报{entity}在{location}{problem}，涉及金额约{amount}元，相关责任人{person}",
            f"{location}的{entity}{problem}，金额{amount}元，涉及{person}",
            f"关于{entity}在{location}{problem}的举报，金额{amount}，负责人{person}"
        ]
        
        duplicate_templates.append({
            "reported_entity": entity,
            "location": location,
            "person": person,
            "problem": problem,
            "amount": amount,
            "content_variations": content_variations
        })
    
    records = []
    record_id = 1
    
    # Emit the duplicate records first (2-5 per template).  FIX: cap emission
    # at num_records — the original version could return MORE rows than
    # requested when the templates alone produced over num_records records
    # (the fill loop below then received a negative count and did nothing).
    for template in duplicate_templates:
        num_duplicates = np.random.randint(2, 6)
        base_date = datetime(2023, 1, 1) + timedelta(days=int(np.random.randint(0, 300)))
        
        for _ in range(num_duplicates):
            if len(records) >= num_records:
                break
            variation = np.random.choice(template["content_variations"])
            record_date = base_date + timedelta(days=int(np.random.randint(0, 90)))
            
            records.append({
                "id": record_id,
                "reported_entity": template["reported_entity"],
                "location": template["location"],
                "related_person": template["person"],
                "problem_content": variation,
                "amount": template["amount"],
                "date": record_date.strftime("%Y-%m-%d"),
                "is_duplicate": True  # ground-truth label for later evaluation
            })
            record_id += 1
    
    # Fill the remainder with unique, non-duplicate records (clamped at 0
    # in case the duplicates already reached num_records).
    for _ in range(max(0, num_records - len(records))):
        entity = np.random.choice(reported_entities)
        location = np.random.choice(locations)
        person = np.random.choice(persons)
        problem = np.random.choice(problems)
        amount = str(np.random.randint(10, 100) * 1000)
        
        # A distinct phrasing pattern, so these do not collide with templates.
        content = f"{entity}在{location}的活动中{problem}，涉及金额{amount}元，联系人{person}"
        
        record_date = datetime(2023, 1, 1) + timedelta(days=int(np.random.randint(0, 365)))
        
        records.append({
            "id": record_id,
            "reported_entity": entity,
            "location": location,
            "related_person": person,
            "problem_content": content,
            "amount": amount,
            "date": record_date.strftime("%Y-%m-%d"),
            "is_duplicate": False  # ground-truth label: not a duplicate
        })
        record_id += 1
    
    # Shuffle deterministically so records are not grouped by origin.
    df = pd.DataFrame(records)
    df = df.sample(frac=1, random_state=42).reset_index(drop=True)
    
    return df

# Generate 100 mock records (in a real deployment, replace this with your
# actual ~60k-record petition dataset).
print("生成虚拟数据...")
df = generate_mock_data(100)
print(f"共生成 {len(df)} 条记录，其中 {df['is_duplicate'].sum()} 条是重复件")

# ==================== 2. Data cleaning and standardization ====================
def standardize_text(text):
    """Normalize a text field for matching.

    Lowercases and strips the value, then removes every character that is
    neither a word character nor a CJK ideograph (U+4E00..U+9FFF).
    Missing values (NaN/None) become the empty string.
    """
    if pd.isna(text):
        return ''
    normalized = str(text).lower().strip()
    return re.sub(r'[^\w\u4e00-\u9fff]', '', normalized)

def standardize_amount(amount_str):
    """Normalize an amount field by keeping only its digit characters.

    Missing values (NaN/None) become the empty string.
    """
    if pd.isna(amount_str):
        return ''
    # \D is the complement of \d, so this strips everything but digits.
    return re.sub(r'\D', '', str(amount_str))

print("进行数据清洗与标准化...")
# Normalize each key matching field into a parallel *_clean column
# (creation order kept identical to the original script).
for source_col, clean_col in [
    ('reported_entity', 'reported_entity_clean'),
    ('location', 'location_clean'),
    ('related_person', 'related_person_clean'),
]:
    df[clean_col] = df[source_col].apply(standardize_text)

df['amount_clean'] = df['amount'].apply(standardize_amount)
df['content_clean'] = df['problem_content'].apply(standardize_text)

# Parse dates; anything unparseable becomes NaT instead of raising.
df['date_parsed'] = pd.to_datetime(df['date'], errors='coerce')

# ==================== 3. Exact matching via composite fingerprint ====================
print("创建复合指纹进行精确匹配...")
# Join the cleaned key fields into one fingerprint string; records sharing
# a fingerprint describe the same entity/location/amount/person combination.
key_fields = [
    'reported_entity_clean',
    'location_clean',
    'amount_clean',
    'related_person_clean',
]
df['composite_key'] = df[key_fields].agg('_'.join, axis=1)

# Keep every record whose fingerprint occurs more than once (keep=False
# marks all members of each duplicated group, preserving row order).
exact_match_groups = df[df.duplicated('composite_key', keep=False)]
exact_group_ids = set(exact_match_groups['id'])

print(f"通过精确匹配找到 {exact_match_groups['composite_key'].nunique()} 个重复组")

# ==================== 4. Content-based similarity analysis ====================
print("进行基于内容的相似度分析...")

# Cosine-similarity threshold above which two records are linked as duplicates.
SIMILARITY_THRESHOLD = 0.7

# Records not already covered by exact matching.
remaining_df = df[~df['id'].isin(exact_group_ids)].copy()

# FIX: always define BOTH outputs up front.  The original only assigned
# `content_similar_ids` in the empty branch, so `content_similar_groups`
# was undefined and the report section below raised a NameError whenever
# no records were left for similarity analysis.
content_similar_groups = []
content_similar_ids = set()

if len(remaining_df) > 0:
    # Compare only records naming the same reported entity — this keeps the
    # pairwise similarity computation tractable on large datasets.
    for entity, group in remaining_df.groupby('reported_entity_clean'):
        if len(group) < 2:
            continue

        texts = group['content_clean'].tolist()
        indices = group.index.tolist()

        # TF-IDF vectorization of the cleaned complaint texts.
        vectorizer = TfidfVectorizer()
        try:
            tfidf_matrix = vectorizer.fit_transform(texts)
        except ValueError:
            # FIX: narrowed from a bare `except:`.  fit_transform raises
            # ValueError on an empty vocabulary (e.g. all texts too short);
            # a bare except also swallowed KeyboardInterrupt/SystemExit.
            continue

        # Pairwise cosine similarity between all texts in this group.
        cosine_sim = cosine_similarity(tfidf_matrix, tfidf_matrix)

        # Link records whose similarity exceeds the threshold; connected
        # components of this graph are groups of near-duplicates.
        G = nx.Graph()
        G.add_nodes_from(indices)
        for i, j in combinations(range(len(indices)), 2):
            if cosine_sim[i, j] > SIMILARITY_THRESHOLD:
                G.add_edge(indices[i], indices[j], weight=cosine_sim[i, j])

        # Map each DataFrame index back to its row position in cosine_sim.
        pos_of = {idx: pos for pos, idx in enumerate(indices)}
        for component in nx.connected_components(G):
            if len(component) > 1:  # singletons have no duplicates
                # Average similarity over pairs WITHIN the component only
                # (the original scanned every pair in the whole group and
                # filtered — same mean, quadratically more work).
                positions = sorted(pos_of[idx] for idx in component)
                pair_sims = [cosine_sim[i, j] for i, j in combinations(positions, 2)]
                content_similar_groups.append({
                    'indices': list(component),
                    'entity': entity,
                    'avg_similarity': np.mean(pair_sims)
                })

    # Collect the record IDs that belong to any similarity group.
    for group in content_similar_groups:
        for idx in group['indices']:
            content_similar_ids.add(remaining_df.loc[idx, 'id'])

    print(f"通过内容相似度分析找到 {len(content_similar_groups)} 个重复组")
else:
    print("没有需要内容相似度分析的记录")

# ==================== 5. Integrate results and evaluate ====================
print("整合结果并生成报告...")
# Union of the IDs flagged by either detection method.
all_duplicate_ids = exact_group_ids.union(content_similar_ids)

# Flag every detected duplicate on the main frame.
df['detected_duplicate'] = df['id'].isin(all_duplicate_ids)

# Evaluate detection against the ground-truth labels baked into the mock data.
true_duplicates = set(df[df['is_duplicate']]['id'])
detected_duplicates = all_duplicate_ids

true_positives = len(true_duplicates & detected_duplicates)
false_positives = len(detected_duplicates - true_duplicates)
false_negatives = len(true_duplicates - detected_duplicates)

# Guard each ratio against a zero denominator.
predicted_total = true_positives + false_positives
actual_total = true_positives + false_negatives
precision = true_positives / predicted_total if predicted_total > 0 else 0
recall = true_positives / actual_total if actual_total > 0 else 0
f1_score = 2 * (precision * recall) / (precision + recall) if (precision + recall) > 0 else 0

print("\n===== 检测效果评估 =====")
print(f"实际重复件数量: {len(true_duplicates)}")
print(f"检测出的重复件数量: {len(detected_duplicates)}")
print(f"精确率 (Precision): {precision:.2%}")  # share of detections that are true duplicates
print(f"召回率 (Recall): {recall:.2%}")      # share of true duplicates actually detected
print(f"F1分数: {f1_score:.4f}")

# Build a detailed report: one row per detected duplicate group.
duplicate_report = []

# Groups found by exact composite-key matching: re-group the full frame by
# key, restricted to keys known to have more than one record.
for key, group in df[df['composite_key'].isin(exact_match_groups['composite_key'])].groupby('composite_key'):
    if len(group) > 1:
        first_date = group['date_parsed'].min()
        last_date = group['date_parsed'].max()
        
        # All key fields are identical within an exact group, so the first
        # row is representative.
        duplicate_report.append({
            'group_id': f"EXACT_{key}",
            'reported_entity': group['reported_entity'].iloc[0],
            'location': group['location'].iloc[0],
            'amount': group['amount'].iloc[0],
            'related_person': group['related_person'].iloc[0],
            'duplicate_count': len(group),
            'first_date': first_date,
            'last_date': last_date,
            'duration_days': (last_date - first_date).days,
            'petition_ids': ', '.join(group['id'].astype(str)),
            'detection_method': '精确匹配'
        })

# Groups found by content similarity (if any).
# NOTE(review): `content_similar_groups` is only assigned when the step-4
# `remaining_df` check took the non-empty branch; if it was empty, this
# line raises NameError — confirm step 4 defines it on both paths.
if content_similar_groups:
    for i, group_info in enumerate(content_similar_groups):
        group_indices = group_info['indices']
        group_df = remaining_df.loc[group_indices]
        
        if len(group_df) > 1:
            first_date = group_df['date_parsed'].min()
            last_date = group_df['date_parsed'].max()
            
            duplicate_report.append({
                'group_id': f"SIMILAR_{i}",
                'reported_entity': group_df['reported_entity'].iloc[0],
                'location': group_df['location'].iloc[0],
                'amount': group_df['amount'].iloc[0],
                'related_person': group_df['related_person'].iloc[0],
                'duplicate_count': len(group_df),
                'first_date': first_date,
                'last_date': last_date,
                'duration_days': (last_date - first_date).days,
                'petition_ids': ', '.join(group_df['id'].astype(str)),
                'detection_method': f"内容相似度 (平均: {group_info['avg_similarity']:.3f})"
            })

# Assemble the report rows into a DataFrame.
report_df = pd.DataFrame(duplicate_report)

# Print a summary and persist the full report.
print(f"\n生成的重复组报告包含 {len(report_df)} 个重复组")
if not report_df.empty:
    print("\n前5个重复组信息:")
    print(report_df[['group_id', 'reported_entity', 'duplicate_count', 'duration_days', 'detection_method']].head())
    
    # Save the detailed report to Excel (requires an openpyxl/xlsxwriter backend).
    report_df.to_excel('信访重复件分析报告.xlsx', index=False)
    print("\n详细报告已保存到 '信访重复件分析报告.xlsx'")
else:
    print("未检测到任何重复组")

# ==================== 6. Spot-check the results ====================
print("\n===== 验证结果 =====")
print("随机检查几个重复组的内容:")
# Sample up to 3 groups deterministically (min() handles an empty report).
sample_groups = report_df.sample(min(3, len(report_df)), random_state=42)

for _, group in sample_groups.iterrows():
    print(f"\n重复组 {group['group_id']} (检测方法: {group['detection_method']}):")
    print(f"被举报对象: {group['reported_entity']}")
    print(f"重复次数: {group['duplicate_count']}次")
    print(f"持续时间: {group['duration_days']}天")
    
    # Recover the group's member records from the comma-joined ID string.
    ids = list(map(int, group['petition_ids'].split(', ')))
    records = df[df['id'].isin(ids)]
    
    print("该组中的信访内容样本:")
    for _, record in records.head(2).iterrows():
        print(f"  - {record['problem_content']}")