import random
import logging
import jieba
import pandas as pd
import os
import csv

class DataAugmenter:
    """Augments high-risk posts by swapping domain keywords for related police terms.

    A post is considered "high risk" when its ``keywords`` field contains any
    configured high-risk keyword; such posts get one synonym-replaced copy
    appended to the data set (original rows are always kept).
    """

    def __init__(self, high_risk_keywords=None):
        """Initialize the augmenter.

        Args:
            high_risk_keywords: Optional list of keywords that mark a post as
                high-risk. Defaults to a built-in public-security list.
        """
        self.high_risk_keywords = high_risk_keywords or ['曝光', '举报', '维权', '暴力', '袭警']
        # Anchor term -> candidate replacement terms used for augmentation.
        self.police_terms = {
            "警察": ["执勤", "巡逻", "盘查", "出警"],
            "公安": ["行动", "通告", "联合执法", "专项治理"],
            "袭警": ["暴力抗法", "持械袭击", "围攻"],
            "执法": ["现场", "录像", "全过程", "文明"],
            "举报": ["实名", "匿名", "线索", "核查"],
            "舆情": ["发酵", "管控", "引导", "通报"]
        }

    @staticmethod
    def _normalize_keywords(keywords):
        """Coerce a post's ``keywords`` field into a plain list.

        Handles the shapes seen after a pandas round-trip: a list, a
        ``';'``-joined string, a single-keyword string, ``None``, or a
        missing-cell NaN float.
        """
        if isinstance(keywords, list):
            return keywords
        if isinstance(keywords, str):
            return keywords.split(';') if ';' in keywords else [keywords]
        # NaN (float) appears for empty cells after pd.read_csv; previously
        # this raised TypeError and the row was skipped with a warning.
        if keywords is None or pd.isna(keywords):
            return []
        return [keywords]

    def augment(self, posts):
        """Return ``posts`` plus one augmented copy of each high-risk post.

        Args:
            posts: List of post dicts, or a DataFrame (converted to records).

        Returns:
            A new list containing all original entries followed by the
            augmented copies.

        Raises:
            ValueError: If ``posts`` is neither a list nor a DataFrame.
        """
        if isinstance(posts, pd.DataFrame):
            posts = posts.to_dict('records')
            logging.info("转换DataFrame为字典列表")

        if not isinstance(posts, list):
            raise ValueError("posts 必须是字典列表或DataFrame")

        augmented_posts = posts.copy()  # originals are always preserved
        augmentation_count = 0

        for post in posts:
            try:
                if not isinstance(post, dict):
                    logging.warning("跳过非字典类型数据")
                    continue

                # Only high-risk content is worth oversampling.
                if self.is_high_risk(post):
                    augmented_post = self.apply_augmentation(post)
                    if augmented_post:
                        augmented_posts.append(augmented_post)
                        augmentation_count += 1
            except Exception as e:
                # Best-effort: a single bad row must not abort the batch.
                logging.warning(f"数据增强失败: {str(e)}")

        logging.info(f"增强完成: 原始数据 {len(posts)} 条, 增强后 {len(augmented_posts)} 条 (+{augmentation_count} 增强样本)")
        return augmented_posts

    def is_high_risk(self, post):
        """Return True if any of the post's keywords is in the high-risk list."""
        keywords = self._normalize_keywords(post.get('keywords', []))
        risk_set = set(self.high_risk_keywords)
        return any(kw in risk_set for kw in keywords)

    def apply_augmentation(self, post):
        """Build an augmented copy of ``post`` via synonym replacement.

        Returns:
            The augmented post dict (with an ``augmentation`` tag), or None
            when no replacement was possible or an error occurred.
        """
        try:
            new_post = post.copy()
            text = post.get('text', '')
            keywords = self._normalize_keywords(post.get('keywords', []))

            new_text = self.synonym_replacement(text, keywords)
            if new_text != text:
                new_post['text'] = new_text
                new_post['augmentation'] = 'synonym_replacement'
                return new_post
            return None
        except Exception as e:
            logging.warning(f"应用增强失败: {str(e)}")
            return None

    def synonym_replacement(self, text, keywords):
        """Replace the first occurrence of one known domain term in ``text``.

        A random keyword present in ``self.police_terms`` is chosen as the
        anchor, and its first occurrence is swapped for a random related term.
        Returns ``text`` unchanged when nothing can be replaced.
        """
        if not text or not isinstance(text, str):
            return text

        valid_keywords = [kw for kw in keywords if kw in self.police_terms]
        if not valid_keywords:
            return text

        anchor_word = random.choice(valid_keywords)
        replace_word = random.choice(self.police_terms[anchor_word])

        # Word-level replacement via segmentation keeps token boundaries clean.
        words = list(jieba.cut(text))
        for i, word in enumerate(words):
            if word == anchor_word:
                words[i] = replace_word
                return ''.join(words)

        # jieba may segment the anchor differently than the keyword string
        # (e.g. "袭警" -> "袭" + "警"), in which case the loop above never
        # matches; fall back to a single direct substring replacement so the
        # augmentation still fires.
        if anchor_word in text:
            return text.replace(anchor_word, replace_word, 1)
        return text

def save_augmented_data_to_csv(data, filename):
    """Safely persist augmented post dicts to a CSV file.

    Args:
        data: List of post dicts; missing fields get sensible defaults.
        filename: Target CSV path. Parent directories are created as needed;
            a bare filename (no directory part) writes to the cwd.

    Returns:
        True on success, False on any failure (errors are logged, not raised).
    """
    def _to_int(value, default=0):
        # Per-field coercion: one malformed cell (e.g. NaN from pandas, which
        # makes int() raise) falls back to a default instead of aborting the
        # whole export.
        try:
            return int(value)
        except (TypeError, ValueError):
            return default

    def _to_float(value, default=0.5):
        try:
            return float(value)
        except (TypeError, ValueError):
            return default

    def _keywords_str(value):
        # Lists are stored ';'-joined; anything else is stringified as-is.
        return ';'.join(value) if isinstance(value, list) else str(value)

    try:
        if not isinstance(data, list) or not all(isinstance(item, dict) for item in data):
            raise ValueError("数据必须是字典列表")

        fieldnames = [
            'id', 'text', 'user', 'reposts', 'comments', 'likes',
            'timestamp', 'platform', 'sentiment_score', 'sentiment_label',
            'keywords', 'alert_type', 'risk_level', 'text_length', 'augmentation'
        ]

        # dirname is '' for a bare filename; os.makedirs('') would raise.
        parent = os.path.dirname(filename)
        if parent:
            os.makedirs(parent, exist_ok=True)

        # utf-8-sig BOM keeps Excel happy with Chinese text.
        with open(filename, 'w', newline='', encoding='utf-8-sig') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()

            for item in data:
                writer.writerow({
                    'id': str(item.get('id', '')),
                    'text': str(item.get('text', '')),
                    'user': str(item.get('user', '')),
                    'reposts': _to_int(item.get('reposts', 0)),
                    'comments': _to_int(item.get('comments', 0)),
                    'likes': _to_int(item.get('likes', 0)),
                    'timestamp': str(item.get('timestamp', '')),
                    'platform': str(item.get('platform', 'weibo')),
                    'sentiment_score': _to_float(item.get('sentiment_score', 0.5)),
                    'sentiment_label': str(item.get('sentiment_label', 'neutral')),
                    'keywords': _keywords_str(item.get('keywords', [])),
                    'alert_type': str(item.get('alert_type', '其他')),
                    'risk_level': _to_int(item.get('risk_level', 0)),
                    'text_length': _to_int(item.get('text_length', 0)),
                    'augmentation': str(item.get('augmentation', ''))
                })

        logging.info(f"成功保存 {len(data)} 条增强数据到 {filename}")
        return True
    except Exception as e:
        logging.error(f"增强数据保存失败: {str(e)}")
        return False

def main(
    input_path=r'D:\Users\Lenovo\Desktop\作业4\extractor_data.csv',
    output_path=r'D:\Users\Lenovo\Desktop\作业4\augmented_data.csv',
):
    """Run the augmentation pipeline: read features CSV, augment, save CSV.

    Args:
        input_path: CSV produced by the feature-extraction step.
        output_path: Destination CSV for the augmented data set.
    """
    augmenter = DataAugmenter()

    try:
        # Load the feature-extracted posts.
        extractor_df = pd.read_csv(input_path)
        logging.info(f"成功读取 {len(extractor_df)} 条特征数据")

        extractor_posts = extractor_df.to_dict('records')

        # Augment and persist.
        augmented_posts = augmenter.augment(extractor_posts)
        logging.info(f"增强后数据量: {len(augmented_posts)} 条")

        if save_augmented_data_to_csv(augmented_posts, output_path):
            logging.info(f"增强数据已保存至 {output_path}")
        else:
            logging.error("增强数据保存失败")

    except Exception as e:
        logging.error(f"处理失败: {str(e)}")


if __name__ == '__main__':
    # Log to both a file and the console when run as a script.
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        handlers=[
            logging.FileHandler('data_augmenter.log'),
            logging.StreamHandler()
        ]
    )
    main()
