import jieba
import jieba.analyse
from snownlp import SnowNLP
import logging
import pandas as pd
import os
import re
import csv
class FeatureExtractor:
    """Extracts analysis features from Chinese social-media posts.

    For each post dict it derives: TextRank keywords (jieba), a sentiment
    score/label (SnowNLP), and a police-related alert classification based
    on a configurable term list.
    """

    def __init__(self, stopword_file=r"D:\Users\Lenovo\Desktop\作业4\Stopwords_Chinese.txt",
                 terms_file=r"D:\Users\Lenovo\Desktop\作业4\police_terms.txt"):
        """Load the stopword set and police-term list.

        Args:
            stopword_file: UTF-8 text file, one stopword per line.
            terms_file: UTF-8 text file, one police term per line.
        """
        self.stopwords = self.load_stopwords(stopword_file)
        self.police_terms = self.load_police_terms(terms_file)

    def load_stopwords(self, filepath):
        """Return the stopword set from *filepath*; empty set on any failure."""
        try:
            with open(filepath, 'r', encoding='utf-8') as f:
                return set(line.strip() for line in f if line.strip())
        except Exception as e:
            logging.error(f"加载停用词失败: {str(e)}")
            return set()

    def load_police_terms(self, terms_path=r"D:\Users\Lenovo\Desktop\作业4\police_terms.txt"):
        """Return the police-term list from *terms_path*; empty list on any failure.

        The default path preserves the original hard-coded behavior for
        existing callers that pass no argument.
        """
        try:
            if os.path.exists(terms_path):
                with open(terms_path, "r", encoding='utf-8') as f:
                    return [line.strip() for line in f if line.strip()]
            return []
        except Exception as e:
            logging.error(f"加载警务术语失败: {str(e)}")
            return []

    def extract_features(self, post):
        """Extract the full feature dict from a single post.

        Args:
            post: dict expected to carry at least 'text'; 'id' and
                'has_media' are read when present.

        Returns:
            A feature dict (see default_features for the schema); falls
            back to neutral defaults on any error.
        """
        # Non-dict rows (e.g. malformed records) get neutral defaults.
        if not isinstance(post, dict):
            logging.warning(f"非字典类型数据: {type(post)}")
            return self.default_features()

        try:
            # Normalize missing/non-string text to the empty string.
            text = post.get('text', '')
            if not text or not isinstance(text, str):
                text = ''

            keywords = self.extract_keywords(text)
            sentiment = self.analyze_sentiment(text)
            alert_info = self.classify_alert_type(text)

            return {
                'id': post.get('id', ''),
                'text': text,
                'keywords': keywords,
                'sentiment_score': sentiment['sentiment_score'],
                'sentiment_label': sentiment['sentiment_label'],
                'alert_type': alert_info['type'],
                'risk_level': alert_info['risk_level'],
                'text_length': len(text),
                'has_media': bool(post.get('has_media', False))
            }
        except Exception as e:
            logging.warning(f"特征提取失败: {str(e)}")
            return self.default_features()

    def default_features(self):
        """Return the neutral feature dict used when extraction fails."""
        return {
            'id': '',
            'text': '',
            'keywords': [],
            'sentiment_score': 0.5,
            'sentiment_label': 'neutral',
            'alert_type': '其他',
            'risk_level': 0,
            'text_length': 0,
            'has_media': False
        }

    def extract_keywords(self, text, topK=10):
        """Return up to *topK* TextRank keywords from *text*, stopwords removed."""
        if not text or not isinstance(text, str):
            return []

        try:
            # Strip everything but word characters and CJK ideographs.
            clean_text = re.sub(r'[^\w\u4e00-\u9fa5]', ' ', text)

            # Ask for 2*topK candidates so stopword filtering still
            # leaves enough results; restrict to content POS tags.
            keywords = jieba.analyse.textrank(
                clean_text,
                topK=topK*2,
                withWeight=False,
                allowPOS=('n', 'vn', 'v', 'a')
            )

            return [kw for kw in keywords if kw not in self.stopwords][:topK]
        except Exception as e:
            # Log instead of silently swallowing (was a bare except).
            logging.warning(f"关键词提取失败: {str(e)}")
            return []

    def analyze_sentiment(self, text):
        """Return {'sentiment_score', 'sentiment_label'} for *text* via SnowNLP.

        Scores are in [0, 1]; 0.5/neutral is returned for empty input or
        analyzer failure.
        """
        if not text or not isinstance(text, str):
            return {'sentiment_score': 0.5, 'sentiment_label': 'neutral'}

        try:
            s = SnowNLP(text)
            score = s.sentiments

            # Five-way label thresholds (note the 0.4-0.6 neutral band).
            if score > 0.7:
                label = 'positive'
            elif score > 0.6:
                label = 'slightly_positive'
            elif score < 0.3:
                label = 'negative'
            elif score < 0.4:
                label = 'slightly_negative'
            else:
                label = 'neutral'

            return {
                'sentiment_score': round(score, 4),
                'sentiment_label': label
            }
        except Exception as e:
            # Log instead of silently swallowing (was a bare except).
            logging.warning(f"情感分析失败: {str(e)}")
            return {'sentiment_score': 0.5, 'sentiment_label': 'neutral'}

    def classify_alert_type(self, text):
        """Classify *text* as police-related or ordinary public opinion.

        Returns:
            {'type': ..., 'risk_level': 0-5}; risk level is the number of
            matched police terms, capped at 5.
        """
        if not text or not isinstance(text, str):
            return {'type': '其他', 'risk_level': 0}

        try:
            found_terms = [term for term in self.police_terms if term in text]

            if found_terms:
                # More matched terms -> higher risk, capped at 5.
                risk_level = min(len(found_terms), 5)
                return {'type': '涉警事件', 'risk_level': risk_level}

            return {'type': '普通舆情', 'risk_level': 0}
        except Exception as e:
            # Log instead of silently swallowing (was a bare except).
            logging.warning(f"涉警分类失败: {str(e)}")
            return {'type': '其他', 'risk_level': 0}

def save_feature_data_to_csv(data, filename):
    """Safely write a list of feature dicts to a CSV file.

    Args:
        data: list of dicts as produced by FeatureExtractor.extract_features.
        filename: destination path; the parent directory is created if needed.

    Returns:
        True on success, False on any failure (the error is logged).
    """
    try:
        # Reject anything that is not a list of dicts up front.
        if not isinstance(data, list) or not all(isinstance(item, dict) for item in data):
            raise ValueError("数据必须是字典列表")

        fieldnames = [
            'id', 'text', 'keywords', 'sentiment_score', 'sentiment_label',
            'alert_type', 'risk_level', 'text_length', 'has_media'
        ]

        # Only create the parent directory when the path actually has one:
        # os.path.dirname('out.csv') is '' and os.makedirs('') raises.
        parent_dir = os.path.dirname(filename)
        if parent_dir:
            os.makedirs(parent_dir, exist_ok=True)

        # utf-8-sig writes a BOM so Excel detects the encoding of Chinese text.
        with open(filename, 'w', newline='', encoding='utf-8-sig') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()

            for item in data:
                # Coerce every field defensively so a single malformed
                # record cannot abort the whole export.
                row = {
                    'id': str(item.get('id', '')),
                    'text': str(item.get('text', '')),
                    # str(kw) guards against non-string keyword entries.
                    'keywords': ';'.join(str(kw) for kw in (item.get('keywords') or [])),
                    'sentiment_score': float(item.get('sentiment_score', 0.5)),
                    'sentiment_label': str(item.get('sentiment_label', 'neutral')),
                    'alert_type': str(item.get('alert_type', '其他')),
                    'risk_level': int(item.get('risk_level', 0)),
                    'text_length': int(item.get('text_length', 0)),
                    'has_media': int(bool(item.get('has_media', False)))
                }
                writer.writerow(row)

        # Was a literal "(unknown)" placeholder; report the real path.
        logging.info(f"成功保存 {len(data)} 条特征数据到 {filename}")
        return True
    except Exception as e:
        logging.error(f"特征数据保存失败: {str(e)}")
        return False

if __name__ == '__main__':
    # Send log output to both a file and the console.
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        handlers=[logging.FileHandler('feature_extractor.log'),
                  logging.StreamHandler()]
    )

    extractor = FeatureExtractor()
    input_path = r'D:\Users\Lenovo\Desktop\作业4\clean_data.csv'

    try:
        # Load the cleaned dataset produced by the previous pipeline stage.
        frame = pd.read_csv(input_path)
        logging.info(f"成功读取 {len(frame)} 条清洗数据")

        # One dict per row — the shape extract_features expects.
        records = frame.to_dict('records')
        total = len(records)

        # Extract features, logging progress every 10 records.
        features = []
        for idx, record in enumerate(records):
            if idx % 10 == 0:
                logging.info(f"正在处理第 {idx+1}/{total} 条数据")
            features.append(extractor.extract_features(record))

        # Persist the extracted features next to the input data.
        output_path = r'D:\Users\Lenovo\Desktop\作业4\extractor_data.csv'
        if save_feature_data_to_csv(features, output_path):
            logging.info(f"特征数据已保存至 {output_path}")
        else:
            logging.error("特征数据保存失败")

    except Exception as e:
        logging.error(f"处理失败: {str(e)}")