import sys
import logging
import time
from configparser import ConfigParser
from datetime import datetime
from module import DataCollector, NLPProcessor
import pickle
import joblib
# Configure the logging system: INFO and above go both to monitor.log
# (UTF-8, so Chinese log text is preserved) and to stdout, with a
# timestamped "logger - level - message" format.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('monitor.log', encoding='utf-8'),
        logging.StreamHandler(sys.stdout)
    ]
)

# High-risk keywords that make a post eligible for data augmentation (4.2.3).
HIGH_RISK_KEYWORDS = {'曝光', '举报', '维权', '暴力', '袭警'}

# Numeric features that receive z-score standardization (4.2.4).
NUMERIC_FEATURES = ['sentiment', 'sentiment_magnitude', 'text_length']


def load_config(path='config.ini'):
    """Read *path* as UTF-8 and return the populated ConfigParser."""
    config = ConfigParser()
    with open(path, 'r', encoding='utf-8') as f:
        config.read_file(f)
    return config


def collect_with_retry(collector, config, attempts=3):
    """Crawl weibo posts, retrying up to *attempts* times.

    Returns the raw post list, or an empty list when every attempt
    failed or returned no data.
    """
    for attempt in range(attempts):
        try:
            raw_posts = collector.crawl_weibo(
                keyword=config['weibo']['keyword'],
                pages=int(config['weibo']['pages'])
            )
            if raw_posts:
                return raw_posts
            logging.warning(f"第{attempt+1}次采集返回空数据")
        except Exception as e:
            logging.warning(f"数据采集第{attempt+1}次失败: {str(e)}")
            time.sleep(2)  # back off before retrying
    return []


def clean_posts(raw_posts):
    """4.2.1 data cleaning: fill missing text, drop duplicate ids, and
    normalize timestamps to ISO-8601. Returns the cleaned post list."""
    posts = []
    seen_ids = set()  # set gives O(1) duplicate checks
    for post in raw_posts:
        try:
            # Missing-value handling: keep the record but flag the text.
            if not post.get('text'):
                post['text'] = '[内容已删除]'
                logging.warning(f"检测到空内容微博ID: {post.get('id')}")

            post_id = post.get('id')
            if post_id and post_id not in seen_ids:
                seen_ids.add(post_id)
                # Timestamp normalization; unparseable values fall back
                # to the current time rather than dropping the post.
                if 'timestamp' in post:
                    try:
                        post['timestamp'] = datetime.strptime(
                            post['timestamp'], '%Y-%m-%d %H:%M'
                        ).isoformat()
                    except ValueError:
                        post['timestamp'] = datetime.now().isoformat()
                posts.append(post)
        except Exception as e:
            logging.warning(f"微博ID {post.get('id')} 清洗失败: {str(e)}")

    if raw_posts:  # guard the division against an empty crawl
        dup_rate = (len(raw_posts) - len(posts)) / len(raw_posts)
        logging.info(f"清洗后数据量: {len(posts)}条 (去重率: {dup_rate:.1%})")
    return posts


def extract_features(posts, processor):
    """4.2.2 feature extraction, mutating each post dict in place.

    On failure, every feature read downstream gets a default so the
    standardization step never hits a missing key.
    """
    logging.info("开始特征提取...")
    for i, post in enumerate(posts):
        try:
            # Text features.
            post['keywords'] = processor.extract_keywords(post['text'])

            # Sentiment features (safe access on the result dict).
            sentiment_result = processor.sentiment_analysis(post['text'])
            post['sentiment'] = sentiment_result.get('score', 0.0)
            post['sentiment_magnitude'] = sentiment_result.get('magnitude', 0.0)

            # Alert-type classification.
            post['alert_type'] = processor.classify_alert_type(post['text'])

            # Meta features.
            post['text_length'] = len(post['text'])
            post['has_media'] = 'media' in post and post['media']

            if i % 50 == 0:  # progress log every 50 posts
                logging.info(f"已处理 {i}/{len(posts)} 条数据")
        except Exception as e:
            logging.warning(f"微博ID {post.get('id')} 特征提取失败: {str(e)}")
            # Defaults keep downstream steps working. text_length was
            # previously missing here and crashed standardization.
            post.setdefault('keywords', [])
            post.setdefault('sentiment', 0.0)
            post.setdefault('sentiment_magnitude', 0.0)
            post.setdefault('alert_type', '其他')
            post.setdefault('text_length', len(post.get('text', '')))
            post.setdefault('has_media', False)


def augment_posts(posts, processor):
    """4.2.3 data augmentation: synonym replacement for high-risk posts.

    Returns the original posts plus fully re-featurized augmented copies.
    """
    logging.info("执行数据增强...")
    augmented_posts = list(posts)  # originals are always kept
    enhancement_count = 0
    for post in posts:
        try:
            # Only augment posts containing a high-risk keyword.
            if not HIGH_RISK_KEYWORDS.intersection(post.get('keywords', [])):
                continue
            augmented_text = processor.synonym_replacement(post['text'], post['keywords'])
            if augmented_text == post['text']:
                continue  # replacement was a no-op; nothing to add

            new_post = post.copy()
            new_post['text'] = augmented_text
            new_post['augmentation'] = 'synonym_replacement'
            # Recompute every text-derived feature BEFORE publishing the
            # copy, so a mid-recompute failure cannot leave a
            # half-updated post in the output (the old code appended
            # first, and never refreshed magnitude/text_length).
            new_post['keywords'] = processor.extract_keywords(augmented_text)
            sentiment_result = processor.sentiment_analysis(augmented_text)
            new_post['sentiment'] = sentiment_result.get('score', 0.0)
            new_post['sentiment_magnitude'] = sentiment_result.get('magnitude', 0.0)
            new_post['alert_type'] = processor.classify_alert_type(augmented_text)
            new_post['text_length'] = len(augmented_text)
            augmented_posts.append(new_post)
            enhancement_count += 1
        except Exception as e:
            logging.warning(f"微博ID {post.get('id')} 数据增强失败: {str(e)}")

    logging.info(f"增强后数据量: {len(augmented_posts)}条 (+{enhancement_count}增强样本)")
    return augmented_posts


def standardize_features(posts):
    """4.2.4 standardization: z-score NUMERIC_FEATURES in place, writing
    each scaled value back as '<name>_std'.

    Returns the fitted scaler, or None when there is nothing to fit
    (StandardScaler raises ValueError on an empty matrix).
    """
    from sklearn.preprocessing import StandardScaler

    if not posts:
        logging.warning("无数据可标准化")
        return None

    logging.info("执行数据标准化...")
    feature_matrix = [
        [post.get(feat, 0.0) for feat in NUMERIC_FEATURES] for post in posts
    ]
    scaler = StandardScaler()
    scaled_features = scaler.fit_transform(feature_matrix)

    for post, row in zip(posts, scaled_features):
        for feat_name, value in zip(NUMERIC_FEATURES, row):
            post[f'{feat_name}_std'] = value

    logging.info(f"完成{len(NUMERIC_FEATURES)}个特征的标准化")
    return scaler


def save_scaler(scaler, model_dir='models'):
    """Persist the fitted scaler under *model_dir*, falling back to a
    backup file in the current directory if that fails."""
    import os

    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
        logging.info(f"创建模型目录: {model_dir}")
    scaler_path = os.path.join(model_dir, 'feature_scaler.pkl')
    try:
        joblib.dump(scaler, scaler_path)
        logging.info(f"标准化器已保存至 {scaler_path}")
    except Exception as e:
        logging.error(f"保存标准化器失败: {str(e)}")
        try:
            joblib.dump(scaler, 'feature_scaler_backup.pkl')
            logging.warning("标准化器已备份到当前目录")
        except Exception:  # narrowed from a bare except
            logging.error("标准化器备份失败")


def report_scaler(scaler):
    """Debug aid: print the fitted scaler's parameters and a sample
    transform.

    Operates on the in-memory scaler instead of reloading it from a
    hard-coded path, which crashed when the save step had fallen back
    to the backup location.
    """
    import numpy as np

    print("=" * 50)
    print("标准化器基本信息:")
    print(f"特征数量: {scaler.n_features_in_}")
    print(f"均值: {np.round(scaler.mean_, 4)}")
    print(f"标准差: {np.round(scaler.scale_, 4)}")

    for i, name in enumerate(NUMERIC_FEATURES):
        print(f"\n特征: {name}")
        print(f"  均值: {scaler.mean_[i]:.4f}")
        print(f"  标准差: {scaler.scale_[i]:.4f}")

    print("\n示例转换: sentiment = 0.8")
    sample = np.array([[0.8, 0, 0]])  # other features zero-filled
    print(f"标准化值: {scaler.transform(sample)[0][0]:.4f}")


def main():
    """Opinion-monitoring pipeline: collect -> clean -> extract features
    -> augment -> standardize -> store."""
    config = load_config()
    collector = DataCollector(config['weibo']['cookie'])
    processor = NLPProcessor()  # must provide synonym_replacement()

    logging.info("开始采集微博数据...")
    raw_posts = collect_with_retry(collector, config)
    if not raw_posts:
        logging.error("数据采集失败，无有效数据")
        return
    logging.info(f"原始数据量: {len(raw_posts)}条")
    collector.save_raw_data(raw_posts)  # archive the raw crawl

    posts = clean_posts(raw_posts)
    extract_features(posts, processor)
    posts = augment_posts(posts, processor)

    scaler = standardize_features(posts)
    if scaler is not None:
        save_scaler(scaler)
        report_scaler(scaler)

    # Final storage.
    collector.save_to_database(posts)
    logging.info(f"成功存储{len(posts)}条数据到数据库")


if __name__ == "__main__":
    main()