import joblib
import os
import logging
import pandas as pd
from sklearn.preprocessing import StandardScaler
import csv
class DataStandardizer:
    """Standardizer for public-opinion (sentiment) post features.

    Fits a ``StandardScaler`` on selected numeric fields of a list of post
    dicts, writes ``<feature>_std`` keys back onto each post, and can
    persist/reload the fitted scaler via joblib.
    """

    def __init__(self, numeric_features=None):
        """
        Args:
            numeric_features (list | None): numeric feature keys to
                standardize; defaults to sentiment_score, text_length,
                risk_level.
        """
        self.numeric_features = numeric_features or [
            'sentiment_score', 'text_length', 'risk_level'
        ]
        self.scaler = StandardScaler()
        self.model_path = 'models/feature_scaler.pkl'
        # Named logger: logging.getLogger returns the SAME object for every
        # instance, so guard against re-adding handlers (the original added
        # them unconditionally, duplicating every log line after the second
        # instantiation).
        self.logger = logging.getLogger('data_standardizer')
        self.logger.setLevel(logging.INFO)
        if not self.logger.handlers:
            formatter = logging.Formatter(
                '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
            # File handler
            file_handler = logging.FileHandler('data_standardizer.log')
            file_handler.setFormatter(formatter)
            self.logger.addHandler(file_handler)
            # Console handler
            console_handler = logging.StreamHandler()
            console_handler.setFormatter(formatter)
            self.logger.addHandler(console_handler)

    def save_to_csv(self, data, filename):
        """Safely write a list of dicts to a CSV file.

        Columns are the union of all row keys in deterministic first-seen
        order (the original used a ``set``, which made column order vary
        between runs).

        Args:
            data (list[dict]): rows to write.
            filename (str): output path; parent directories are created.

        Returns:
            bool: True on success, False on any failure (logged).
        """
        try:
            # Validate: must be a list of dicts.
            if not isinstance(data, list) or not all(isinstance(item, dict) for item in data):
                raise ValueError("数据必须是字典列表")

            # Union of keys, preserving first-seen order for a stable header.
            field_order = {}
            for item in data:
                field_order.update(dict.fromkeys(item))
            fieldnames = list(field_order)

            # os.makedirs('') raises FileNotFoundError, so only create a
            # directory when the filename actually contains one.
            directory = os.path.dirname(filename)
            if directory:
                os.makedirs(directory, exist_ok=True)

            # utf-8-sig writes a BOM so spreadsheet apps decode CJK text.
            with open(filename, 'w', newline='', encoding='utf-8-sig') as csvfile:
                writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
                writer.writeheader()
                writer.writerows(data)

            # Bug fix: the message previously contained a literal
            # "(unknown)" instead of the filename placeholder.
            self.logger.info(f"成功保存 {len(data)} 条数据到 {filename}")
            return True
        except Exception as e:
            self.logger.error(f"CSV保存失败: {str(e)}")
            return False

    def fit_transform(self, data):
        """Fit the scaler and append ``<feat>_std`` keys to each post.

        Rows containing non-numeric values are excluded from fitting but
        are still transformed (their bad values coerced to 0.0). With fewer
        than two usable rows every ``_std`` value defaults to 0.0.

        Args:
            data (pd.DataFrame | list[dict]): input posts.

        Returns:
            list[dict]: the same dicts, augmented in place.

        Raises:
            TypeError: if data is neither a DataFrame nor a list.
        """
        # Accept either a DataFrame or a list of dicts.
        if isinstance(data, pd.DataFrame):
            self.logger.info("检测到DataFrame输入，转换为字典列表")
            data = data.to_dict('records')
        elif not isinstance(data, list):
            raise TypeError("输入数据必须是DataFrame或字典列表")

        # StandardScaler needs at least two samples for a meaningful fit.
        if len(data) < 2:
            self.logger.warning("数据量不足，无法进行有效的标准化处理")
            for post in data:
                for feat in self.numeric_features:
                    post[f'{feat}_std'] = 0.0
            return data

        # Build the feature matrix, flagging rows with invalid values.
        feature_matrix = []
        valid_flags = []
        for post in data:
            row = []
            valid = True
            for feat in self.numeric_features:
                value = post.get(feat, 0)
                try:
                    row.append(float(value))
                except (TypeError, ValueError):
                    # Keep the row (as 0.0) so the output stays aligned with
                    # the input, but exclude it from fitting.
                    row.append(0.0)
                    valid = False
            feature_matrix.append(row)
            valid_flags.append(valid)

        # Fit only on rows where every numeric feature was valid.
        train_matrix = [row for row, valid in zip(feature_matrix, valid_flags) if valid]

        if len(train_matrix) > 1:
            try:
                self.scaler.fit(train_matrix)
                scaled_features = self.scaler.transform(feature_matrix)
                # Persist the fitted scaler for later transform_new_data calls.
                self.save_scaler()
            except Exception as e:
                self.logger.error(f"标准化失败: {str(e)}")
                # Distinct inner lists (the original's [..] * n aliased one
                # shared list across all rows).
                scaled_features = [[0.0] * len(self.numeric_features) for _ in data]
        else:
            self.logger.warning("有效数据不足，使用默认标准化值")
            scaled_features = [[0.0] * len(self.numeric_features) for _ in data]

        # Write the standardized values back; float() unwraps numpy scalars.
        for i, post in enumerate(data):
            for j, feat_name in enumerate(self.numeric_features):
                post[f'{feat_name}_std'] = float(scaled_features[i][j])

        return data

    def save_scaler(self):
        """Persist the fitted scaler to ``self.model_path``.

        Returns:
            bool: True on success, False on failure (logged).
        """
        try:
            # exist_ok avoids the check-then-create race of the original.
            model_dir = os.path.dirname(self.model_path)
            if model_dir and not os.path.isdir(model_dir):
                os.makedirs(model_dir, exist_ok=True)
                self.logger.info(f"创建模型目录: {model_dir}")

            joblib.dump(self.scaler, self.model_path)
            self.logger.info(f"标准化器已保存至 {self.model_path}")
            return True
        except Exception as e:
            self.logger.error(f"保存标准化器失败: {str(e)}")
            return False

    def load_scaler(self):
        """Load a previously saved scaler from ``self.model_path``.

        Returns:
            StandardScaler | None: the loaded scaler, or None if the file
            is missing or loading fails (logged).
        """
        try:
            if not os.path.exists(self.model_path):
                self.logger.warning("标准化器文件不存在")
                return None

            self.scaler = joblib.load(self.model_path)
            self.logger.info("标准化器加载成功")
            return self.scaler
        except Exception as e:
            self.logger.error(f"加载标准化器失败: {str(e)}")
            return None

    def transform_new_data(self, new_posts):
        """Apply the already-fitted scaler to new posts.

        Args:
            new_posts (pd.DataFrame | list[dict]): posts to transform.

        Returns:
            list[dict]: the posts with ``<feat>_std`` keys added.

        Raises:
            ValueError: if no fitted scaler is available and none can be
                loaded from disk.
            TypeError: if new_posts is neither a DataFrame nor a list.
        """
        # Bug fix: the original checked only `self.scaler is None`, which is
        # never true because __init__ always assigns a (possibly unfitted)
        # StandardScaler — an unfitted scaler then crashed in transform()
        # with NotFittedError. Detect the unfitted state via the `mean_`
        # attribute sklearn sets on fit.
        if self.scaler is None or not hasattr(self.scaler, 'mean_'):
            if self.load_scaler() is None:
                raise ValueError("标准化器未初始化且加载失败")

        # Accept either a DataFrame or a list of dicts.
        if isinstance(new_posts, pd.DataFrame):
            new_posts = new_posts.to_dict('records')
        elif not isinstance(new_posts, list):
            raise TypeError("new_posts 必须是DataFrame或字典列表")

        # Extract features, coercing invalid values to 0.0 (same policy
        # as fit_transform).
        feature_matrix = []
        for post in new_posts:
            row = []
            for feat in self.numeric_features:
                value = post.get(feat, 0)
                try:
                    row.append(float(value))
                except (TypeError, ValueError):
                    row.append(0.0)
            feature_matrix.append(row)

        # Apply the fitted standardization.
        scaled_features = self.scaler.transform(feature_matrix)

        # Write standardized values back; float() unwraps numpy scalars.
        for i, post in enumerate(new_posts):
            for j, feat_name in enumerate(self.numeric_features):
                post[f'{feat_name}_std'] = float(scaled_features[i][j])

        return new_posts

if __name__ == '__main__':
    # NOTE: pandas is already imported at module level as `pd`; the
    # original re-imported it here redundantly.

    # Create the standardizer with the default feature list.
    standardizer = DataStandardizer()

    # Source CSV containing the augmented posts.
    augmented_posts_path = r'D:\Users\Lenovo\Desktop\作业4\augmented_data.csv'
    try:
        augmented_df = pd.read_csv(augmented_posts_path)
        print(f"成功读取 {len(augmented_df)} 条增强数据")

        # Convert rows to dicts and standardize the numeric features.
        augmented_posts = augmented_df.to_dict('records')
        standardized_posts = standardizer.fit_transform(augmented_posts)

        # Persist the standardized posts.
        output_path = r'D:\Users\Lenovo\Desktop\作业4\public_opinion.csv'
        if standardizer.save_to_csv(standardized_posts, output_path):
            print(f"标准化数据已保存至 {output_path}")
        else:
            print("数据保存失败")

    except Exception as e:
        # Top-level boundary: report the failure and exit gracefully.
        print(f"处理失败: {str(e)}")