import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.feature_extraction.text import TfidfVectorizer
import jieba
from gensim.models import Word2Vec


class DataProcessor:
    """Load tabular data and run cleaning / feature-extraction pipelines.

    Numeric and text (object-dtype) columns are classified at load time;
    every processing step appends a human-readable entry to
    ``processing_log``.  Method selectors, log entries and return messages
    are intentionally kept in Chinese because the caller (a UI) matches and
    displays them verbatim.
    """

    def __init__(self):
        self.data = None            # working DataFrame (None until load_data)
        self.numeric_columns = []   # columns with a numeric dtype
        self.text_columns = []      # columns with object (text) dtype
        self.processing_log = []    # human-readable log, one entry per step

    def load_data(self, file_path):
        """Load a CSV or Excel file into ``self.data`` and classify columns.

        Args:
            file_path: path whose extension selects the reader
                (.csv / .xlsx / .xls).

        Returns:
            A Chinese status message (never raises — errors are folded into
            the returned string so the UI can display them directly).
        """
        try:
            if file_path.endswith('.csv'):
                self.data = pd.read_csv(file_path)
            elif file_path.endswith(('.xlsx', '.xls')):
                self.data = pd.read_excel(file_path)
            else:
                raise ValueError("不支持的文件格式")

            # Classify columns once so later steps know what to process.
            self.numeric_columns = self.data.select_dtypes(include=[np.number]).columns
            self.text_columns = self.data.select_dtypes(include=['object']).columns

            return "数据加载成功"
        except Exception as e:
            return f"数据加载失败: {str(e)}"

    def clean_numeric_data(self, method):
        """Clean numeric data with the chosen missing-value strategy.

        Regardless of ``method``, duplicate rows are then removed and
        Z-score outliers (|z| >= 3) are dropped column by column.

        Args:
            method: one of "删除缺失值", "均值填充", "中位数填充".

        Returns:
            A Chinese status message; errors are reported in the message.
        """
        try:
            if method == "删除缺失值":
                before_count = len(self.data)
                self.data = self.data.dropna()
                after_count = len(self.data)
                self.processing_log.append(f"删除缺失值: 从{before_count}行减少到{after_count}行")

            elif method == "均值填充":
                for col in self.numeric_columns:
                    mean_value = self.data[col].mean()
                    # Assign back instead of chained `inplace=True` fillna,
                    # which is deprecated and a silent no-op under pandas
                    # copy-on-write.
                    self.data[col] = self.data[col].fillna(mean_value)
                    self.processing_log.append(f"列{col}使用均值{mean_value:.2f}填充缺失值")

            elif method == "中位数填充":
                for col in self.numeric_columns:
                    median_value = self.data[col].median()
                    self.data[col] = self.data[col].fillna(median_value)
                    self.processing_log.append(f"列{col}使用中位数{median_value:.2f}填充缺失值")

            # Remove duplicate rows.
            before_count = len(self.data)
            self.data = self.data.drop_duplicates()
            after_count = len(self.data)
            self.processing_log.append(f"去除重复数据: 从{before_count}行减少到{after_count}行")

            # Outlier removal via Z-score, recomputed per column because each
            # filter shrinks self.data.
            for col in self.numeric_columns:
                std = self.data[col].std()
                if not std or np.isnan(std):
                    # Constant/empty/single-row column: std is 0 or NaN, so
                    # every z-comparison would be False and silently drop
                    # ALL rows.  Skip instead.
                    continue
                z_scores = np.abs((self.data[col] - self.data[col].mean()) / std)
                self.data = self.data[z_scores < 3]
                self.processing_log.append(f"列{col}使用Z-score方法(|z| < 3)处理异常值")

            return "数值数据清理成功"
        except Exception as e:
            return f"数值数据清理失败: {str(e)}"

    def clean_text_data(self, method):
        """Clean text columns: tokenise, remove stopwords, or normalise.

        Args:
            method: one of "分词", "去除停用词", "格式统一化".

        Returns:
            A Chinese status message; errors are reported in the message.
        """
        try:
            if method == "分词":
                for col in self.text_columns:
                    # jieba tokenisation; result goes into a new column so the
                    # original text is preserved.
                    self.data[f"{col}_分词"] = self.data[col].apply(lambda x: ' '.join(jieba.cut(str(x))))
                    self.processing_log.append(f"列{col}进行分词处理")

            elif method == "去除停用词":
                # Minimal built-in Chinese stopword list.
                stopwords = set(['的', '了', '和', '是', '就', '都', '而', '及', '与', '着'])
                for col in self.text_columns:
                    self.data[f"{col}_清洗"] = self.data[col].apply(
                        lambda x: ' '.join([word for word in jieba.cut(str(x)) if word not in stopwords])
                    )
                    self.processing_log.append(f"列{col}去除停用词")

            elif method == "格式统一化":
                for col in self.text_columns:
                    # Lower-case then strip leading/trailing whitespace.
                    # NOTE(review): the .str accessor turns non-string cells
                    # into NaN — presumably acceptable for object columns.
                    self.data[col] = self.data[col].str.lower().str.strip()
                    self.processing_log.append(f"列{col}进行格式统一化处理")

            return "文本数据清理成功"
        except Exception as e:
            return f"文本数据清理失败: {str(e)}"

    def extract_features(self, method, params=None):
        """Extract features via PCA, LDA, TF-IDF or Word2Vec.

        New feature columns are appended to ``self.data``.

        Args:
            method: one of "PCA", "LDA", "TF-IDF", "Word2Vec".
            params: optional dict of method parameters; sensible defaults
                are used for any missing key.  LDA requires
                ``params['target']``.

        Returns:
            A Chinese status message; errors are reported in the message.
        """
        try:
            # Bug fix: the original dereferenced `params.get(...)` directly,
            # crashing with AttributeError whenever the caller omitted params.
            params = params or {}

            if method == "PCA":
                # Standardise first so every feature contributes equally to
                # the principal components.
                pca = PCA(n_components=params.get('n_components', 2))
                numeric_data = self.data[self.numeric_columns]
                scaled_data = StandardScaler().fit_transform(numeric_data)
                pca_result = pca.fit_transform(scaled_data)

                explained_variance = pca.explained_variance_ratio_
                self.processing_log.append(f"PCA降维: 解释方差比例 = {explained_variance}")

                for i in range(pca_result.shape[1]):
                    self.data[f'PCA_{i+1}'] = pca_result[:, i]

            elif method == "LDA":
                if 'target' not in params:
                    raise ValueError("LDA需要指定目标变量")

                lda = LinearDiscriminantAnalysis(n_components=params.get('n_components', 2))
                # Exclude the target itself from the feature matrix —
                # otherwise a numeric target leaks the label into the
                # projection.
                feature_cols = [c for c in self.numeric_columns if c != params['target']]
                numeric_data = self.data[feature_cols]
                target = self.data[params['target']]
                lda_result = lda.fit_transform(numeric_data, target)

                for i in range(lda_result.shape[1]):
                    self.data[f'LDA_{i+1}'] = lda_result[:, i]

            elif method == "TF-IDF":
                for col in self.text_columns:
                    tfidf = TfidfVectorizer(max_features=params.get('max_features', 100))
                    # Densify the whole sparse matrix once.  The original
                    # assigned per-column `tfidf_result[:, i].toarray()` — a
                    # 2-D (n, 1) array — and inserted up to max_features
                    # columns one at a time, fragmenting the DataFrame.
                    tfidf_matrix = tfidf.fit_transform(self.data[col].astype(str)).toarray()
                    feature_names = tfidf.get_feature_names_out()

                    tfidf_df = pd.DataFrame(
                        tfidf_matrix,
                        columns=[f'{col}_TFIDF_{feature}' for feature in feature_names],
                        index=self.data.index,
                    )
                    self.data = pd.concat([self.data, tfidf_df], axis=1)

            elif method == "Word2Vec":
                vector_size = params.get('vector_size', 100)
                for col in self.text_columns:
                    # Tokenise every cell, then train a small Word2Vec model
                    # on this column's sentences.
                    sentences = [jieba.lcut(str(text)) for text in self.data[col]]
                    model = Word2Vec(sentences, vector_size=vector_size,
                                     window=5, min_count=1, workers=4)

                    def get_mean_vector(words):
                        # Mean of the known word vectors; zero vector when no
                        # token is in the vocabulary.
                        vectors = [model.wv[word] for word in words if word in model.wv]
                        if vectors:
                            return np.mean(vectors, axis=0)
                        return np.zeros(vector_size)

                    w2v_matrix = np.vstack([get_mean_vector(text) for text in sentences])
                    w2v_df = pd.DataFrame(
                        w2v_matrix,
                        columns=[f'{col}_W2V_{i}' for i in range(vector_size)],
                        index=self.data.index,
                    )
                    self.data = pd.concat([self.data, w2v_df], axis=1)

            else:
                # Bug fix: the original fell through every branch for an
                # unknown method and still reported success.
                raise ValueError(f"未知的特征提取方法: {method}")

            return f"{method}特征提取成功"
        except Exception as e:
            return f"{method}特征提取失败: {str(e)}"

    def get_processing_log(self):
        """Return the accumulated processing log as one newline-joined string."""
        return "\n".join(self.processing_log)

    def get_data_preview(self, rows=5):
        """Return the first ``rows`` rows of the data rendered as text."""
        return self.data.head(rows).to_string()

    def get_data_info(self):
        """Return shape, column classification and describe() output as text."""
        info = []
        info.append(f"数据形状: {self.data.shape}")
        info.append(f"数值列: {list(self.numeric_columns)}")
        info.append(f"文本列: {list(self.text_columns)}")
        info.append("\n数据描述统计:")
        info.append(self.data.describe().to_string())
        return "\n".join(info)