import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from hmmlearn import hmm
from scipy.stats import mode
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.metrics import mean_squared_error, accuracy_score, recall_score, f1_score, precision_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, LabelEncoder, MinMaxScaler
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.layers import Dense, LSTM, Bidirectional, Dropout, RNN, SimpleRNN
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam

class MLModels:
    """Classic ML models (linear/logistic regression, Gaussian HMM, KMeans,
    PCA) sharing standardization, label encoding, and plotting helpers.

    Every supervised method returns a dict of metrics plus the fitted model,
    so results can be fed straight into ``compare_models``.
    """

    def __init__(self, test_size=0.2, random_state=42):
        self.test_size = test_size          # fraction of data held out for testing
        self.random_state = random_state    # seed for reproducible splits
        self.scaler = StandardScaler()
        self.label_encoder = LabelEncoder()

    def prepare_data(self, X, y):
        """Standardize features, label-encode targets, and split train/test.

        Returns (X_train, X_test, y_train, y_test).

        NOTE(review): y is label-encoded unconditionally, so continuous
        regression targets get remapped to integer class codes — confirm this
        is intended before calling linear_regression on real-valued y.
        """
        X_scaled = self.scaler.fit_transform(X)
        y_encoded = self.label_encoder.fit_transform(y)

        X_train, X_test, y_train, y_test = train_test_split(
            X_scaled, y_encoded,
            test_size=self.test_size,
            random_state=self.random_state
        )
        return X_train, X_test, y_train, y_test

    def linear_regression(self, X, y):
        """Fit ordinary least squares; report MSE / R^2 and plot actual vs
        predicted values."""
        print("Running Linear Regression...")
        X_train, X_test, y_train, y_test = self.prepare_data(X, y)

        # Create and train the model.
        model = LinearRegression()
        model.fit(X_train, y_train)

        # Predict on the held-out split.
        y_pred = model.predict(X_test)

        # Evaluation metrics. Local renamed from `r2_score` to avoid
        # shadowing the sklearn.metrics function of the same name.
        mse = mean_squared_error(y_test, y_pred)
        r2 = model.score(X_test, y_test)  # coefficient of determination

        # Scatter actual vs. predicted with the ideal y = x reference line.
        plt.figure(figsize=(10, 6))
        plt.scatter(y_test, y_pred, alpha=0.5)
        plt.plot([y_test.min(), y_test.max()], [y_test.min(), y_test.max()], 'r--', lw=2)
        plt.xlabel('Actual Values')
        plt.ylabel('Predicted Values')
        plt.title('Linear Regression: Actual vs Predicted')
        plt.show()

        return {
            'mse': mse,
            'r2_score': r2,
            'model': model
        }

    def logistic_regression(self, X, y):
        """Fit a logistic-regression classifier and report standard
        classification metrics plus a confusion-matrix heatmap."""
        print("Running Logistic Regression...")
        X_train, X_test, y_train, y_test = self.prepare_data(X, y)

        # Create and train the model.
        model = LogisticRegression(max_iter=1000)
        model.fit(X_train, y_train)

        # Predict on the held-out split.
        y_pred = model.predict(X_test)

        # Weighted averages handle multi-class targets.
        accuracy = accuracy_score(y_test, y_pred)
        precision = precision_score(y_test, y_pred, average='weighted')
        recall = recall_score(y_test, y_pred, average='weighted')
        f1 = f1_score(y_test, y_pred, average='weighted')
        # MSE over encoded class codes — kept for cross-model comparison.
        mse = mean_squared_error(y_test, y_pred)

        # Visualize the confusion matrix.
        self.plot_confusion_matrix(y_test, y_pred, "Logistic Regression")

        return {
            'accuracy': accuracy,
            'precision': precision,
            'recall': recall,
            'f1': f1,
            'mse': mse,
            'model': model
        }

    def hmm_model(self, X, y, n_components=3):
        """Fit a Gaussian HMM, map each hidden state to its majority training
        label, and score that mapping as a classifier on the test split.

        NOTE(review): prepare_data shuffles rows, which destroys any temporal
        ordering the HMM is meant to exploit — confirm against the caller.
        """
        print("Running HMM...")
        X_train, X_test, y_train, y_test = self.prepare_data(X, y)

        # Create and train the HMM.
        model = hmm.GaussianHMM(n_components=n_components, covariance_type="full", n_iter=200)
        model.fit(X_train)

        # Decode hidden states on the training data.
        hidden_states = model.predict(X_train)

        # Assign each hidden state the most frequent training label seen in it.
        # np.bincount works because LabelEncoder yields codes 0..k-1; the old
        # scipy.stats.mode(...).mode[0] breaks on SciPy >= 1.11, where the
        # keepdims default changed and .mode became a scalar. Ties resolve to
        # the smallest label in both versions.
        state_to_label = {}
        for state in np.unique(hidden_states):
            state_labels = y_train[hidden_states == state]
            state_to_label[state] = int(np.bincount(state_labels).argmax())

        # Predict: decode test states, then translate states to labels.
        test_states = model.predict(X_test)
        y_pred = np.array([state_to_label[state] for state in test_states])

        # Evaluation metrics (weighted for multi-class targets).
        accuracy = accuracy_score(y_test, y_pred)
        precision = precision_score(y_test, y_pred, average='weighted')
        recall = recall_score(y_test, y_pred, average='weighted')
        f1 = f1_score(y_test, y_pred, average='weighted')
        mse = mean_squared_error(y_test, y_pred)

        # Visualize the hidden-state transition probabilities.
        self.plot_transition_matrix(model.transmat_, "HMM State Transition Probabilities")

        return {
            'accuracy': accuracy,
            'precision': precision,
            'recall': recall,
            'f1': f1,
            'mse': mse,
            'model': model
        }

    def kmeans_clustering(self, X, y=None, n_clusters=3):
        """Unsupervised learning: KMeans clustering (y is ignored)."""
        from sklearn.cluster import KMeans
        X_scaled = self.scaler.fit_transform(X)
        # n_init pinned to the classic default (10): newer scikit-learn
        # changed the default to 'auto' and warns when it is unspecified.
        kmeans = KMeans(n_clusters=n_clusters, n_init=10, random_state=self.random_state)
        labels = kmeans.fit_predict(X_scaled)
        inertia = kmeans.inertia_
        # Visualize clusters on the first two (scaled) feature dimensions.
        plt.figure(figsize=(6, 6))
        plt.scatter(X_scaled[:, 0], X_scaled[:, 1], c=labels, cmap='viridis', s=50)
        plt.title('KMeans Clustering')
        plt.xlabel('Feature 1')
        plt.ylabel('Feature 2')
        plt.show()
        return {'model': 'KMeans', 'inertia': inertia, 'labels': labels}

    def pca_reduction(self, X, y=None, n_components=2):
        """Unsupervised learning: PCA dimensionality reduction."""
        from sklearn.decomposition import PCA
        X_scaled = self.scaler.fit_transform(X)
        pca = PCA(n_components=n_components)
        components = pca.fit_transform(X_scaled)
        explained = pca.explained_variance_ratio_
        # Scatter the first two principal components, colored by y if given.
        plt.figure(figsize=(6, 6))
        plt.scatter(components[:, 0], components[:, 1], c=y if y is not None else 'b', cmap='viridis', s=50)
        plt.title('PCA Reduction')
        plt.xlabel('PC1')
        plt.ylabel('PC2')
        plt.show()
        return {'model': 'PCA', 'explained_variance_ratio': explained, 'components': components}

    def q_learning(self, X, y=None):
        """Reinforcement-learning example: Q-Learning placeholder."""
        print('Running Q-Learning (placeholder)...')
        # TODO: implement the actual reinforcement-learning logic
        return {'model': 'Q-Learning', 'info': '功能占位，待实现'}

    def sarsa(self, X, y=None):
        """Reinforcement-learning example: SARSA placeholder."""
        print('Running SARSA (placeholder)...')
        # TODO: implement the actual reinforcement-learning logic
        return {'model': 'SARSA', 'info': '功能占位，待实现'}

    def plot_confusion_matrix(self, y_true, y_pred, title):
        """Heatmap of the confusion matrix built via pandas crosstab."""
        cm = pd.crosstab(y_true, y_pred, rownames=['Actual'], colnames=['Predicted'])
        plt.figure(figsize=(10, 8))
        sns.heatmap(cm, annot=True, fmt='d', cmap='Blues')
        plt.title(title)
        plt.show()

    def plot_transition_matrix(self, transition_matrix, title):
        """Heatmap of an HMM state-transition probability matrix."""
        plt.figure(figsize=(10, 8))
        sns.heatmap(transition_matrix, annot=True, cmap='Blues')
        plt.title(title)
        plt.xlabel('To State')
        plt.ylabel('From State')
        plt.show()

class NeuralNetworks:
    """Sequence classifiers (SimpleRNN and bidirectional LSTM) built on
    Keras, with shared windowing, training, and plotting helpers."""

    def __init__(self, test_size=0.2, random_state=42):
        self.test_size = test_size          # fraction of windows held out for testing
        self.random_state = random_state    # seed for reproducible splits
        self.scaler = MinMaxScaler()        # NN inputs scaled to [0, 1]
        self.label_encoder = LabelEncoder()

    def prepare_sequence_data(self, X, y, timesteps):
        """Scale X, encode y, window into overlapping sequences of length
        `timesteps`, and return a train/test split of the windows.

        NOTE(review): train_test_split shuffles the windows; for strictly
        chronological evaluation a time-ordered split may be needed — confirm.
        """
        X_scaled = self.scaler.fit_transform(X)
        y_encoded = self.label_encoder.fit_transform(y)

        # Each sample is a sliding window of `timesteps` rows; its label is
        # the encoded target immediately following the window.
        X_seq = []
        y_seq = []
        for i in range(len(X_scaled) - timesteps):
            X_seq.append(X_scaled[i:(i + timesteps)])
            y_seq.append(y_encoded[i + timesteps])

        X_seq = np.array(X_seq)
        y_seq = np.array(y_seq)

        # Split the windowed dataset.
        return train_test_split(
            X_seq, y_seq,
            test_size=self.test_size,
            random_state=self.random_state
        )

    def create_rnn_model(self, input_shape, output_dim):
        """Two-layer simple recurrent classifier with dropout.

        BUG FIX: the original used keras.layers.RNN, which is the abstract
        wrapper that requires a cell object — RNN(64) raises at construction.
        SimpleRNN is the concrete layer intended here.
        """
        model = Sequential([
            SimpleRNN(64, input_shape=input_shape, return_sequences=True),
            Dropout(0.2),
            SimpleRNN(32),
            Dropout(0.2),
            Dense(output_dim, activation='softmax')
        ])
        return model

    def create_bilstm_model(self, input_shape, output_dim):
        """Two-layer bidirectional LSTM classifier with dropout."""
        model = Sequential([
            Bidirectional(LSTM(64, return_sequences=True), input_shape=input_shape),
            Dropout(0.2),
            Bidirectional(LSTM(32)),
            Dropout(0.2),
            Dense(output_dim, activation='softmax')
        ])
        return model

    def train_model(self, model, X_train, y_train, X_test, y_test, model_name):
        """Compile, fit with early stopping / checkpointing, and evaluate.

        Returns a dict of weighted classification metrics plus the fitted
        model and its Keras History object.
        """
        # Compile: integer labels -> sparse categorical cross-entropy.
        model.compile(
            optimizer=Adam(learning_rate=0.001),
            loss='sparse_categorical_crossentropy',
            metrics=['accuracy']
        )

        # Callbacks: stop when val loss stalls; keep the best weights on disk.
        callbacks = [
            EarlyStopping(patience=5, restore_best_weights=True),
            ModelCheckpoint(
                f'{model_name}_best_model.h5',
                save_best_only=True
            )
        ]

        # Train the model (test split doubles as the validation set).
        history = model.fit(
            X_train, y_train,
            validation_data=(X_test, y_test),
            epochs=50,
            batch_size=32,
            callbacks=callbacks,
            verbose=1
        )

        # Predicted class = argmax over the softmax outputs.
        y_pred = np.argmax(model.predict(X_test), axis=-1)

        # Evaluation metrics (weighted for multi-class targets).
        accuracy = accuracy_score(y_test, y_pred)
        precision = precision_score(y_test, y_pred, average='weighted')
        recall = recall_score(y_test, y_pred, average='weighted')
        f1 = f1_score(y_test, y_pred, average='weighted')
        mse = mean_squared_error(y_test, y_pred)

        # Visualize the training curves.
        self.plot_training_history(history, model_name)

        return {
            'accuracy': accuracy,
            'precision': precision,
            'recall': recall,
            'f1': f1,
            'mse': mse,
            'model': model,
            'history': history
        }

    def train_rnn(self, X, y, timesteps=30):
        """Window the data and train the SimpleRNN classifier."""
        print("Training RNN...")
        X_train, X_test, y_train, y_test = self.prepare_sequence_data(X, y, timesteps)

        model = self.create_rnn_model(
            input_shape=(timesteps, X.shape[1]),
            output_dim=len(np.unique(y))
        )

        return self.train_model(model, X_train, y_train, X_test, y_test, "RNN")

    def train_bilstm(self, X, y, timesteps=30):
        """Window the data and train the bidirectional LSTM classifier."""
        print("Training Bidirectional LSTM...")
        X_train, X_test, y_train, y_test = self.prepare_sequence_data(X, y, timesteps)

        model = self.create_bilstm_model(
            input_shape=(timesteps, X.shape[1]),
            output_dim=len(np.unique(y))
        )

        return self.train_model(model, X_train, y_train, X_test, y_test, "BiLSTM")

    def plot_training_history(self, history, model_name):
        """Plot training/validation loss and accuracy side by side."""
        plt.figure(figsize=(12, 4))

        # Loss curves.
        plt.subplot(1, 2, 1)
        plt.plot(history.history['loss'], label='Training Loss')
        plt.plot(history.history['val_loss'], label='Validation Loss')
        plt.title(f'{model_name} Training and Validation Loss')
        plt.xlabel('Epoch')
        plt.ylabel('Loss')
        plt.legend()

        # Accuracy curves.
        plt.subplot(1, 2, 2)
        plt.plot(history.history['accuracy'], label='Training Accuracy')
        plt.plot(history.history['val_accuracy'], label='Validation Accuracy')
        plt.title(f'{model_name} Training and Validation Accuracy')
        plt.xlabel('Epoch')
        plt.ylabel('Accuracy')
        plt.legend()

        plt.tight_layout()
        plt.show()

def compare_models(results):
    """Tabulate per-model metrics into a numeric DataFrame and bar-plot them.

    Parameters
    ----------
    results : dict
        Maps model name -> dict of metric values (as returned by the model
        methods above). Metrics a model lacks become NaN.

    Returns
    -------
    pandas.DataFrame
        Models as rows, metrics as columns, float dtype.
    """
    models = list(results.keys())
    metrics = ['accuracy', 'precision', 'recall', 'f1', 'mse']

    # Build the frame numerically up front: the previous cell-by-cell fill of
    # an empty DataFrame left object-dtype columns, which seaborn can reject.
    results_df = pd.DataFrame(
        {metric: [results[model].get(metric, np.nan) for model in models]
         for metric in metrics},
        index=models,
    )

    # Visualize one bar chart per metric (2x3 grid fits the 5 metrics).
    plt.figure(figsize=(15, 10))

    for i, metric in enumerate(metrics, 1):
        plt.subplot(2, 3, i)
        sns.barplot(x=results_df.index, y=results_df[metric])
        plt.title(f'Model Comparison - {metric.upper()}')
        plt.xticks(rotation=45)
        plt.ylabel(metric)

    plt.tight_layout()
    plt.show()

    return results_df

# Usage example
def main():
    """Example driver showing how the pieces above fit together.

    The data-loading and model-running lines are left commented out as a
    template; fill in X and y, then uncomment the calls you need.
    """
    # Load data (example):
    # data = pd.read_csv('your_data.csv')
    # X = data.drop('target', axis=1)
    # y = data['target']

    # Instantiate the two model suites.
    ml_models = MLModels()
    nn_models = NeuralNetworks()

    # Collect each model's metric dict here, keyed by display name.
    results = {}

    # Classic machine-learning models:
    # results['Linear Regression'] = ml_models.linear_regression(X, y)
    # results['Logistic Regression'] = ml_models.logistic_regression(X, y)
    # results['HMM'] = ml_models.hmm_model(X, y)

    # Neural-network models:
    # results['RNN'] = nn_models.train_rnn(X, y)
    # results['BiLSTM'] = nn_models.train_bilstm(X, y)

    # Compare model performance:
    # compare_models(results)


if __name__ == "__main__":
    main()