from copy import deepcopy
import os
import pickle
import numpy as np
from sklearn.cluster import MeanShift
from sklearn.metrics import mean_absolute_error, r2_score
from sklearn.discriminant_analysis import StandardScaler
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import MinMaxScaler
from sklearn.svm import SVR
from torch.utils.data import DataLoader, TensorDataset
import torch
import torch.nn as nn
import pandas as pd
from tqdm import tqdm
import warnings

# Load training and validation data; missing values are replaced with 0.
data = pd.read_csv('data.csv')
data_val = pd.read_csv('data_val.csv')
data = data.fillna(0)
data_val = data_val.fillna(0)
# Run on GPU when available, otherwise CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Silence library warnings (grid search and sklearn are noisy).
warnings.filterwarnings("ignore")
class SVMCluster:
    """Cluster samples with MeanShift, then fit one SVR per cluster.

    Small clusters are augmented with jittered copies of their members so
    that every cluster has enough samples to train a regressor.  All fitted
    models are pickled under 'MLP-SVMC Model/'.
    """

    def __init__(self, X, y):
        self.X = X
        self.y = y
        # Standardize features once; the fitted scaler is reused in predict().
        self.preprocessor = StandardScaler()
        self.X = self.preprocessor.fit_transform(self.X)

    def cluster(self) -> np.ndarray:
        """Run MeanShift, augment small clusters, re-cluster, persist the model.

        Returns:
            Cluster label of every (augmented) sample.
        """
        p = MeanShift(bandwidth=1)
        p.fit(self.X)
        print("Number of clusters before augmentation:", len(np.unique(p.labels_)))
        newX = deepcopy(self.X)
        newy = deepcopy(self.y)
        for i in range(len(np.unique(p.labels_))):
            # Only augment clusters with fewer than 5 members.
            if len(self.X[p.labels_ == i]) < 5:
                for j in range(len(p.labels_)):
                    if p.labels_[j] == i:
                        # Generate random points near the existing sample.
                        # NOTE(review): the +/-100000 target jitter assumes the
                        # target scale is large — confirm against the data.
                        for _ in range(10):
                            newX = np.append(newX, self.X[j]+np.random.uniform(low=-0.16, high=0.16, size=(1, self.X.shape[1])), axis=0)
                            newy = np.append(newy, self.y[j]+np.random.uniform(low=-100000, high=100000, size=(1)), axis=0)
        # Re-fit MeanShift on the augmented data.
        p = MeanShift(bandwidth=1)
        p.fit(newX)
        self.X = newX
        self.y = newy
        labels = p.labels_
        self.save_model(p, 'MLP-SVMC Model/MeanShift.pkl')
        return labels

    def optim(self, X, y):
        """Grid-search SVR hyper-parameters on one cluster; return best params."""
        regressor = SVR()
        parameters = {
            'kernel': ['linear', 'poly', 'rbf', 'sigmoid'],
            'C': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
            'gamma': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1],
            'epsilon': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1],
            'degree': [1, 2, 3, 4, 5],
            'coef0': [0, 0.1, 0.2, 0.3]
        }
        clf = GridSearchCV(regressor, parameters, cv=3, n_jobs=-1, scoring='neg_mean_squared_error')
        clf.fit(X, y)
        return clf.best_params_

    def train(self):
        """Fit and persist one tuned SVR per MeanShift cluster."""
        ms = self.load_model('MLP-SVMC Model/MeanShift.pkl')
        labels = ms.predict(self.X)
        numbers = len(np.unique(labels))

        for i in tqdm(range(numbers)):
            X = self.X[labels == i]
            y = self.y[labels == i]
            if len(X) >= 4:
                param = self.optim(X, y)
                regressor = SVR(**param)
                regressor.fit(X, y)
                self.save_model(regressor, f'MLP-SVMC Model/SVMmodel_{i}.pkl')
            else:
                print(f"Not enough samples for cluster {i}, skipping training.")

    def predict(self, X):
        """Predict one value per row of X using the per-cluster SVR models."""
        X = self.preprocessor.transform(X)
        labels = self.load_model('MLP-SVMC Model/MeanShift.pkl').predict(X)
        y_pred = np.zeros(len(X))
        for i in range(np.max(labels)+1):
            # BUG FIX: compute the mask before the try block.  It was
            # previously assigned inside the try after load_model(), so a
            # missing model file used a stale mask from the prior iteration
            # (or raised NameError on the first one).
            indices = labels == i
            if not np.any(indices):
                continue
            try:
                model = self.load_model(f'MLP-SVMC Model/SVMmodel_{i}.pkl')
                y_pred[indices] = model.predict(X[indices])
            except FileNotFoundError:
                print(f"No model found for label {i}")
                # Fall back to the mean of predictions made so far; guard the
                # all-zero case which would otherwise produce NaN.
                filled = y_pred[y_pred != 0]
                y_pred[indices] = np.mean(filled) if len(filled) else 0.0
        return y_pred

    def save_model(self, model, name):
        """Pickle *model* to file *name*."""
        with open(name, 'wb') as f:
            pickle.dump(model, f)

    def load_model(self, name):
        """Unpickle and return the model stored at *name*.

        Raises:
            FileNotFoundError: if no file exists at *name*.
        """
        if os.path.exists(name):
            with open(name, 'rb') as f:
                model = pickle.load(f)
        else:
            raise FileNotFoundError(f"Model file {name} not found")
        return model
           
class MLP(nn.Module):
    """Four-layer fully-connected regressor with LeakyReLU activations.

    Layout: fc1 -> LeakyReLU -> fc2 -> LeakyReLU -> Dropout(0.25)
            -> fc3 -> LeakyReLU -> fc4.
    The layer attribute names (fc1..fc4) are part of the persisted
    state_dict and must not be renamed.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super(MLP, self).__init__()

        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size

        self.dropout = nn.Dropout(0.25)
        self.leakyrelu = nn.LeakyReLU()

        self.fc1 = nn.Linear(input_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.fc3 = nn.Linear(hidden_size, hidden_size)
        self.fc4 = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        """Map a (batch, input_size) tensor to (batch, output_size)."""
        hidden = self.leakyrelu(self.fc1(x))
        hidden = self.dropout(self.leakyrelu(self.fc2(hidden)))
        hidden = self.leakyrelu(self.fc3(hidden))
        return self.fc4(hidden)
    
def MLP_train(data):
    """Train the MLP regressor on *data* and save its weights.

    The last column of *data* is the target; the remaining columns are
    features.  Uses a sequential (unshuffled) 80/20 train/test split, trains
    for 200 epochs with Adam + StepLR, prints correlation/R2/MAE on the test
    split (in MinMax-scaled target space), and writes the state dict to
    'MLP-SVMC Model/mlp_model.pth'.
    """
    X = data.iloc[:, :-1].values
    y = data.iloc[:, -1].values

    # Preprocessing: standardize features, scale the target to [0, 1].
    scaler_X = StandardScaler()
    scaler_y = MinMaxScaler()
    X = scaler_X.fit_transform(X)
    y = scaler_y.fit_transform(y.reshape(-1, 1)).flatten()

    # Sequential 80/20 split (no shuffling — assumes row order is arbitrary;
    # TODO confirm the CSV is not time-ordered).
    split = int(0.8 * len(X))
    train_X, test_X = X[:split], X[split:]
    train_y, test_y = y[:split], y[split:]
    torch_X = torch.tensor(train_X, dtype=torch.float32).to(device)
    torch_y = torch.tensor(train_y, dtype=torch.float32).to(device)
    train_loader = DataLoader(TensorDataset(torch_X, torch_y), batch_size=32, shuffle=True)
    test_loader = DataLoader(
        TensorDataset(
            torch.tensor(test_X, dtype=torch.float32).to(device),
            torch.tensor(test_y, dtype=torch.float32).to(device),
        ),
        batch_size=32,
        shuffle=False,
    )

    # Model, loss, optimizer, LR schedule (halve every 50 epochs).
    input_size = torch_X.shape[1]
    hidden_size = 64
    output_size = 1
    mlp_model = MLP(input_size, hidden_size, output_size).to(device)
    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(mlp_model.parameters(), lr=0.001)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=50, gamma=0.5)

    # Training loop.
    mlp_model.train()  # make dropout explicit (previously relied on default mode)
    num_epochs = 200
    for epoch in range(num_epochs):
        for inputs, targets in train_loader:
            optimizer.zero_grad()
            outputs = mlp_model(inputs)
            # BUG FIX: squeeze only the last dim.  A bare .squeeze() turns a
            # final (1, 1) batch into a 0-d tensor, which silently broadcasts
            # against the (1,)-shaped targets in MSELoss.
            loss = criterion(outputs.squeeze(-1), targets)
            loss.backward()
            optimizer.step()
        scheduler.step()

    # Evaluate on the held-out split with dropout disabled.
    mlp_model.eval()
    predicted = []
    true_values = []
    with torch.no_grad():
        for inputs, targets in test_loader:
            outputs = mlp_model(inputs)
            predicted.extend(outputs.cpu().numpy().flatten())
            true_values.extend(targets.cpu().numpy().flatten())
    correlation_xy = np.corrcoef(true_values, predicted)[0, 1]
    print('correlation_xy:', correlation_xy)
    print('r2_score:', r2_score(true_values, predicted))
    print('mae:', mean_absolute_error(true_values, predicted))

    # Persist the trained weights.
    torch.save(mlp_model.state_dict(), 'MLP-SVMC Model/mlp_model.pth')
    
def MLP_load(path, input_size=30, hidden_size=64, output_size=1):
    """Load an MLP from the state dict at *path*.

    The architecture parameters default to the sizes this script trains with
    (input_size=30 must match the feature count of the training CSV —
    TODO confirm against data.csv).  Backward compatible: existing callers
    pass only *path*.
    """
    model = MLP(input_size=input_size, hidden_size=hidden_size, output_size=output_size).to(device)
    # map_location lets CUDA-trained weights load on a CPU-only machine.
    model.load_state_dict(torch.load(path, map_location=device))
    return model
if __name__ == '__main__':
    
    # Create the model directory if it does not exist.
    if not os.path.exists('MLP-SVMC Model'):
        os.makedirs('MLP-SVMC Model')
        
        
    # Load the MLP if already trained; otherwise train it first.
    if os.path.exists('MLP-SVMC Model/mlp_model.pth'):
        print('模型已存在，加载模型')
        MLP_model = MLP_load('MLP-SVMC Model/mlp_model.pth')
    else:
        print('模型不存在，开始训练')
        MLP_train(data)
        MLP_model = MLP_load('MLP-SVMC Model/mlp_model.pth')
    MLP_model.eval()
    
    # Read the data: last column is the target, the rest are features.
    
    
    X = data.iloc[:, :-1].values
    y = data.iloc[:, -1].values
    # Hand-picked raw columns fed directly to the SVM alongside MLP features.
    schema = [
        'Set number',
        'Single-machine capacity',
        'Hub project permanent area (mu)',
        'Normal water level of lower reservoir (m)',
        'Normal storage level of Upper Reservoir (m)',
    ]
    X_val = data_val.loc[:, schema].values
    y_val = data_val.iloc[:, -1].values
    svm_X = data.loc[:, schema].values

    # Preprocessing step 1: standardize the full feature set for the MLP.
    scaler_X = StandardScaler()
    mlp_X = deepcopy(scaler_X.fit_transform(X))

    # Use the MLP (in eval mode) as a feature extractor on train and
    # validation data.  NOTE(review): validation features are transformed
    # with the scaler fitted on training data — intentional, verify.
    MLP_features = MLP_model(torch.tensor(mlp_X, dtype=torch.float32).to(device)).cpu().detach().numpy()
    MLP_features_val = MLP_model(torch.tensor(scaler_X.transform(data_val.iloc[:, :-1].values), dtype=torch.float32).to(device)).cpu().detach().numpy()
    
    # Append the MLP-derived features to the hand-picked SVM columns.
    svm_X = np.column_stack((svm_X, MLP_features))
    X_val = np.column_stack((X_val, MLP_features_val))
    
    # Build the cluster-SVM model (standardizes svm_X internally).
    svm_model = SVMCluster(svm_X, y)
    
    # Train only if no persisted MeanShift model exists yet.
    if not os.path.exists('MLP-SVMC Model/MeanShift.pkl'):
        svm_model.cluster()
        svm_model.train()
    
    # Predict on the validation set.
    y_pred = svm_model.predict(X_val)
    
    # Report metrics against the validation targets.
    print('predicted:', y_pred)
    print('Mean Absolute Error:', mean_absolute_error(y_val, y_pred))
    print('Max Absolute Error:', np.abs(y_val - y_pred).max())
    print('Min Absolute Error:', np.abs(y_val - y_pred).min())
    print('R2:', r2_score(y_val, y_pred))

    

    
    