import pandas as pd
import joblib
import numpy as np
from sklearn.preprocessing import StandardScaler
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
from sklearn.metrics import r2_score
import matplotlib.pyplot as plt
import random
from scipy.stats import gaussian_kde
# 设置环境变量来调整内存分配行为
# import os
# os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:128'

# Fix all random seeds for reproducibility.
def set_seed(seed=42):
    """Seed every RNG in use (Python, NumPy, PyTorch CPU and CUDA).

    Parameters
    ----------
    seed : int, optional
        Seed applied to all random number generators (default 42).
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        # Seed every visible GPU as well, so multi-GPU runs are reproducible.
        torch.cuda.manual_seed_all(seed)


set_seed(42)  # Fix all random seeds so the run is reproducible.

# Load training and validation sets; only ice-cloud samples (fy_clt == 5) were kept.
# NOTE(review): 'standaerd' in the validation filename looks like a typo for
# 'standard' — confirm whether the file on disk really carries this spelling
# before renaming it here.
train_df = pd.read_csv('standard_train_ocean5.csv')
validation_df = pd.read_csv('standaerd_validation_ocean5.csv')

# Feature columns: FY-4A cloud-top products (height/temperature/pressure/OLR),
# sin/cos-encoded latitude and longitude, and per-band channels
# (scaled reflectances, brightness temperatures, scaled gray values).
features = [
    'fy_cth_scaled', 'fy_ctt_scaled', 'fy_ctp_scaled','fy_olr_scaled',
    'fy_lat_sin', 'fy_lat_cos', 'fy_lon_sin', 'fy_lon_cos',
    # 'band1', 'band2', 'band3', 'band4','band5', 'band6',
    'band1_scaled', 'band2_scaled', 'band3_scaled', 'band4_scaled',
    'band5_scaled', 'band6_scaled', 'band7_brightness_temperature', 'band7_scaled',
    'band8_brightness_temperature', 'band8_scaled', 'band9_brightness_temperature', 'band9_scaled',
    'band10_brightness_temperature', 'band10_scaled', 'band11_brightness_temperature', 'band11_scaled',
    'band12_brightness_temperature', 'band12_scaled', 'band13_brightness_temperature', 'band13_scaled',
    'band14_brightness_temperature', 'band14_scaled'
    , 'band1_gray_value_scaled', 'band2_gray_value_scaled',
    'band3_gray_value_scaled', 'band4_gray_value_scaled', 'band5_gray_value_scaled', 'band6_gray_value_scaled',
    'band7_gray_value_scaled', 'band8_gray_value_scaled', 'band9_gray_value_scaled', 'band10_gray_value_scaled',
    'band11_gray_value_scaled', 'band12_gray_value_scaled', 'band13_gray_value_scaled', 'band14_gray_value_scaled'
    # ,'band1_band2_ratio','ndi',
    #  'band1_band5_product','band6_band12_ratio','band6_band1_product','band7_band12_ratio','band7_band5_product'
    ]

# Regression target: CloudSat cloud-base height.
target = 'cloudsat_cbh'
X_train = train_df[features].values
y_train = train_df[target].values
X_val = validation_df[features].values
y_val = validation_df[target].values

# Standardize the target; scaler_y is kept (and dumped at the end of the
# script) so predictions can be inverse-transformed back to physical units.
scaler_y = StandardScaler()
y_train = scaler_y.fit_transform(y_train.reshape(-1, 1)).flatten()
y_val = scaler_y.transform(y_val.reshape(-1, 1)).flatten()

class ResNetLikeCNN_DNN(nn.Module):
    """1-D CNN with residual blocks followed by a fully connected regression head.

    Input: a single-channel 1-D "signal" of shape (batch, 1, input_size),
    where input_size is the number of feature columns.
    Output: one regression value per sample, shape (batch, 1).
    """

    def __init__(self, input_size):
        super(ResNetLikeCNN_DNN, self).__init__()
        # Stem convolution: 1 -> 32 channels, length preserved.
        self.conv1 = nn.Conv1d(1, 32, kernel_size=3, padding=1)
        self.bn1 = nn.BatchNorm1d(32)
        self.relu1 = nn.LeakyReLU(0.01)

        # Residual block 1 (32 -> 32, identity shortcut).
        self.resblock1_conv1 = nn.Conv1d(32, 32, kernel_size=3, padding=1)
        self.resblock1_bn1 = nn.BatchNorm1d(32)
        self.resblock1_conv2 = nn.Conv1d(32, 32, kernel_size=3, padding=1)
        self.resblock1_bn2 = nn.BatchNorm1d(32)
        self.resblock1_relu = nn.LeakyReLU(0.01)

        # Residual block 2 (32 -> 64, projected shortcut).
        self.resblock2_conv1 = nn.Conv1d(32, 64, kernel_size=3, padding=1)
        self.resblock2_bn1 = nn.BatchNorm1d(64)
        self.resblock2_conv2 = nn.Conv1d(64, 64, kernel_size=3, padding=1)
        self.resblock2_bn2 = nn.BatchNorm1d(64)
        self.resblock2_relu = nn.LeakyReLU(0.01)

        # 1x1 projection so the block-2 shortcut matches 64 channels.
        self.projection2 = nn.Conv1d(32, 64, kernel_size=1)

        # Residual block 3 (64 -> 128, stride 2 halves the length).
        self.resblock3_conv1 = nn.Conv1d(64, 128, kernel_size=3, padding=1, stride=2)
        self.resblock3_bn1 = nn.BatchNorm1d(128)
        self.resblock3_conv2 = nn.Conv1d(128, 128, kernel_size=3, padding=1)
        self.resblock3_bn2 = nn.BatchNorm1d(128)
        self.resblock3_relu = nn.LeakyReLU(0.01)

        # Strided 1x1 projection so the block-3 shortcut matches shape.
        self.projection3 = nn.Conv1d(64, 128, kernel_size=1, stride=2)

        # The stride-2 conv (kernel 3, padding 1) outputs ceil(input_size / 2)
        # steps, i.e. (input_size + 1) // 2.  The previous `128 * input_size // 2`
        # only matched for even input_size and crashed the flatten -> fc1
        # matmul whenever input_size was odd.
        self.fc1 = nn.Linear(128 * ((input_size + 1) // 2), 512)
        self.fc2 = nn.Linear(512, 128)
        self.fc4 = nn.Linear(128, 1)
        self.relu = nn.LeakyReLU(0.01)
        self.dropout = nn.Dropout(p=0.4)

    def forward(self, x):
        """Run the network on `x` of shape (batch, 1, input_size)."""
        # Stem.
        x1 = self.relu1(self.bn1(self.conv1(x)))

        # Residual block 1 (identity shortcut).
        res1 = self.resblock1_conv1(x1)
        res1 = self.resblock1_bn1(res1)
        res1 = self.resblock1_relu(res1)
        res1 = self.resblock1_conv2(res1)
        res1 = self.resblock1_bn2(res1)
        x2 = self.resblock1_relu(x1 + res1)

        # Residual block 2 (projected shortcut).
        res2 = self.resblock2_conv1(x2)
        res2 = self.resblock2_bn1(res2)
        res2 = self.resblock2_relu(res2)
        res2 = self.resblock2_conv2(res2)
        res2 = self.resblock2_bn2(res2)
        x2_projected = self.projection2(x2)
        x3 = self.resblock2_relu(x2_projected + res2)

        # Residual block 3 (strided, projected shortcut).
        x3_projected = self.projection3(x3)
        res3 = self.resblock3_conv1(x3)
        res3 = self.resblock3_bn1(res3)
        res3 = self.resblock3_relu(res3)
        res3 = self.resblock3_conv2(res3)
        res3 = self.resblock3_bn2(res3)
        x4 = self.resblock3_relu(x3_projected + res3)

        # Flatten and regress to a single value.
        x4 = x4.view(x4.size(0), -1)
        x5 = self.relu(self.fc1(x4))
        x5 = self.dropout(x5)
        x6 = self.relu(self.fc2(x5))
        return self.fc4(x6)

# class CNN_DNN(nn.Module):
#     def __init__(self, input_size):
#         super(CNN_DNN, self).__init__()
#         self.conv1 = nn.Conv1d(1, 32, kernel_size=3, padding=1)
#         self.bn1 = nn.BatchNorm1d(32)
#         self.conv2 = nn.Conv1d(32, 64, kernel_size=3, padding=1)
#         self.bn2 = nn.BatchNorm1d(64)
#         self.conv3 = nn.Conv1d(64, 128, kernel_size=3, padding=1)
#         self.bn3 = nn.BatchNorm1d(128)
#
#         self.fc1 = nn.Linear(128 * input_size, 256)
#         self.fc2 = nn.Linear(256, 128)
#         self.fc3 = nn.Linear(128, 1)
#         self.relu = nn.LeakyReLU(0.01)
#         self.dropout = nn.Dropout(p=0.3)
#
#     def forward(self, x):
#         x = self.relu(self.bn1(self.conv1(x)))
#         x = self.relu(self.bn2(self.conv2(x)))
#         x = self.relu(self.bn3(self.conv3(x)))
#         x = x.view(x.size(0), -1)
#         x = self.relu(self.fc1(x))
#         x = self.dropout(x)
#         x = self.relu(self.fc2(x))
#         x = self.fc3(x)
#         return x
# Training and validation loop with early stopping.
def train_and_validate(model, train_loader, val_loader, criterion, optimizer, scheduler, device, num_epochs=150,
                       early_stopping_patience=20):
    """Train `model`, tracking per-epoch loss/R², and early-stop on val loss.

    Parameters
    ----------
    model : nn.Module          -- network mapping (batch, 1, F) -> (batch, 1)
    train_loader, val_loader   -- DataLoaders yielding (X, y) batches
    criterion                  -- loss function
    optimizer, scheduler       -- optimizer and ReduceLROnPlateau-style scheduler
    device : torch.device      -- computation device
    num_epochs : int           -- maximum number of epochs
    early_stopping_patience : int -- epochs without val-loss improvement before stop

    Returns
    -------
    (model, train_losses, val_losses, train_r2_scores, val_r2_scores)
    with `model` reloaded with the weights of its best-validation-loss epoch.
    """
    best_val_loss = float('inf')
    early_stopping_counter = 0
    # Snapshot the starting weights so `best_model_weights` is always bound,
    # even if the epoch loop never runs.  `state_dict()` returns *references*
    # to the live parameter tensors, so the values must be cloned — otherwise
    # later epochs silently overwrite the "best" snapshot and the final
    # load_state_dict is a no-op (this was a real bug here).
    best_model_weights = {k: v.detach().clone() for k, v in model.state_dict().items()}
    # Mixed precision is CUDA-only; disable the scaler elsewhere so the same
    # code path still runs cleanly on CPU.
    scaler = torch.cuda.amp.GradScaler(enabled=(device.type == 'cuda'))

    # Per-epoch histories.
    train_losses = []
    val_losses = []
    train_r2_scores = []
    val_r2_scores = []

    for epoch in range(num_epochs):
        model.train()
        train_loss = 0
        y_train_true, y_train_pred = [], []

        for X_batch, y_batch in train_loader:
            X_batch, y_batch = X_batch.to(device), y_batch.to(device)
            optimizer.zero_grad()

            with torch.cuda.amp.autocast(enabled=(device.type == 'cuda')):  # automatic mixed precision
                outputs = model(X_batch.unsqueeze(1))
                loss = criterion(outputs, y_batch)

            scaler.scale(loss).backward()
            scaler.step(optimizer)
            scaler.update()

            train_loss += loss.item()

            # Collect training predictions/targets for the epoch-level R².
            y_train_true.extend(y_batch.cpu().numpy())
            y_train_pred.extend(outputs.cpu().detach().numpy())

        # Average training loss and R² for this epoch.
        train_loss /= len(train_loader)
        train_losses.append(train_loss)
        train_r2 = r2_score(np.array(y_train_true), np.array(y_train_pred))
        train_r2_scores.append(train_r2)

        # Validation phase.
        val_loss = 0
        y_val_true, y_val_pred = [], []
        model.eval()
        with torch.no_grad():
            for X_batch, y_batch in val_loader:
                X_batch, y_batch = X_batch.to(device), y_batch.to(device)
                outputs = model(X_batch.unsqueeze(1))
                val_loss += criterion(outputs, y_batch).item()

                # Collect validation predictions/targets for R².
                y_val_true.extend(y_batch.cpu().numpy())
                y_val_pred.extend(outputs.cpu().numpy())

        # Average validation loss and R² for this epoch.
        val_loss /= len(val_loader)
        val_losses.append(val_loss)
        val_r2 = r2_score(np.array(y_val_true), np.array(y_val_pred))
        val_r2_scores.append(val_r2)

        print(
            f'Epoch [{epoch + 1}/{num_epochs}], Train Loss: {train_loss:.4f}, Val Loss: {val_loss:.4f}, Train R²: {train_r2:.4f}, Val R²: {val_r2:.4f}')

        scheduler.step(val_loss)

        if val_loss < best_val_loss:
            best_val_loss = val_loss
            early_stopping_counter = 0
            # Clone — do not alias — the best weights (see note above).
            best_model_weights = {k: v.detach().clone() for k, v in model.state_dict().items()}
        else:
            early_stopping_counter += 1
            if early_stopping_counter >= early_stopping_patience:
                print(f'Early stopping at epoch {epoch + 1}')
                break

    # Restore the best-performing weights before returning.
    model.load_state_dict(best_model_weights)
    return model, train_losses, val_losses, train_r2_scores, val_r2_scores


# Plot the loss and R² learning curves.
def plot_metrics(train_losses, val_losses, train_r2_scores, val_r2_scores):
    """Draw per-epoch train/val loss (left) and R² (right) curves side by side."""
    epoch_axis = range(1, len(train_losses) + 1)

    plt.figure(figsize=(12, 5))

    # Left panel: loss curves.
    plt.subplot(1, 2, 1)
    for series, label in ((train_losses, 'Train Loss'), (val_losses, 'Val Loss')):
        plt.plot(epoch_axis, series, label=label)
    plt.title('Ice Clouds Loss per Epoch')
    plt.xlabel('Epochs',fontsize=12)
    plt.ylabel('Loss ',fontsize=12)
    plt.legend()

    # Right panel: R² curves.
    plt.subplot(1, 2, 2)
    for series, label in ((train_r2_scores, 'Train R²'), (val_r2_scores, 'Val R²')):
        plt.plot(epoch_axis, series, label=label)
    plt.title('Ice Clouds R² per Epoch')
    plt.xlabel('Epochs',fontsize=12)
    plt.ylabel('R² Score',fontsize=12)
    plt.legend()

    plt.tight_layout()
    plt.show()


# Compute RMSE and MAE.
def calculate_rmse_mae(y_true, y_pred):
    """Return (RMSE, MAE) between arrays of true and predicted values."""
    residuals = y_true - y_pred
    rmse = np.sqrt(np.mean(residuals ** 2))
    mae = np.abs(residuals).mean()
    return rmse, mae


# Plot the distribution of validation prediction errors (histogram + KDE).
def plot_validation_stats(y_true, y_pred_true):
    """Plot a histogram and KDE curve of the prediction errors.

    Errors are (y_pred_true - y_true) / 1000 — presumably metres -> km;
    TODO confirm the units against the target column.
    """
    errors = (y_pred_true - y_true) / 1000

    # KDE of the error distribution, evaluated on a dense grid.
    x = np.linspace(min(errors), max(errors), 1000)
    kde = gaussian_kde(errors)
    y = kde(x)

    # One figure with twin y-axes (frequency + density).  The previous code
    # called plt.figure(figsize=(5, 5)) and then plt.subplots(), which popped
    # up an extra, permanently-empty window and dropped the intended size.
    fig, ax1 = plt.subplots(figsize=(5, 5))

    # Histogram on the left axis.
    hist, bins, patches = ax1.hist(errors, bins=50, color='#5a78e4', edgecolor='black',  density=False, label='Histogram')
    ax1.set_xlabel('Error',  fontsize=12)
    ax1.set_ylabel('Frequency', color='black',  fontsize=12)
    ax1.tick_params(axis='y', labelcolor='black')

    # KDE curve on the right axis, anchored at zero.
    ax2 = ax1.twinx()
    ax2.set_ylim(bottom=0)
    ax2.plot(x, y, color='#1062ec', linewidth=2, label='KDE')
    ax2.set_ylabel('Density', color='black',  fontsize=12)
    ax2.tick_params(axis='y', labelcolor='black')

    plt.title('Ice Clouds Error Frequency Distribution')

    # Mean-error reference line.
    mean_error = np.mean(errors)
    ax1.axvline(x=mean_error, color='red', linestyle='--', label='Mean Error')

    # Give both axes 10% headroom above their tallest curve.
    ax1.set_ylim(0, hist.max() * 1.1)
    ax2.set_ylim(0, y.max() * 1.1)

    # Separate legends, one per axis.
    ax1.legend(loc='upper left')
    ax2.legend(loc='upper right')

    plt.show()

def plot_train_val_scatter(y_true, y_pred_true):
    """Density-coloured scatter of predicted vs. true CBH (km) with a linear
    trend line, a y = x reference line, and a density colorbar."""
    # 6x5 figure with a manually placed main axes, leaving room on the right
    # for the colorbar.
    fig = plt.figure(figsize=(6, 5))
    plt.subplots_adjust(left=0.1, right=0.8, bottom=0.1, top=0.9)
    ax = fig.add_axes([0.1, 0.1, 0.65, 0.8])

    # Convert to km (inputs presumably in metres — confirm units).
    y_true = y_true / 1000
    y_pred_true = y_pred_true / 1000

    # Colour each point by its local density estimate.
    stacked = np.vstack([y_true, y_pred_true])
    density = gaussian_kde(stacked)(stacked)
    ax.scatter(y_true, y_pred_true, c=density, label='data', s=1, cmap='coolwarm')

    # Linear fit for the trend line.
    coeffs = np.polyfit(y_true, y_pred_true, 1)
    trend = np.poly1d(coeffs)
    ax.plot(y_true, trend(y_true), 'r', label='Y={:.2f}*X+{:.2f}'.format(coeffs[0], coeffs[1]))

    # Perfect-prediction reference line.
    x_lo, x_hi = ax.get_xlim()
    ax.plot([x_lo, x_hi], [x_lo, x_hi], 'k--', label='Y=X')

    # Anchor both axes at the origin.
    ax.set_xlim(0, ax.get_xlim()[1])
    ax.set_ylim(0, ax.get_ylim()[1])

    ax.set_xlabel('CloudSat CBH/km',  fontsize=12)
    ax.set_ylabel('FY-4A CBH/km',  fontsize=12)
    ax.set_title('Ice Clouds',  fontsize=14)
    ax.legend(loc='best')

    # Dedicated colorbar axes on the right edge.
    cbar_axes = fig.add_axes([0.77, 0.15, 0.03, 0.7])
    mappable = plt.cm.ScalarMappable(cmap='coolwarm', norm=plt.Normalize(vmin=density.min(), vmax=density.max()))
    mappable.set_array([])
    colorbar = plt.colorbar(mappable, cax=cbar_axes)
    colorbar.set_label('Scatter Density', fontsize=10)
    plt.show()

# Pick GPU when available, otherwise CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

X_train_tensor = torch.tensor(X_train, dtype=torch.float32)
y_train_tensor = torch.tensor(y_train, dtype=torch.float32).view(-1, 1)
X_val_tensor = torch.tensor(X_val, dtype=torch.float32)
y_val_tensor = torch.tensor(y_val, dtype=torch.float32).view(-1, 1)

train_loader = DataLoader(TensorDataset(X_train_tensor, y_train_tensor), batch_size=64, shuffle=True)
val_loader = DataLoader(TensorDataset(X_val_tensor, y_val_tensor), batch_size=64, shuffle=False)

model = ResNetLikeCNN_DNN(X_train.shape[1]).to(device)

criterion = nn.MSELoss()
# AdamW with weight_decay provides L2-style regularisation.
optimizer = optim.AdamW(model.parameters(), lr=0.0005, weight_decay=0.1)
# Reduce the learning rate when validation loss plateaus.
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.3, patience=3, verbose=True)

# Train with early stopping; the model comes back with its best weights loaded.
model, train_losses, val_losses, train_r2_scores, val_r2_scores = train_and_validate(
    model, train_loader, val_loader, criterion, optimizer, scheduler, device
)

# Explicitly switch to inference mode (dropout off, BatchNorm running stats).
# The previous code relied on train_and_validate happening to leave the model
# in eval mode after its last validation pass.
model.eval()
with torch.no_grad():
    X_val_tensor = X_val_tensor.to(device)
    y_pred = model(X_val_tensor.unsqueeze(1)).cpu().numpy()
    y_val_true = y_val_tensor.cpu().numpy()

    X_train_tensor = X_train_tensor.to(device)
    y_train_pred = model(X_train_tensor.unsqueeze(1)).cpu().numpy()
    y_train_true = y_train_tensor.cpu().numpy()

# Undo the target standardisation so metrics are in physical units.
y_pred_true = scaler_y.inverse_transform(y_pred).flatten()
y_val_true = scaler_y.inverse_transform(y_val_true).flatten()
y_train_pred_true = scaler_y.inverse_transform(y_train_pred).flatten()
y_train_true = scaler_y.inverse_transform(y_train_true).flatten()

# Save validation predictions alongside the true values.
result_df = pd.DataFrame({'Predicted': y_pred_true, 'True': y_val_true})

# Fixed: the filename previously ended in a duplicated '.csv.csv' extension.
csv_file_path = 'prediction_ocean5_results.csv'
result_df.to_csv(csv_file_path, index=False)

r2 = r2_score(y_val_true, y_pred_true)
print(f'R² Score: {r2:.4f}')

rmse, mae = calculate_rmse_mae(y_val_true, y_pred_true)
print(f'RMSE: {rmse:.4f}, MAE: {mae:.4f}')

# Persist the trained weights.
torch.save(model.state_dict(), 'trained_model_ocean5.pth')
# Loss / R² learning curves.
plot_metrics(train_losses, val_losses, train_r2_scores, val_r2_scores)
# Validation error distribution.
plot_validation_stats(y_val_true, y_pred_true)
# Predicted-vs-true scatter plot.
plot_train_val_scatter(y_val_true, y_pred_true)
# Persist the target scaler for inference-time inverse transforms.
joblib.dump(scaler_y, 'scaler5_y.pkl')