import torch
import pandas as pd
import numpy as np
import torch.nn as nn
from sklearn.preprocessing import StandardScaler
import joblib

# Load the test data (preprocessed CSV expected to contain all feature columns
# listed in `features` below plus the raw fy_* columns used for the output table).
test_df = pd.read_csv('processed_clt_4_data.csv')

# 定义模型结构
class ResNetLikeCNN_DNN(nn.Module):
    def __init__(self, input_size):
        super(ResNetLikeCNN_DNN, self).__init__()
        self.conv1 = nn.Conv1d(1, 16, kernel_size=3, padding=1)
        self.bn1 = nn.BatchNorm1d(16)
        self.relu1 = nn.LeakyReLU(0.01)
        # 增加额外的残差块
        self.resblock1 = ResidualBlock(16, 16)
        self.resblock2 = ResidualBlock(16, 32)
        self.resblock3 = ResidualBlock(32, 64, stride=2)
        self.fc1 = nn.Linear(64 * (input_size // 2), 128)
        self.fc2 = nn.Linear(128, 64)
        self.fc3 = nn.Linear(64, 64)
        self.fc4 = nn.Linear(64, 1)
        self.relu = nn.LeakyReLU(0.01)
        self.dropout = nn.Dropout(p=0.4)

    def forward(self, x):
        x = self.relu1(self.bn1(self.conv1(x)))
        x = self.resblock1(x)
        x = self.resblock2(x)
        x = self.resblock3(x)
        x = x.view(x.size(0), -1)
        x = self.relu(self.fc1(x))
        x = self.dropout(x)
        x = self.relu(self.fc2(x))
        x = self.relu(self.fc3(x))
        x = self.fc4(x)
        return x

# 残差块定义
class ResidualBlock(nn.Module):
    def __init__(self, in_channels, out_channels, stride=1):
        super(ResidualBlock, self).__init__()
        self.conv1 = nn.Conv1d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1)
        self.bn1 = nn.BatchNorm1d(out_channels)
        self.conv2 = nn.Conv1d(out_channels, out_channels, kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm1d(out_channels)
        self.relu = nn.LeakyReLU(0.01)
        self.downsample = nn.Sequential()
        if stride != 1 or in_channels != out_channels:
            self.downsample = nn.Sequential(
                nn.Conv1d(in_channels, out_channels, kernel_size=1, stride=stride),
                nn.BatchNorm1d(out_channels)
            )

    def forward(self, x):
        residual = self.downsample(x)
        x = self.relu(self.bn1(self.conv1(x)))
        x = self.bn2(self.conv2(x))
        x += residual
        return self.relu(x)

# Select the compute device.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Feature columns fed to the model (must match the order used at training time).
features = [
    'fy_cth_scaled', 'fy_ctt_scaled', 'fy_ctp_scaled', 'fy_olr_scaled',
    'fy_lat_sin', 'fy_lat_cos', 'fy_lon_sin', 'fy_lon_cos',
    'band1_scaled', 'band2_scaled', 'band3_scaled', 'band4_scaled',
    'band5_scaled', 'band6_scaled', 'band7_brightness_temperature', 'band7_scaled',
    'band8_brightness_temperature', 'band8_scaled', 'band9_brightness_temperature', 'band9_scaled',
    'band10_brightness_temperature', 'band10_scaled', 'band11_brightness_temperature', 'band11_scaled',
    'band12_brightness_temperature', 'band12_scaled', 'band13_brightness_temperature', 'band13_scaled',
    'band14_brightness_temperature', 'band14_scaled', 'band1_gray_value_scaled', 'band2_gray_value_scaled',
    'band3_gray_value_scaled', 'band4_gray_value_scaled', 'band5_gray_value_scaled', 'band6_gray_value_scaled',
    'band7_gray_value_scaled', 'band8_gray_value_scaled', 'band9_gray_value_scaled', 'band10_gray_value_scaled',
    'band11_gray_value_scaled', 'band12_gray_value_scaled', 'band13_gray_value_scaled', 'band14_gray_value_scaled'
]

# Extract the test-set feature matrix.
X_test = test_df[features].values

# Standardize the features.
# NOTE(review): this fits a FRESH StandardScaler on the test set instead of
# reusing the scaler fitted on the training data (the target scaler IS loaded
# from 'scaler4_y.pkl' below). Fitting on test data shifts the inputs relative
# to what the model saw during training — if the training pipeline saved an
# X scaler, load and `transform` with it here instead. TODO confirm.
scaler_x = StandardScaler()
X_test_scaled = scaler_x.fit_transform(X_test)

# Convert to a float32 tensor.
X_test_tensor = torch.tensor(X_test_scaled, dtype=torch.float32)

# Build the model and load the trained weights.
model = ResNetLikeCNN_DNN(X_test.shape[1]).to(device)
# map_location=device lets a GPU-saved checkpoint load on CPU-only machines;
# without it torch.load raises when CUDA is unavailable.
model.load_state_dict(torch.load('trained_model_ocean4.pth', map_location=device))
model.eval()  # evaluation mode: disables dropout, uses BatchNorm running stats

# Inference batch size (tune to available GPU memory).
batch_size = 64

# Predict batch by batch so the full test set never has to fit on the device at once.
prediction_chunks = []
with torch.no_grad():
    for chunk in torch.split(X_test_tensor, batch_size):
        inputs = chunk.unsqueeze(1).to(device)  # add the channel dim expected by Conv1d
        prediction_chunks.append(model(inputs).cpu().numpy())

# Stitch all per-batch predictions back into one flat array.
y_test_pred = np.concatenate(prediction_chunks).flatten()

# Restore the target scaler saved during training.
scaler_y = joblib.load('scaler4_y.pkl')

# Undo the target standardization to get predictions in the original units.
y_test_pred_true = scaler_y.inverse_transform(y_test_pred.reshape(-1, 1)).flatten()

# Assemble the output table: the model prediction ('fy_cbh') alongside the
# original columns needed for downstream evaluation.
predicted_cbh = y_test_pred_true
# NOTE(review): 'fy_cot' is computed as fy_cth minus the predicted value, i.e.
# a top-minus-base difference — confirm this column name matches what the
# downstream consumers expect.
test_result_df = pd.DataFrame({
    'fy_cbh': predicted_cbh,
    'fy_cth': test_df['fy_cth'].values,
    'fy_clt': test_df['fy_clt'].values,
    'fy_lat': test_df['fy_lat'].values,
    'fy_lon': test_df['fy_lon'].values,
    'fy_cot': test_df['fy_cth'].values - predicted_cbh,
})

# Write the predictions to disk.
test_result_df.to_csv('prediction_ocean4_2020010104_results.csv', index=False)
