import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import missingno as msno
from datetime import datetime

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset, random_split
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, accuracy_score, roc_auc_score, f1_score
from sklearn.impute import KNNImputer
from imblearn.over_sampling import SMOTE
from sklearn.feature_selection import SelectKBest, f_classif
import optuna
import warnings
warnings.filterwarnings("ignore")

# Load the dataset from CSV.
file_path = "usa_rain_prediction_dataset_2024_2025.csv"
df = pd.read_csv(file_path)

# Basic shape / dtype inspection.
print("Number of Columns:", df.shape[1])
print("Number of Rows:", df.shape[0])
df.info()

# Visualise missing values and count exact duplicate rows.
msno.matrix(df, color=(0.1, 0.1, 0.1))
plt.show()
print("Number of Duplicates:", df.duplicated().sum())

# Expand the Date column into numeric year/month/day features.
df["Date"] = pd.to_datetime(df["Date"])
df["Year"] = df["Date"].dt.year
df["Month"] = df["Date"].dt.month
df["Day"] = df["Date"].dt.day

# Lag features and a 3-row moving average of the target.
# NOTE(review): shift()/rolling() operate on raw row order over the whole frame.
# If the CSV contains multiple locations and/or is not sorted by date, these
# lags mix rows from neighbouring locations; consider
# df.sort_values(["Location", "Date"]).groupby("Location")[...].shift(...) — confirm dataset layout.
# NOTE(review): these columns are derived from the target itself and their
# leading NaNs are later filled by KNNImputer, which leaks target information.
df['Rain_Tomorrow_Lag1'] = df['Rain Tomorrow'].shift(1)
df['Rain_Tomorrow_Lag7'] = df['Rain Tomorrow'].shift(7)
df['Rain_MA_3'] = df['Rain Tomorrow'].rolling(window=3).mean()

# The raw Date column is no longer needed once the components are extracted.
df = df.drop(columns=["Date"])

# Label-encode the categorical Location column.
encoder = LabelEncoder()
df["Location"] = encoder.fit_transform(df["Location"])

# Fill missing values (including NaNs introduced by the lag features above)
# with a KNN imputer fitted on the entire frame.
# NOTE(review): imputing before the train/test split leaks test-set statistics
# into training data — the imputer should be fitted on the training split only.
imputer = KNNImputer(n_neighbors=5)
df[df.columns] = imputer.fit_transform(df)

# Univariate (ANOVA F-test) selection of the 10 strongest features.
# NOTE(review): fitted on the full dataset — another leakage source.
selector = SelectKBest(score_func=f_classif, k=10)
X_selected = selector.fit_transform(df.drop(columns=['Rain Tomorrow']), df['Rain Tomorrow'])
selected_features = df.drop(columns=['Rain Tomorrow']).columns[selector.get_support()]
print("Selected Features:", selected_features)

# Standardise the selected features to zero mean / unit variance.
# NOTE(review): same leakage caveat — fit the scaler on the training split only.
scaler = StandardScaler()
X_selected = scaler.fit_transform(X_selected)

# Cast the target to int for the classifier/metrics below.
y = df['Rain Tomorrow'].astype(int)

# Balance the classes by oversampling the minority class.
# NOTE(review): SMOTE is applied before the train/test split, so synthetic
# samples interpolated from (future) test rows can land in the training set —
# resample the training split only.
smote = SMOTE(random_state=42)
X_resampled, y_resampled = smote.fit_resample(X_selected, y)

# Keep a random 20% subsample to cut training cost (test_size=0.8 discards 80%).
X_resampled, _, y_resampled, _ = train_test_split(X_resampled, y_resampled, test_size=0.8, random_state=42)

# Convert the dataset into PyTorch tensors.
def to_tensor(data, labels):
    """Convert a feature matrix and label vector to float32 tensors.

    Generalized: accepts numpy arrays, pandas objects, or plain sequences
    (the original required `labels` to expose a pandas-style `.values`
    attribute and crashed on ndarrays). np.asarray is a no-op for arrays
    and extracts the underlying values from pandas objects.

    Returns:
        (data_tensor, labels_tensor): float32 torch.Tensor pair.
    """
    data_tensor = torch.tensor(np.asarray(data), dtype=torch.float32)
    labels_tensor = torch.tensor(np.asarray(labels), dtype=torch.float32)
    return data_tensor, labels_tensor

# Tensor-ise the resampled data, then split it 80/20 into train/test loaders.
X_tensor, y_tensor = to_tensor(X_resampled, y_resampled)

data = torch.utils.data.TensorDataset(X_tensor, y_tensor)
train_size = int(len(data) * 0.8)
test_size = len(data) - train_size
train_dataset, test_dataset = random_split(data, [train_size, test_size])

# Shuffle training batches each epoch; keep evaluation order deterministic.
train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=32, shuffle=False)

# Lightweight Transformer-ResNet hybrid building block.
class LightTransformerResNetBlock(nn.Module):
    """One encoder block: multi-head self-attention followed by a
    position-wise feed-forward network, each sub-layer wrapped in a
    residual (ResNet-style) connection with dropout and LayerNorm."""

    def __init__(self, input_dim, n_heads, ff_hidden_dim, dropout=0.1):
        super(LightTransformerResNetBlock, self).__init__()
        self.attention = nn.MultiheadAttention(embed_dim=input_dim, num_heads=n_heads, batch_first=True)
        self.feed_forward = nn.Sequential(
            nn.Linear(input_dim, ff_hidden_dim),
            nn.ReLU(),
            nn.Linear(ff_hidden_dim, input_dim),
        )
        self.norm1 = nn.LayerNorm(input_dim)
        self.norm2 = nn.LayerNorm(input_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        # Residual self-attention sub-layer (query = key = value = x).
        attended, _ = self.attention(x, x, x)
        x = self.norm1(x + self.dropout(attended))
        # Residual feed-forward sub-layer.
        transformed = self.feed_forward(x)
        return self.norm2(x + self.dropout(transformed))

class LightTransformerResNetRainPredictionModel(nn.Module):
    """Binary rain predictor: linear embedding, a stack of
    LightTransformerResNetBlock encoders, then a sigmoid head that
    emits a probability in (0, 1) per sample."""

    def __init__(self, input_size, num_layers, n_heads, ff_hidden_dim, dropout=0.1):
        super(LightTransformerResNetRainPredictionModel, self).__init__()
        self.embedding = nn.Linear(input_size, input_size)
        self.encoder_blocks = nn.ModuleList(
            LightTransformerResNetBlock(input_size, n_heads, ff_hidden_dim, dropout)
            for _ in range(num_layers)
        )
        self.fc = nn.Linear(input_size, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Treat each sample as a length-1 sequence so attention can run.
        out = self.embedding(x).unsqueeze(1)
        for block in self.encoder_blocks:
            out = block(out)
        # Drop the sequence dimension and map to a probability.
        return self.sigmoid(self.fc(out.squeeze(1)))

# Hyper-parameters for the lightweight model.
input_size = X_resampled.shape[1]
num_layers = 2
n_heads = 2
ff_hidden_dim = 64

# Binary cross-entropy on the sigmoid outputs, optimised with Adam.
model = LightTransformerResNetRainPredictionModel(
    input_size, num_layers, n_heads, ff_hidden_dim
)
criterion = nn.BCELoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Final model training.
def train_model(model, criterion, optimizer, train_loader, num_epochs=10,
                save_path='LightTransformerResNetRainPredictionModel.pth'):
    """Train `model` on `train_loader` and save its weights afterwards.

    Args:
        model: nn.Module emitting shape (N, 1) probabilities.
        criterion: loss taking (outputs, labels), e.g. nn.BCELoss.
        optimizer: torch optimizer over model.parameters().
        train_loader: DataLoader yielding (inputs, labels) batches.
        num_epochs: number of full passes over the data.
        save_path: where state_dict is written (previously hard-coded).

    Returns:
        list[float]: mean training loss per epoch (new, previously None —
        existing callers that ignore the return value are unaffected).
    """
    model.train()
    epoch_losses = []
    for epoch in range(num_epochs):
        running_loss = 0.0
        for inputs, labels in train_loader:
            # BCELoss expects labels shaped like the (N, 1) model output.
            labels = labels.view(-1, 1)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            # Weight by batch size so the epoch mean is per-sample.
            running_loss += loss.item() * inputs.size(0)
        epoch_loss = running_loss / len(train_loader.dataset)
        epoch_losses.append(epoch_loss)
        print(f'Epoch {epoch+1}/{num_epochs}, Loss: {epoch_loss:.4f}')
    torch.save(model.state_dict(), save_path)
    return epoch_losses

# Train for the default 10 epochs; weights are saved to disk inside train_model.
train_model(model, criterion, optimizer, train_loader)

# Model evaluation.
def evaluate_model(model, test_loader, threshold=0.5):
    """Evaluate `model` on `test_loader`, print metrics, return them.

    Args:
        model: nn.Module emitting per-sample probabilities.
        test_loader: DataLoader yielding (inputs, labels) batches.
        threshold: probability cut-off for the positive class
            (new parameter; default preserves prior behaviour).

    Returns:
        dict with 'accuracy', 'roc_auc', 'f1' (new; callers ignoring the
        return value are unaffected).
    """
    model.eval()
    probs, actuals = [], []
    with torch.no_grad():
        for inputs, labels in test_loader:
            outputs = model(inputs)
            probs.extend(outputs.view(-1).tolist())
            actuals.extend(labels.tolist())
    # BUG FIX: ROC AUC must be computed from the raw probabilities.
    # The original thresholded first, which collapses the ROC curve to a
    # single operating point and misreports AUC.
    roc_auc = roc_auc_score(actuals, probs)
    predictions = [1 if p > threshold else 0 for p in probs]
    accuracy = accuracy_score(actuals, predictions)
    f1 = f1_score(actuals, predictions)
    print(f'Accuracy: {accuracy:.4f}')
    print(f'ROC AUC: {roc_auc:.4f}')
    print(f'F1 Score: {f1:.4f}')
    print(classification_report(actuals, predictions))
    return {'accuracy': accuracy, 'roc_auc': roc_auc, 'f1': f1}

# Report accuracy / ROC AUC / F1 and the per-class classification report.
evaluate_model(model, test_loader)
