import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import TensorDataset, DataLoader
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score, roc_auc_score, roc_curve, auc
from imblearn.over_sampling import SMOTE
import warnings
import os
warnings.filterwarnings('ignore')

# Create the output directory for plots.
# exist_ok avoids the check-then-create race of `os.path.exists` + `makedirs`
# and is a no-op when the directory already exists.
output_dir = 'Kaggle_playground_降水预测/output'
os.makedirs(output_dir, exist_ok=True)

# Data loading and preprocessing
train = pd.read_csv('Kaggle_playground_降水预测/train.csv')
test = pd.read_csv('Kaggle_playground_降水预测/test.csv')
print(train.shape, test.shape)

# Remove duplicate rows in place and report per-column missing-value counts
train.drop_duplicates(inplace=True)
print("Missing values:", train.isnull().sum())

# Feature engineering
def feature_engineering(df):
    """Return a copy of *df* augmented with engineered weather features.

    The input frame is not mutated. Expects the raw columns: humidity,
    cloud, sunshine, maxtemp, mintemp, temparature (sic — source column
    name), dewpoint, pressure, winddirection, windspeed. Small epsilons
    in the denominators guard against division by zero.
    """
    out = df.copy()

    # Hoist the most frequently combined columns.
    humidity = out['humidity']
    cloud = out['cloud']
    sunshine = out['sunshine']

    # Combined features
    out['hci'] = humidity * cloud
    out['hsi'] = humidity * sunshine
    out['csr'] = cloud / (sunshine + 1e-5)
    out['rd'] = 100 - humidity
    out['sp'] = sunshine / (sunshine + cloud + 1e-5)
    out['wi'] = (0.4 * humidity) + (0.3 * cloud) - (0.3 * sunshine)

    # More feature combinations
    out['temp_range'] = out['maxtemp'] - out['mintemp']
    out['temp_dew_diff'] = out['temparature'] - out['dewpoint']
    out['humidity_cloud_ratio'] = humidity / (cloud + 1e-3)
    out['sunshine_cloud_ratio'] = sunshine / (cloud + 1e-3)
    out['pressure_wind_interaction'] = out['pressure'] * out['winddirection']
    out['temp_pressure_ratio'] = out['temparature'] / (out['pressure'] + 1e-3)
    out['wind_pressure_ratio'] = out['windspeed'] / (out['pressure'] + 1e-3)

    return out

# Apply feature engineering to both splits
train_comb = feature_engineering(train)
test = feature_engineering(test)

# Handle missing values in the test set using medians from the training data
if test.isnull().sum().sum() > 0:
    print("\nHandling missing values in test data...")
    for col in test.columns:
        if test[col].isnull().sum() > 0:
            test[col] = test[col].fillna(train_comb[col].median())

# Data preparation: feature matrix and binary target
X = train_comb.drop(['id', 'rainfall'], axis=1)
y = train_comb['rainfall']

# Feature scaling.
# NOTE(review): `test` is never passed through scaler.transform in this
# script — if the model is later applied to `test`, it must be scaled with
# this same fitted scaler first.
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

# Split BEFORE oversampling so the validation set contains only real samples.
# Running SMOTE on the full data and splitting afterwards (the previous
# order) leaks synthetic points — interpolated from training neighbours —
# into the validation fold and inflates the reported metrics.
X_train, X_val, y_train, y_val = train_test_split(
    X_scaled, y, test_size=0.2, random_state=42, stratify=y
)

# Use SMOTE to balance the classes on the training portion only
smote = SMOTE(random_state=42)
X_train, y_train = smote.fit_resample(X_train, y_train)

# Reshape data for the LSTM: (samples, timesteps=1, features)
X_train_lstm = X_train.reshape((X_train.shape[0], 1, X_train.shape[1]))
X_val_lstm = X_val.reshape((X_val.shape[0], 1, X_val.shape[1]))

# Define LSTM model
class SimpleLSTMModel(nn.Module):
    """Two-layer stacked LSTM binary classifier.

    Consumes input of shape (batch, seq_len, input_dim) and returns a
    sigmoid probability of shape (batch, output_dim) computed from the
    final time step of the second LSTM.
    """

    def __init__(self, input_dim, hidden_dim=50, output_dim=1):
        super().__init__()
        # Module creation order is kept stable so seeded initialization
        # stays reproducible.
        self.recurrent_a = nn.LSTM(
            input_size=input_dim,
            hidden_size=hidden_dim,
            batch_first=True,
        )
        self.drop_a = nn.Dropout(0.2)
        self.recurrent_b = nn.LSTM(
            input_size=hidden_dim,
            hidden_size=hidden_dim // 2,
            batch_first=True,
        )
        self.drop_b = nn.Dropout(0.2)
        self.head = nn.Linear(hidden_dim // 2, output_dim)
        self.squash = nn.Sigmoid()

    def forward(self, x):
        # Full sequence through the first recurrent layer, with dropout.
        seq, _ = self.recurrent_a(x)
        seq = self.drop_a(seq)
        # Second recurrent layer; keep only the final time step.
        seq, _ = self.recurrent_b(seq)
        last_step = self.drop_b(seq[:, -1, :])
        # Linear head squashed to a probability.
        return self.squash(self.head(last_step))

# Initialize model and training parameters
input_dim = X_train.shape[1]  # Input feature dimension (number of engineered features)
model = SimpleLSTMModel(input_dim, hidden_dim=50, output_dim=1)

# Define loss function and optimizer.
# BCELoss expects probabilities in [0, 1]; the model's forward already
# applies nn.Sigmoid, so raw-logit losses (BCEWithLogitsLoss) are not used.
criterion = nn.BCELoss()  # Binary cross entropy loss
optimizer = optim.Adam(model.parameters(), lr=0.005)

# Training settings
num_epochs = 80
batch_size = 16
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

# Create data loaders.
# Targets are reshaped to (N, 1) so they match the model's (batch, 1) output
# shape, as required by BCELoss.
train_dataset = TensorDataset(torch.FloatTensor(X_train_lstm), torch.FloatTensor(y_train.values.reshape(-1, 1)))
val_dataset = TensorDataset(torch.FloatTensor(X_val_lstm), torch.FloatTensor(y_val.values.reshape(-1, 1)))
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=batch_size)  # no shuffle for evaluation

# Per-epoch metric histories (appended inside the training loop, plotted later)
train_losses = []
val_losses = []
train_accs = []
val_accs = []
train_aucs = []
val_aucs = []

# Early stopping settings: stop after `patience` consecutive epochs without
# a new best validation AUC.
best_val_auc = 0
patience = 10
counter = 0
best_model_state = None

# Training loop: optimize on the training set, evaluate on the validation set
# each epoch, and early-stop when validation AUC stops improving.
for epoch in range(num_epochs):
    model.train()
    train_loss = 0.0
    train_preds = []
    train_true = []
    
    # Training phase
    for inputs, targets in train_loader:
        inputs, targets = inputs.to(device), targets.to(device)
        
        # Forward propagation
        outputs = model(inputs)
        
        # Calculate loss
        loss = criterion(outputs, targets)
        
        # Backpropagation and optimization
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        
        # Accumulate loss, weighted by batch size so the division by the
        # dataset length below yields a true per-sample mean even when the
        # last batch is smaller.
        train_loss += loss.item() * inputs.size(0)
        
        # Store predictions and true values for metrics calculation.
        # Note: collected in train mode, so dropout is active — training
        # metrics are slightly noisier than a clean eval pass would give.
        train_preds.extend(outputs.cpu().detach().numpy())
        train_true.extend(targets.cpu().numpy())
    
    # Calculate average loss and metrics for training set
    train_loss = train_loss / len(train_dataset)
    train_pred_labels = (np.array(train_preds) > 0.5).astype(int)
    train_acc = accuracy_score(np.array(train_true).flatten(), train_pred_labels.flatten())
    train_auc = roc_auc_score(np.array(train_true).flatten(), np.array(train_preds).flatten())
    
    # Validation phase
    model.eval()
    val_loss = 0.0
    val_preds = []
    val_true = []
    
    with torch.no_grad():
        for inputs, targets in val_loader:
            inputs, targets = inputs.to(device), targets.to(device)
            
            # Forward propagation
            outputs = model(inputs)
            
            # Calculate loss
            loss = criterion(outputs, targets)
            
            # Accumulate loss
            val_loss += loss.item() * inputs.size(0)
            
            # Store predictions and true values for metrics calculation
            val_preds.extend(outputs.cpu().numpy())
            val_true.extend(targets.cpu().numpy())
    
    # Calculate average loss and metrics for validation set
    val_loss = val_loss / len(val_dataset)
    val_pred_labels = (np.array(val_preds) > 0.5).astype(int)
    val_acc = accuracy_score(np.array(val_true).flatten(), val_pred_labels.flatten())
    val_auc = roc_auc_score(np.array(val_true).flatten(), np.array(val_preds).flatten())
    
    # Save metrics
    train_losses.append(train_loss)
    val_losses.append(val_loss)
    train_accs.append(train_acc)
    val_accs.append(val_acc)
    train_aucs.append(train_auc)
    val_aucs.append(val_auc)
    
    # Print training and validation metrics
    print(f'Epoch {epoch+1}/{num_epochs}')
    print(f'Train Loss: {train_loss:.4f}, Train Acc: {train_acc:.4f}, Train AUC: {train_auc:.4f}')
    print(f'Val Loss: {val_loss:.4f}, Val Acc: {val_acc:.4f}, Val AUC: {val_auc:.4f}')
    print('-' * 50)
    
    # Early stopping check
    if val_auc > best_val_auc:
        best_val_auc = val_auc
        counter = 0
        # Snapshot the weights BY VALUE. The previous code used
        # model.state_dict().copy(), which is only a shallow dict copy:
        # its values are the live parameter tensors, which the optimizer
        # keeps mutating in place, so the "best" snapshot silently tracked
        # the latest weights instead of the best ones.
        best_model_state = {k: v.detach().clone() for k, v in model.state_dict().items()}
        print(f"New best validation AUC: {val_auc:.4f}")
    else:
        counter += 1
        print(f"Early stopping counter: {counter}/{patience}")
        
    if counter >= patience:
        print(f"Early stopping triggered at epoch {epoch+1}")
        break

# Restore the weights that achieved the best validation AUC before the final
# evaluation — otherwise the model would keep the last epoch's weights.
if best_model_state is not None:
    model.load_state_dict(best_model_state)
    print(f"Loaded best model with validation AUC: {best_val_auc:.4f}")

# Evaluate the restored model on the validation set and plot its ROC curve.
model.eval()
val_preds = []
val_true = []

with torch.no_grad():
    for batch_x, batch_y in val_loader:
        batch_x, batch_y = batch_x.to(device), batch_y.to(device)
        probs = model(batch_x)
        val_preds.append(probs.cpu().numpy())
        val_true.append(batch_y.cpu().numpy())

# Flatten the per-batch (batch, 1) arrays into 1-D score / label vectors.
val_probs = np.concatenate(val_preds).flatten()
val_true_flat = np.concatenate(val_true).flatten()

# False/true positive rates across all thresholds, and the area under them.
fpr, tpr, thresholds = roc_curve(val_true_flat, val_probs)
roc_auc = auc(fpr, tpr)

plt.figure(figsize=(10, 8))
plt.plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (area = {roc_auc:.3f})')
# Chance diagonal for reference.
plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate', fontsize=14)
plt.ylabel('True Positive Rate', fontsize=14)
plt.title('Receiver Operating Characteristic (ROC) Curve', fontsize=16)
plt.legend(loc="lower right", fontsize=12)
plt.grid(alpha=0.3)
plt.savefig(os.path.join(output_dir, 'roc_curve.png'), dpi=300, bbox_inches='tight')
plt.show()

# Plot per-epoch training and validation metrics.
epochs = np.arange(1, len(train_aucs) + 1)

plt.figure(figsize=(12, 8))

# Top panel: accuracy curves
plt.subplot(2, 1, 1)
plt.plot(epochs, train_accs, 'b-', label='Training Accuracy')
plt.plot(epochs, val_accs, 'r-', label='Validation Accuracy')
plt.title('Training and Validation Accuracy vs Epochs', fontsize=14)
plt.xlabel('Epochs', fontsize=12)
plt.ylabel('Accuracy', fontsize=12)
plt.legend(fontsize=12)
plt.grid(alpha=0.3)

# Bottom panel: AUC curves
plt.subplot(2, 1, 2)
plt.plot(epochs, train_aucs, 'b-', label='Training AUC')
plt.plot(epochs, val_aucs, 'r-', label='Validation AUC')
plt.title('Training and Validation AUC vs Epochs', fontsize=14)
plt.xlabel('Epochs', fontsize=12)
plt.ylabel('AUC Value', fontsize=12)
plt.legend(fontsize=12)
plt.grid(alpha=0.3)
plt.ylim([0.5, 1.0])

# When early stopping fired, mark the epoch that produced the best
# validation AUC (`patience` epochs before training halted). The previous
# extra guard `len(epochs) > (len(train_aucs) - patience)` was tautological
# — len(epochs) always equals len(train_aucs) — so only the counter check
# is meaningful.
if counter >= patience:
    early_stop_epoch = len(train_aucs) - patience
    plt.axvline(x=early_stop_epoch, color='g', linestyle='--', label='Early Stop Point')
    plt.legend(fontsize=12)

plt.tight_layout()
plt.savefig(os.path.join(output_dir, 'training_metrics.png'), dpi=300, bbox_inches='tight')
plt.show()

# Model training and evaluation complete
print("LSTM model training and evaluation completed!")