import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
from torch.nn import TransformerEncoderLayer, TransformerEncoder
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score

# Fix the RNG seeds up front so repeated runs are reproducible.
np.random.seed(42)
torch.manual_seed(42)


# --- Data Loading & Preprocessing ---
def load_and_preprocess_data():
    """Load the BWIC summary and bid detail spreadsheets, join them, and
    standardize the numerical feature columns in place.

    Returns:
        df (pd.DataFrame): merged table with numerical columns z-scored.
        target (str): name of the regression target column.
        cat_cols (list[str]): categorical feature column names.
        num_cols (list[str]): numerical feature column names.
    """
    # NOTE(review): hard-coded absolute Windows paths tie this to one machine;
    # consider making these parameters or reading from config.
    df_summary = pd.read_excel(r"C:\Users\Administrator\PycharmProjects\PythonProject1\data\bwic.xlsx", engine='openpyxl')
    df_detail = pd.read_excel(r"C:\Users\Administrator\PycharmProjects\PythonProject1\data\bid.xlsx", engine='openpyxl')
    # Inner join (pandas merge default) on bwic_id, then drop the key columns.
    df = df_summary.merge(df_detail, on='bwic_id').drop(['bwic_id', 'bid_id'], axis=1)

    # Separate target and features: anything not categorical and not the
    # target is treated as numerical.
    target = "starting_amount"
    cat_cols = ['cusip', 'sett_country', 'created_by', 'request_cp', 'client_id', 'bid_successful']
    num_cols = [col for col in df.columns if col not in cat_cols + [target]]

    # Standardize numerical features (target is intentionally left unscaled).
    # NOTE(review): the scaler is fit on the FULL dataset here, before the
    # train/test split in __main__ — test-set statistics leak into training.
    # Consider fitting on the training split only and transforming the rest.
    scaler = StandardScaler()
    df[num_cols] = scaler.fit_transform(df[num_cols])

    return df, target, cat_cols, num_cols


# --- Dataset Class ---
class TabularDataset(Dataset):
    """Wrap a DataFrame of categorical + numerical features for the model.

    Categorical columns are integer-encoded per column and shifted by +1 so
    that code 0 is reserved for the model's padding row (`padding_idx=0` in
    the embeddings). The original unshifted codes mapped the first category
    of every feature onto the frozen zero padding vector. A side benefit:
    pandas encodes NaN as -1, which now lands on the padding row (0) instead
    of producing an invalid negative embedding index.

    NOTE(review): codes are derived independently per dataset instance, so a
    train split and a validation split may encode the same category value
    differently — consider fitting one shared category mapping across splits.
    """

    def __init__(self, data, target, cat_cols, num_cols):
        """Args:
            data: source DataFrame containing all referenced columns.
            target: name of the target column (float regression target).
            cat_cols: categorical column names, encoded to int codes.
            num_cols: numerical column names, used as-is.
        """
        # +1 shift: pandas category codes start at 0 and use -1 for NaN;
        # after the shift, 0 is exclusively the padding/unknown code.
        self.cat_data = data[cat_cols].apply(lambda x: x.astype('category').cat.codes).values + 1
        self.num_data = data[num_cols].values
        self.target = data[target].values

    def __len__(self):
        return len(self.target)

    def __getitem__(self, idx):
        # One sample: long codes for embeddings, float32 for the rest.
        return {
            'cat': torch.tensor(self.cat_data[idx], dtype=torch.long),
            'num': torch.tensor(self.num_data[idx], dtype=torch.float32),
            'target': torch.tensor(self.target[idx], dtype=torch.float32)
        }


# --- Enhanced Transformer Model ---
class TransformerModel(nn.Module):
    """Transformer encoder over embedded categorical + projected numerical features.

    Each categorical feature becomes one token via its own embedding table;
    all numerical features are linearly projected to one extra token. The
    resulting token sequence is encoded by a Transformer, mean-pooled, and
    regressed to a single scalar per sample.

    Args:
        num_categories: per-feature cardinalities; embedding i is allocated
            num_categories[i] + 1 rows, with index 0 reserved as padding.
        num_numerical: number of numerical input features.
        embed_dim: token embedding width (must be divisible by nhead).
        nhead: number of attention heads.
        num_layers: number of Transformer encoder layers.
    """

    def __init__(self, num_categories, num_numerical, embed_dim=128, nhead=8, num_layers=4):
        super().__init__()

        # One embedding table per categorical feature; row 0 is a frozen
        # zero vector (padding_idx=0 receives no gradient updates).
        self.embeddings = nn.ModuleList([
            nn.Embedding(num_categories[i] + 1, embed_dim, padding_idx=0)
            for i in range(len(num_categories))
        ])

        # Project all numerical features to one token, normalized to match
        # the scale of the embedded tokens.
        self.num_proj = nn.Sequential(
            nn.Linear(num_numerical, embed_dim),
            nn.LayerNorm(embed_dim)
        )

        # Transformer encoder; batch_first so inputs are (batch, seq, dim).
        encoder_layers = TransformerEncoderLayer(
            d_model=embed_dim,
            nhead=nhead,
            dim_feedforward=512,
            dropout=0.1,
            batch_first=True
        )
        self.transformer = TransformerEncoder(encoder_layers, num_layers)

        # Regression head: pooled token -> hidden -> scalar.
        self.output = nn.Sequential(
            nn.Linear(embed_dim, 256),
            nn.SiLU(),
            nn.LayerNorm(256),
            nn.Linear(256, 1)
        )

    def forward(self, cat, num):
        """Run a forward pass.

        Args:
            cat: (batch, n_cat_features) int64 category codes.
            num: (batch, n_num_features) float32 numerical features.

        Returns:
            (batch,) tensor of scalar predictions.
        """
        # One token per categorical feature: (batch, n_cat_features, embed_dim).
        embedded_cat = torch.stack([emb(cat[:, i]) for i, emb in enumerate(self.embeddings)], dim=1)

        # One token for all numerical features: (batch, 1, embed_dim).
        projected_num = self.num_proj(num).unsqueeze(1)

        # Concatenate along the sequence axis and encode.
        x = torch.cat([embedded_cat, projected_num], dim=1)
        x = self.transformer(x)

        # Mean-pool over tokens, then regress to a scalar per sample.
        x = x.mean(dim=1)
        # BUGFIX: squeeze only the trailing feature axis. A bare .squeeze()
        # collapsed a batch of size 1 to a 0-d tensor, breaking loss
        # computation and metric collection for single-sample batches.
        return self.output(x).squeeze(-1)


# --- Training Function with Advanced LR Scheduling ---
def train_model(model, train_loader, val_loader, epochs=100, max_lr=0.001):
    """Train `model` with AdamW + OneCycleLR, early stopping, and checkpointing.

    Saves the best-validation-loss weights to 'best_model.pth' in the current
    working directory and reloads them before returning.

    Args:
        model: module whose forward takes (cat, num) batches.
        train_loader: yields dicts with 'cat', 'num', 'target' tensors.
        val_loader: same format, used for validation each epoch.
        epochs: maximum number of epochs.
        max_lr: peak learning rate for the OneCycle schedule.

    Returns:
        The model with the best checkpoint's weights loaded.
    """
    # Run on whatever device the model already lives on.
    device = next(model.parameters()).device
    criterion = nn.HuberLoss()  # More robust to outliers than plain MSE
    optimizer = optim.AdamW(model.parameters(), lr=max_lr, weight_decay=1e-4)

    # OneCycleLR for super-convergence; stepped once PER BATCH below, so
    # total steps = epochs * len(train_loader).
    scheduler = OneCycleLR(
        optimizer,
        max_lr=max_lr,
        epochs=epochs,
        steps_per_epoch=len(train_loader),
        pct_start=0.3,
        anneal_strategy='cos'
    )

    best_val_loss = float('inf')
    early_stop_patience = 10
    patience_counter = 0

    for epoch in range(epochs):
        model.train()
        train_loss = 0.0

        for batch in train_loader:
            optimizer.zero_grad()

            cat = batch['cat'].to(device)
            num = batch['num'].to(device)
            target = batch['target'].to(device)

            outputs = model(cat, num)
            loss = criterion(outputs, target)

            loss.backward()
            nn.utils.clip_grad_norm_(model.parameters(), 1.0)  # Gradient clipping
            optimizer.step()
            scheduler.step()

            train_loss += loss.item()

        # Validation (mean per-batch loss over val_loader)
        val_loss = evaluate(model, val_loader, criterion)
        train_loss /= len(train_loader)

        print(
            f"Epoch {epoch + 1}/{epochs} | Train Loss: {train_loss:.4f} | Val Loss: {val_loss:.4f} | LR: {scheduler.get_last_lr()[0]:.2e}")

        # Early stopping: checkpoint on improvement, bail after
        # `early_stop_patience` epochs without one.
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            torch.save(model.state_dict(), 'best_model.pth')
            patience_counter = 0
        else:
            patience_counter += 1
            if patience_counter >= early_stop_patience:
                print("Early stopping triggered!")
                break

    # Restore the best checkpoint before returning.
    model.load_state_dict(torch.load('best_model.pth'))
    return model


# --- Evaluation Function ---
def evaluate(model, loader, criterion):
    """Compute the mean per-batch loss of `model` over `loader`.

    Runs in eval mode with gradients disabled; does not modify the model.

    Args:
        model: module whose forward takes (cat, num) tensors.
        loader: yields dicts with 'cat', 'num', 'target' tensors.
        criterion: loss function applied to (outputs, target).

    Returns:
        float: total loss divided by the number of batches.
    """
    # BUGFIX: derive the device from the model's parameters instead of
    # reading a module-global `device`, which only existed when the file was
    # run as a script (importing this function raised NameError).
    device = next(model.parameters()).device
    model.eval()
    total_loss = 0.0
    with torch.no_grad():
        for batch in loader:
            cat = batch['cat'].to(device)
            num = batch['num'].to(device)
            target = batch['target'].to(device)
            outputs = model(cat, num)
            total_loss += criterion(outputs, target).item()
    return total_loss / len(loader)


# --- Main Execution ---
if __name__ == '__main__':
    # Prefer GPU when available; all tensors are moved explicitly below.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"Using device: {device}")

    # Load and preprocess data.
    # NOTE(review): numerical scaling happens inside load_and_preprocess_data
    # on the FULL dataset, i.e. before this split — test statistics leak into
    # the training features.
    df, target, cat_cols, num_cols = load_and_preprocess_data()
    train_df, test_df = train_test_split(df, test_size=0.2, random_state=42)

    # Create datasets.
    # NOTE(review): TabularDataset encodes categories independently per
    # instance, so the same category value may get different integer codes in
    # train vs. validation — consider a shared mapping fitted on train_df.
    train_dataset = TabularDataset(train_df, target, cat_cols, num_cols)
    val_dataset = TabularDataset(test_df, target, cat_cols, num_cols)

    # Data loaders (no shuffling for validation).
    batch_size = 128
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=batch_size)

    # Initialize model. Cardinalities come from train_df only; a category
    # present only in test_df could exceed the embedding size — TODO confirm
    # splits share the same category sets.
    num_categories = [train_df[col].nunique() + 1 for col in cat_cols]  # +1 for padding
    model = TransformerModel(
        num_categories=num_categories,
        num_numerical=len(num_cols),
        embed_dim=128,
        nhead=8,
        num_layers=4
    ).to(device)

    # Train (checkpoints to best_model.pth and reloads the best weights).
    model = train_model(model, train_loader, val_loader, epochs=100, max_lr=0.001)

    # Final evaluation on the validation loader.
    predictions = []
    targets = []
    model.eval()
    with torch.no_grad():
        for batch in val_loader:
            cat = batch['cat'].to(device)
            num = batch['num'].to(device)
            # target tensors never left the CPU, so .numpy() is safe here
            targets.extend(batch['target'].numpy())
            predictions.extend(model(cat, num).cpu().numpy())

    # Metrics (computed on the scaled/raw target as produced by preprocessing)
    rmse = np.sqrt(mean_squared_error(targets, predictions))
    mae = mean_absolute_error(targets, predictions)
    r2 = r2_score(targets, predictions)
    print(f"\nFinal Metrics: RMSE={rmse:.4f}, MAE={mae:.4f}, R²={   r2:.4f}")