import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.compose import ColumnTransformer
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from torch.nn import TransformerEncoder, TransformerEncoderLayer

# Set random seed for reproducibility.
# NOTE(review): this seeds torch's CPU RNG and numpy only; if CUDA
# determinism matters, torch.cuda.manual_seed_all would also be needed.
torch.manual_seed(42)
np.random.seed(42)


def get_pd_merged(
        summary_path=r"C:\Users\Administrator\PycharmProjects\PythonProject1\data\bwic.xlsx",
        detail_path=r"C:\Users\Administrator\PycharmProjects\PythonProject1\data\bid.xlsx"):
    """Load the BWIC summary and bid detail sheets and inner-join them.

    Args:
        summary_path: Excel file with one row per BWIC; must contain 'bwic_id'.
            Defaults to the original hard-coded location.
        detail_path: Excel file with one row per bid; must contain 'bwic_id'
            and 'bid_id'. Defaults to the original hard-coded location.

    Returns:
        Merged DataFrame with the join/ID columns ('bwic_id', 'bid_id')
        dropped. The 'index' column produced by reset_index() on the detail
        frame is kept; reset_train_df() removes it before training.
    """
    pd.set_option('display.max_columns', None)
    df_summary = pd.read_excel(summary_path)
    df_detail = pd.read_excel(detail_path)
    # Materialize the detail row number as an 'index' column before merging.
    df_detail.reset_index(inplace=True)
    df_res = df_summary.merge(df_detail, how='inner', on='bwic_id')
    df_res.drop(['bwic_id', 'bid_id'], axis=1, inplace=True)
    print(f"df_detail count num: {df_detail.shape[0]}")
    return df_res


def reset_train_df(df: pd.DataFrame) -> pd.DataFrame:
    """Drop the helper 'index' column added by get_pd_merged().

    Tolerates the column being absent (errors='ignore') so the function is
    safe on frames that never went through reset_index; the original raised
    KeyError in that case. Returns a new frame; the input is not mutated.
    """
    return df.drop(columns='index', errors='ignore')


class TabularDataset(Dataset):
    """Dataset yielding dicts of categorical codes, numerical features, target.

    WARNING: by default each instance derives category codes from its own
    data (`astype('category').cat.codes`), so codes are NOT guaranteed to
    agree between train/test splits built as separate instances. Pass
    `category_maps` (built from the full DataFrame) to make codes
    consistent across splits.
    """

    def __init__(self, data, target, cat_cols, num_cols, category_maps=None):
        """
        Args:
            data: DataFrame holding the categorical and numerical columns.
            target: Series of regression targets aligned with `data`.
            cat_cols: names of categorical columns.
            num_cols: names of numerical columns.
            category_maps: optional {col: ordered list of category values};
                when given, codes are assigned by position in that list and
                unseen values map to -1. When None, the original per-split
                encoding is used.
        """
        self.target = target.values
        self.cat_cols = cat_cols
        self.num_cols = num_cols

        if category_maps is None:
            # Original behavior: per-instance category codes.
            self.cat_data = data[cat_cols].apply(
                lambda x: x.astype('category').cat.codes).values
        else:
            # Shared encoding: code = position in the supplied category list.
            self.cat_data = np.column_stack([
                pd.Categorical(data[col], categories=category_maps[col]).codes
                for col in cat_cols
            ])
        self.num_data = data[num_cols].values

    def __len__(self):
        return len(self.target)

    def __getitem__(self, idx):
        return {
            'cat': torch.tensor(self.cat_data[idx], dtype=torch.long),
            'num': torch.tensor(self.num_data[idx], dtype=torch.float32),
            'target': torch.tensor(self.target[idx], dtype=torch.float32)
        }


# Transformer over tabular features: each categorical column becomes one
# "token" embedding and all numerical columns are projected into one token.
class TransformerModel(nn.Module):
    """Regression transformer for mixed categorical/numerical tabular data.

    Args:
        num_categories: cardinality of each categorical column (one
            embedding table per column).
        num_numerical: number of numerical input features.
        embed_dim: token width; must be divisible by nhead.
        nhead: attention heads per encoder layer.
        num_layers: number of stacked encoder layers.
    """

    def __init__(self, num_categories, num_numerical, embed_dim=64, nhead=4, num_layers=2):
        super(TransformerModel, self).__init__()

        # One embedding table per categorical column.
        self.embeddings = nn.ModuleList([
            nn.Embedding(num_categories[i], embed_dim) for i in range(len(num_categories))
        ])

        # Project all numerical features into a single token.
        self.num_proj = nn.Linear(num_numerical, embed_dim)

        # BUG FIX: the original comments claimed batch_first=True but the
        # flag was never passed, so the encoder treated dim 0 as the
        # sequence and attention mixed samples across the batch. forward()
        # builds [batch, seq, embed_dim] tensors, so the flag is required.
        encoder_layers = TransformerEncoderLayer(
            d_model=embed_dim,
            nhead=nhead,
            dim_feedforward=256,
            dropout=0.1,
            batch_first=True,
        )
        self.transformer = TransformerEncoder(encoder_layers, num_layers)

        # MLP regression head on the pooled representation.
        self.output = nn.Sequential(
            nn.Linear(embed_dim, 128),
            nn.ReLU(),
            nn.Linear(128, 64),
            nn.ReLU(),
            nn.Linear(64, 1)
        )

    def forward(self, cat, num):
        """cat: [batch, n_cat] long codes; num: [batch, n_num] floats.

        Returns:
            [batch] tensor of predictions.
        """
        # Embed each categorical column -> [batch, n_cat, embed_dim]
        embedded_cat = [emb(cat[:, i]) for i, emb in enumerate(self.embeddings)]
        embedded_cat = torch.stack(embedded_cat, dim=1)

        # Numerical token -> [batch, 1, embed_dim]
        projected_num = self.num_proj(num).unsqueeze(1)

        # Token sequence -> [batch, n_cat + 1, embed_dim]
        x = torch.cat([embedded_cat, projected_num], dim=1)

        # batch_first=True: no permutation needed around the encoder.
        x = self.transformer(x)

        # Mean-pool over the token (sequence) dimension.
        x = x.mean(dim=1)

        # Squeeze only the feature dim: plain .squeeze() would collapse a
        # batch of one to a 0-d tensor and break loss/metric code.
        return self.output(x).squeeze(-1)


def train_model(model, train_loader, val_loader, epochs=100, lr=0.001):
    criterion = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=lr)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=5)

    best_val_loss = float('inf')

    for epoch in range(epochs):
        model.train()
        train_loss = 0.0

        for batch in train_loader:
            optimizer.zero_grad()

            cat = batch['cat'].to(device)
            num = batch['num'].to(device)
            target = batch['target'].to(device)

            outputs = model(cat, num)
            loss = criterion(outputs, target)

            loss.backward()
            optimizer.step()

            train_loss += loss.item()

        # Validation
        model.eval()
        val_loss = 0.0
        with torch.no_grad():
            for batch in val_loader:
                cat = batch['cat'].to(device)
                num = batch['num'].to(device)
                target = batch['target'].to(device)

                outputs = model(cat, num)
                val_loss += criterion(outputs, target).item()

        train_loss /= len(train_loader)
        val_loss /= len(val_loader)

        scheduler.step(val_loss)

        # Early stopping check
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            torch.save(model.state_dict(), 'best_transformer_model.pth')

        print(f'Epoch {epoch + 1}/{epochs} - Train Loss: {train_loss:.4f}, Val Loss: {val_loss:.4f}')

    # Load best model
    model.load_state_dict(torch.load('best_transformer_model.pth'))
    return model


if __name__ == '__main__':
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"Using device: {device}")

    # Name of the regression target column.
    target = "starting_amount"
    df_data = get_pd_merged()
    df_data = reset_train_df(df_data)

    # Identify categorical and numerical columns.
    cat_cols = ['cusip', 'sett_country', 'created_by', 'request_cp', 'client_id', 'bid_successful']
    num_cols = [col for col in df_data.columns if col not in cat_cols + [target]]

    # Split data.
    train, test = train_test_split(df_data, test_size=0.2, random_state=42)

    # Create datasets.
    train_dataset = TabularDataset(train, train[target], cat_cols, num_cols)
    test_dataset = TabularDataset(test, test[target], cat_cols, num_cols)

    # Create data loaders.
    batch_size = 64
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

    # BUG FIX: size each embedding by the cardinality in the FULL dataset,
    # not the train split. The test split encodes its own categories, so
    # its codes can reach (test nunique - 1), which may exceed the train
    # split's nunique and index out of range in the embedding table.
    num_categories = [df_data[col].nunique() for col in cat_cols]

    # Initialize model.
    model = TransformerModel(
        num_categories=num_categories,
        num_numerical=len(num_cols),
        embed_dim=64,
        nhead=4,
        num_layers=2
    ).to(device)

    # Train model.
    model = train_model(model, train_loader, test_loader, epochs=100, lr=0.001)

    # Evaluate on the held-out split.
    model.eval()
    predictions = []
    targets = []

    with torch.no_grad():
        for batch in test_loader:
            cat = batch['cat'].to(device)
            num = batch['num'].to(device)
            # Renamed from `target` to avoid shadowing the target-column
            # name defined above.
            y_batch = batch['target'].to(device)

            outputs = model(cat, num)
            predictions.extend(outputs.cpu().numpy())
            targets.extend(y_batch.cpu().numpy())

    predictions = np.array(predictions)
    targets = np.array(targets)

    # Calculate regression metrics.
    rmse = np.sqrt(mean_squared_error(targets, predictions))
    mae = mean_absolute_error(targets, predictions)
    r2 = r2_score(targets, predictions)

    print(f"RMSE: {rmse:.4f}, MAE: {mae:.4f}, R2: {r2:.4f}")

    # Plot true vs predicted values over the test set.
    plt.figure(figsize=(20, 10))
    plt.plot(np.arange(len(targets)), targets, color='r', label='true y')
    plt.plot(np.arange(len(predictions)), predictions, color='g', label='pred y')
    plt.title('Test Y & Predict Y')
    plt.legend(loc='upper right')
    plt.tight_layout()
    plt.show()

    # Create output DataFrame (test_loader is not shuffled, so predictions
    # line up with the test frame's row order).
    output = pd.DataFrame({
        'cusip': test['cusip'].values,
        'starting_amount': predictions
    })
    print(output)