import os

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader

from data_loader import load_data, QueryPairDataset
from config import Config


class SemanticMatchModel(nn.Module):
    """Siamese LSTM for scoring semantic similarity of query pairs.

    Both queries pass through one shared embedding table and one shared
    LSTM encoder; their final hidden states are combined (concatenation,
    difference, element-wise product) and scored by a small MLP ending in
    a sigmoid, yielding a match probability in [0, 1] per pair.
    """

    def __init__(self):
        super().__init__()
        # Shared token embedding; the padding index maps to a zero vector.
        self.embedding = nn.Embedding(
            Config.VOCAB_SIZE,
            Config.EMBEDDING_DIM,
            padding_idx=Config.PADDING_IDX
        )
        # Single-layer, unidirectional LSTM shared by both queries.
        self.encoder = nn.LSTM(
            input_size=Config.EMBEDDING_DIM,
            hidden_size=Config.HIDDEN_DIM,
            batch_first=True,
            bidirectional=False,
            num_layers=1
        )
        # Input is 4 * HIDDEN_DIM: forward() concatenates
        # [h1, h2, h1 - h2, h1 * h2].
        self.classifier = nn.Sequential(
            nn.Linear(Config.HIDDEN_DIM * 4, 64),
            nn.ReLU(),
            nn.Linear(64, 1),
            nn.Sigmoid()
        )

    def _encode(self, token_ids):
        """Embed one batch of token ids and return the LSTM's final hidden
        state for the last (only) layer, shape (batch, HIDDEN_DIM)."""
        _, (hidden, _) = self.encoder(self.embedding(token_ids))
        return hidden[-1]

    def forward(self, q1, q2):
        """Score a batch of query pairs.

        Args:
            q1, q2: LongTensors of token ids, shape (batch, seq_len)
                -- presumably padded with Config.PADDING_IDX; confirm
                against the collate function.

        Returns:
            Tensor of shape (batch, 1) with match probabilities.
        """
        left = self._encode(q1)
        right = self._encode(q2)
        # Symmetric + asymmetric interaction features between the two
        # encodings.
        pair_features = torch.cat(
            [left, right, left - right, left * right], dim=1
        )
        return self.classifier(pair_features)


def train():
    """Train SemanticMatchModel on query pairs and save its weights.

    Loads the dataset, splits it 80/20 into train/validation, trains for
    Config.EPOCHS epochs with Adam and BCE loss, prints per-batch and
    per-epoch statistics plus validation accuracy, and writes the final
    state_dict to ../../user_data/model_data/model.pth.
    """
    # Set random seed for reproducibility
    torch.manual_seed(Config.SEED)
    np.random.seed(Config.SEED)

    # Device configuration
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"Using device: {device}")

    # Load and split data (80% train / 20% validation)
    df = load_data()
    train_df = df.sample(frac=0.8, random_state=Config.SEED)
    val_df = df.drop(train_df.index)

    # Create datasets
    train_dataset = QueryPairDataset(train_df)
    val_dataset = QueryPairDataset(val_df)

    # Debug info
    print("\nData Info:")
    print(f"Training samples: {len(train_dataset)}")
    print(f"Validation samples: {len(val_dataset)}")
    sample = train_dataset[0]
    print(f"Sample data - query1 shape: {sample['query1_ids'].shape}, "
          f"query2 shape: {sample['query2_ids'].shape}, "
          f"label: {sample['labels']}")

    # Initialize model, loss and optimizer
    model = SemanticMatchModel().to(device)
    criterion = nn.BCELoss()
    optimizer = optim.Adam(model.parameters(), lr=Config.LEARNING_RATE)

    # Build both loaders once, outside the epoch loop (the original rebuilt
    # them every epoch). shuffle=True still reshuffles the training data at
    # the start of each epoch.
    train_loader = DataLoader(
        train_dataset,
        batch_size=Config.BATCH_SIZE,
        shuffle=True,
        collate_fn=QueryPairDataset.collate_fn
    )
    val_loader = DataLoader(
        val_dataset,
        batch_size=Config.BATCH_SIZE,
        collate_fn=QueryPairDataset.collate_fn
    )

    # Training loop
    print("\nStarting training...")
    for epoch in range(Config.EPOCHS):
        model.train()
        total_loss = 0
        correct = 0
        total = 0

        for batch_idx, batch in enumerate(train_loader):
            q1 = batch['query1_ids'].to(device)
            q2 = batch['query2_ids'].to(device)
            labels = batch['labels'].to(device).float()

            # Forward pass. squeeze(-1) (not squeeze()) so a final batch of
            # size 1 keeps its batch dimension and matches labels' shape in
            # BCELoss and in the accuracy comparison below.
            optimizer.zero_grad()
            outputs = model(q1, q2)
            loss = criterion(outputs.squeeze(-1), labels)

            # Backward and optimize
            loss.backward()
            optimizer.step()

            # Calculate statistics
            total_loss += loss.item()
            predicted = (outputs > 0.5).float()
            correct += (predicted.squeeze(-1) == labels).sum().item()
            total += labels.size(0)

            # Print batch progress
            if (batch_idx + 1) % 10 == 0:
                print(f"Epoch [{epoch + 1}/{Config.EPOCHS}], "
                      f"Batch [{batch_idx + 1}/{len(train_loader)}], "
                      f"Loss: {loss.item():.4f}")

        # Epoch statistics
        avg_loss = total_loss / len(train_loader)
        accuracy = 100 * correct / total
        print(f"\nEpoch [{epoch + 1}/{Config.EPOCHS}], "
              f"Average Loss: {avg_loss:.4f}, "
              f"Training Accuracy: {accuracy:.2f}%")

        # Validation (no gradient tracking needed)
        model.eval()
        with torch.no_grad():
            val_correct = 0
            val_total = 0

            for val_batch in val_loader:
                q1 = val_batch['query1_ids'].to(device)
                q2 = val_batch['query2_ids'].to(device)
                labels = val_batch['labels'].to(device).float()

                outputs = model(q1, q2)
                predicted = (outputs > 0.5).float()
                val_correct += (predicted.squeeze(-1) == labels).sum().item()
                val_total += labels.size(0)

            val_accuracy = 100 * val_correct / val_total
            print(f"Validation Accuracy: {val_accuracy:.2f}%")

    # Save model; create the target directory first so torch.save cannot
    # fail with FileNotFoundError on a fresh checkout.
    save_path = '../../user_data/model_data/model.pth'
    os.makedirs(os.path.dirname(save_path), exist_ok=True)
    torch.save(model.state_dict(), save_path)
    print("\nTraining completed. Model saved to user_data/model_data/model.pth")


# Entry point: run training only when executed as a script, not on import.
if __name__ == "__main__":
    train()