import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from Bio import SeqIO
from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset, DataLoader
from torch.optim.lr_scheduler import ReduceLROnPlateau


# Configuration parameters
class Config:
    """Hyper-parameters shared by the dataset, model, and training loop.

    Used as a plain namespace of class attributes; never instantiated.
    """
    seed = 42  # RNG seed for torch / numpy
    device = "cuda:0" if torch.cuda.is_available() else "cpu"  # compute device
    batch_size = 6  # Larger batch size for simpler model
    lr = 0.001  # Learning rate (Adam)
    epochs = 30  # Maximum training epochs (early stopping may end sooner)
    seq_vocab = "AUCG"  # RNA nucleotides; index in this string is the class label
    coord_dims = 7 * 3  # 7 backbone points with 3 coordinates each, flattened to 21
    hidden_dim = 128  # Hidden layer size
    dropout = 0.2  # Dropout rate
    patience = 5  # Early-stopping patience (epochs without val-accuracy improvement)

# Simplified Dataset class
class RNADataset(Dataset):
    """Eagerly loads (coordinate, sequence) tensor pairs into memory.

    Expects ``coords_dir`` to hold ``<id>.npy`` arrays of shape [L, 7, 3]
    and ``seqs_dir`` to hold matching ``<id>.fasta`` files whose sequences
    use only the characters in ``Config.seq_vocab``.

    Each sample is ``(coord_tensor [L, 21] float32, seq_tensor [L] long)``.
    """

    def __init__(self, coords_dir, seqs_dir):
        self.samples = []

        # sorted() makes dataset order deterministic across filesystems,
        # which keeps the later seeded train/val/test split reproducible.
        for fname in sorted(os.listdir(coords_dir)):
            if not fname.endswith(".npy"):
                continue  # skip stray files (e.g. .DS_Store, README)

            # Load coordinates: [L, 7, 3]; replace NaNs (missing atoms) with 0
            coord = np.load(os.path.join(coords_dir, fname))
            coord = np.nan_to_num(coord, nan=0.0)

            # Load corresponding sequence
            seq_id = os.path.splitext(fname)[0]
            seq_path = os.path.join(seqs_dir, f"{seq_id}.fasta")
            seq = str(next(SeqIO.parse(seq_path, "fasta")).seq)

            # Fail fast on mismatched files rather than training on bad pairs
            if len(seq) != len(coord):
                raise ValueError(
                    f"{seq_id}: sequence length {len(seq)} != coordinate length {len(coord)}"
                )

            # Flatten the 7x3 backbone points into one 21-dim feature vector per position
            coord_tensor = torch.tensor(
                coord.reshape(-1, Config.coord_dims), dtype=torch.float32
            )
            seq_tensor = torch.tensor(
                [Config.seq_vocab.index(c) for c in seq], dtype=torch.long
            )

            self.samples.append((coord_tensor, seq_tensor))

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        return self.samples[idx]

# Collate function for DataLoader
def collate_fn(batch):
    """Pad a batch of (coords, seq) pairs to the batch's longest length.

    Returns ``(padded_coords, padded_seqs, lengths)``; the padded tensors
    are moved to ``Config.device`` while ``lengths`` stays on CPU.
    """
    coords, seqs = zip(*batch)
    lengths = [seq.size(0) for seq in seqs]
    longest = max(lengths)
    batch_size = len(batch)

    # Zero-filled buffers; real data is copied into the leading positions
    coord_pad = torch.zeros(batch_size, longest, Config.coord_dims)
    seq_pad = torch.zeros(batch_size, longest, dtype=torch.long)

    for row, (coord, seq) in enumerate(zip(coords, seqs)):
        coord_pad[row, :coord.size(0)] = coord
        seq_pad[row, :seq.size(0)] = seq

    return coord_pad.to(Config.device), seq_pad.to(Config.device), torch.tensor(lengths)

# Simplified Model
class SimpleRNAModel(nn.Module):
    """Per-position nucleotide classifier over backbone-coordinate features.

    A position-wise MLP embeds each 21-dim coordinate vector, two 1D
    convolutions mix neighbouring positions, and a small head emits one
    logit per nucleotide in ``Config.seq_vocab`` at every position.
    """

    def __init__(self):
        super().__init__()
        hidden = Config.hidden_dim

        # Position-wise feature embedding (no cross-position interaction yet).
        # Layer layout matches the saved-checkpoint state_dict keys.
        self.feature_extractor = nn.Sequential(
            nn.Linear(Config.coord_dims, hidden),
            nn.ReLU(),
            nn.Dropout(Config.dropout),
            nn.Linear(hidden, hidden),
            nn.ReLU(),
            nn.Dropout(Config.dropout),
        )

        # Two same-padding convolutions give each position a 5-wide receptive field
        self.conv1 = nn.Conv1d(hidden, hidden, kernel_size=3, padding=1)
        self.conv2 = nn.Conv1d(hidden, hidden, kernel_size=3, padding=1)

        # Per-position classification head over the 4 nucleotides
        self.classifier = nn.Sequential(
            nn.Linear(hidden, hidden // 2),
            nn.ReLU(),
            nn.Dropout(Config.dropout),
            nn.Linear(hidden // 2, len(Config.seq_vocab)),
        )

    def forward(self, x, lengths):
        """Map ``x`` [batch, seq_len, coord_dims] to logits [batch, seq_len, 4].

        ``lengths`` is accepted for API compatibility but not used; padded
        positions are filtered out later by the loss computation.
        """
        embedded = self.feature_extractor(x)        # [B, L, H]
        mixed = embedded.transpose(1, 2)            # [B, H, L] for Conv1d
        mixed = F.relu(self.conv1(mixed))
        mixed = F.relu(self.conv2(mixed))
        mixed = mixed.transpose(1, 2)               # back to [B, L, H]
        return self.classifier(mixed)               # [B, L, 4]

# Training function
def train(model, loader, optimizer, criterion):
    """Run one training epoch.

    The loss is computed per sample over its true (unpadded) length and
    averaged across the batch, so padding never contributes gradients.

    Returns ``(mean batch loss, token-level accuracy)``.
    """
    model.train()
    loss_sum = 0.0
    n_correct = 0
    n_tokens = 0

    for coords, seqs, lengths in loader:
        optimizer.zero_grad()
        logits = model(coords, lengths)

        # Accumulate masked loss / accuracy sample by sample
        batch_loss = 0
        batch_correct = 0
        for i, valid_len in enumerate(lengths):
            valid_logits = logits[i, :valid_len]
            valid_targets = seqs[i, :valid_len]
            batch_loss = batch_loss + criterion(valid_logits, valid_targets)
            batch_correct += (valid_logits.argmax(dim=1) == valid_targets).sum().item()
        batch_loss = batch_loss / len(lengths)

        batch_loss.backward()
        optimizer.step()

        loss_sum += batch_loss.item()
        n_correct += batch_correct
        n_tokens += lengths.sum().item()

    return loss_sum / len(loader), n_correct / n_tokens

# Evaluation function
def evaluate(model, loader, criterion):
    """Evaluate the model without gradient tracking.

    Mirrors ``train``: per-sample masked loss averaged over the batch,
    token-level accuracy over true (unpadded) positions only.

    Returns ``(mean batch loss, token-level accuracy)``.
    """
    model.eval()
    loss_sum = 0.0
    n_correct = 0
    n_tokens = 0

    with torch.no_grad():
        for coords, seqs, lengths in loader:
            logits = model(coords, lengths)

            batch_loss = 0
            batch_correct = 0
            for i, valid_len in enumerate(lengths):
                valid_logits = logits[i, :valid_len]
                valid_targets = seqs[i, :valid_len]
                batch_loss = batch_loss + criterion(valid_logits, valid_targets)
                batch_correct += (valid_logits.argmax(dim=1) == valid_targets).sum().item()

            loss_sum += (batch_loss / len(lengths)).item()
            n_correct += batch_correct
            n_tokens += lengths.sum().item()

    return loss_sum / len(loader), n_correct / n_tokens

# Main execution
if __name__ == "__main__":
    # Set random seeds for reproducibility
    torch.manual_seed(Config.seed)
    np.random.seed(Config.seed)

    # Load dataset
    full_dataset = RNADataset("./saisdata/coords", "./saisdata/seqs")

    # Stratify splits by sequence-length quartile so every split sees a
    # similar length distribution. Compute the quartile bins once.
    lengths = [len(seq) for _, seq in full_dataset]
    length_bins = np.quantile(lengths, [0.25, 0.5, 0.75])

    # random_state makes the splits reproducible on their own, instead of
    # silently depending on the global NumPy RNG state.
    train_idx, test_idx = train_test_split(
        range(len(full_dataset)),
        test_size=0.2,
        random_state=Config.seed,
        stratify=np.digitize(lengths, bins=length_bins))
    train_idx, val_idx = train_test_split(
        train_idx,
        test_size=0.125,  # 0.125 of the remaining 80% = 10% of total
        random_state=Config.seed,
        stratify=np.digitize([lengths[i] for i in train_idx],
                             bins=length_bins))

    train_set = torch.utils.data.Subset(full_dataset, train_idx)
    val_set = torch.utils.data.Subset(full_dataset, val_idx)
    test_set = torch.utils.data.Subset(full_dataset, test_idx)

    # Create DataLoaders (collate_fn pads and moves batches to the device)
    train_loader = DataLoader(train_set, batch_size=Config.batch_size,
                              shuffle=True, collate_fn=collate_fn)
    val_loader = DataLoader(val_set, batch_size=Config.batch_size,
                            collate_fn=collate_fn)
    test_loader = DataLoader(test_set, batch_size=Config.batch_size,
                             collate_fn=collate_fn)

    # Initialize model, optimizer, loss, and LR schedule (halve LR when
    # validation accuracy plateaus for 2 epochs)
    model = SimpleRNAModel().to(Config.device)
    optimizer = optim.Adam(model.parameters(), lr=Config.lr)
    criterion = nn.CrossEntropyLoss()
    scheduler = ReduceLROnPlateau(optimizer, 'max', patience=2, factor=0.5, verbose=True)

    # Training loop with early stopping on validation accuracy
    best_acc = 0
    no_improve = 0

    for epoch in range(Config.epochs):
        train_loss, train_acc = train(model, train_loader, optimizer, criterion)
        val_loss, val_acc = evaluate(model, val_loader, criterion)

        # Update learning rate
        scheduler.step(val_acc)

        print(f"Epoch {epoch+1}/{Config.epochs}")
        print(f"Train Loss: {train_loss:.4f} | Train Acc: {train_acc:.4f}")
        print(f"Val Loss: {val_loss:.4f} | Val Acc: {val_acc:.4f}")

        # Save best model
        if val_acc > best_acc:
            best_acc = val_acc
            torch.save(model.state_dict(), "best_simple_model.pth")
            no_improve = 0
        else:
            no_improve += 1
            if no_improve >= Config.patience:
                print(f"Early stopping at epoch {epoch+1}")
                break

    # Final evaluation: reload the best checkpoint. map_location keeps the
    # load robust regardless of the device the weights were saved from.
    model.load_state_dict(torch.load("best_simple_model.pth",
                                     map_location=Config.device))
    test_loss, test_acc = evaluate(model, test_loader, criterion)
    print(f"\nTest Accuracy: {test_acc:.4f}")