from PyQt5.QtCore import QObject, pyqtSignal
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from tensorflow.keras.callbacks import Callback
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.models import Sequential
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset

class TrainingCallback(QObject, Callback):
    """Bridge Keras training events to Qt signals.

    Inherits ``keras.callbacks.Callback`` so that ``model.fit(callbacks=[...])``
    accepts it (Keras invokes ``set_params``/``set_model`` on every callback, so
    a plain QObject would fail), and QObject so it can emit signals to the GUI.

    Signals:
        epoch_completed: per-epoch metrics dict (loss/val_loss/accuracy/val_accuracy).
        training_completed: final logs dict when fit() finishes.
    """
    epoch_completed = pyqtSignal(dict)
    training_completed = pyqtSignal(dict)

    def __init__(self):
        # Initialize both bases explicitly; QObject must come first in the MRO
        # for PyQt multiple inheritance to work.
        QObject.__init__(self)
        Callback.__init__(self)

    def on_epoch_end(self, epoch, logs=None):
        # Keras may pass logs=None; guard before .get().
        logs = logs or {}
        self.epoch_completed.emit({
            'epoch': epoch,
            'loss': logs.get('loss'),
            'val_loss': logs.get('val_loss'),
            'accuracy': logs.get('accuracy'),
            'val_accuracy': logs.get('val_accuracy')
        })

    def on_train_end(self, logs=None):
        # Keras's hook is named on_train_end; delegate so the original public
        # method name keeps working for any direct callers.
        self.on_training_end(logs or {})

    def on_training_end(self, logs):
        self.training_completed.emit(logs)

class NeuralNetwork(nn.Module):
    """Feed-forward binary classifier: input -> 128 -> 64 -> 32 -> 1 (sigmoid).

    Dropout (0.3, then 0.2) follows the first two hidden layers; the final
    sigmoid maps the single logit to a probability in (0, 1).
    """

    def __init__(self, input_size):
        super().__init__()
        # Build the stack programmatically: dropout-regularized hidden layers
        # first, then the narrowing head and sigmoid output.
        stack = []
        for fan_in, fan_out, drop_p in ((input_size, 128, 0.3), (128, 64, 0.2)):
            stack.append(nn.Linear(fan_in, fan_out))
            stack.append(nn.ReLU())
            stack.append(nn.Dropout(drop_p))
        stack += [nn.Linear(64, 32), nn.ReLU(), nn.Linear(32, 1), nn.Sigmoid()]
        self.layers = nn.Sequential(*stack)

    def forward(self, x):
        """Return per-sample probabilities of shape (batch, 1)."""
        return self.layers(x)

class MLModel(QObject):
    """Trains and evaluates a binary classifier with a Keras or PyTorch backend.

    Emits Qt signals so a GUI can show live progress:
        training_progress: per-epoch metrics dict.
        training_completed: final evaluation metrics dict.

    Whichever backend is trained last is stored in ``self.model`` and used by
    ``predict()``.
    """
    training_progress = pyqtSignal(dict)
    training_completed = pyqtSignal(dict)

    def __init__(self):
        super().__init__()
        self.model = None               # Keras Sequential or torch nn.Module once trained
        self.scaler = StandardScaler()  # fit on the training split only
        self.history = []               # last Keras training history (dict of lists)

    def prepare_data(self, X, y):
        """Split X/y 80/20 (fixed seed) and standardize the features.

        The scaler is fit on the training split only, so no test-set
        statistics leak into training.

        Returns:
            (X_train_scaled, X_test_scaled, y_train, y_test)
        """
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=0.2, random_state=42
        )
        X_train_scaled = self.scaler.fit_transform(X_train)
        X_test_scaled = self.scaler.transform(X_test)
        return X_train_scaled, X_test_scaled, y_train, y_test

    @staticmethod
    def _classification_metrics(y_true, y_pred_classes):
        """Shared binary-classification metrics for both backends.

        zero_division=0 keeps degenerate predictions (e.g. a single predicted
        class) from raising warnings instead of returning a score.
        """
        return {
            'accuracy': accuracy_score(y_true, y_pred_classes),
            'precision': precision_score(y_true, y_pred_classes, zero_division=0),
            'recall': recall_score(y_true, y_pred_classes, zero_division=0),
            'f1': f1_score(y_true, y_pred_classes, zero_division=0)
        }

    def train_keras_model(self, X, y):
        """Train a Keras MLP on (X, y).

        Returns:
            (history dict, metrics dict) — per-epoch curves and final test metrics.
        """
        X_train, X_test, y_train, y_test = self.prepare_data(X, y)

        # Same architecture as the PyTorch NeuralNetwork: 128 -> 64 -> 32 -> 1.
        self.model = Sequential([
            Dense(128, activation='relu', input_shape=(X_train.shape[1],)),
            Dropout(0.3),
            Dense(64, activation='relu'),
            Dropout(0.2),
            Dense(32, activation='relu'),
            Dense(1, activation='sigmoid')
        ])
        self.model.compile(optimizer='adam',
                           loss='binary_crossentropy',
                           metrics=['accuracy'])

        # Relay Keras callback events out through this object's Qt signals.
        callback = TrainingCallback()
        callback.epoch_completed.connect(self.training_progress.emit)
        callback.training_completed.connect(self.training_completed.emit)

        history = self.model.fit(
            X_train, y_train,
            epochs=50,
            batch_size=32,
            validation_split=0.2,
            callbacks=[callback],
            verbose=0
        )
        # Keep the curves available on the instance for later inspection.
        self.history = history.history

        # Threshold sigmoid outputs at 0.5 to get hard class labels.
        y_pred_classes = (self.model.predict(X_test) > 0.5).astype(int)
        metrics = self._classification_metrics(y_test, y_pred_classes)
        return history.history, metrics

    def train_pytorch_model(self, X, y):
        """Train the PyTorch NeuralNetwork on (X, y).

        Stores the trained module in ``self.model`` so ``predict()`` works for
        this backend too (previously only the Keras path set it).

        Returns:
            metrics dict (accuracy/precision/recall/f1 on the test split).
        """
        X_train, X_test, y_train, y_test = self.prepare_data(X, y)

        # Features/targets to float32 tensors; y_test stays in its original
        # form because sklearn metrics consume it directly.
        X_train = torch.FloatTensor(X_train)
        y_train = torch.FloatTensor(y_train)
        X_test = torch.FloatTensor(X_test)

        train_loader = DataLoader(
            TensorDataset(X_train, y_train), batch_size=32, shuffle=True
        )

        model = NeuralNetwork(X_train.shape[1])
        criterion = nn.BCELoss()
        optimizer = optim.Adam(model.parameters())

        epochs = 50
        for epoch in range(epochs):
            model.train()  # enable dropout during optimization
            epoch_loss = 0.0

            for batch_X, batch_y in train_loader:
                optimizer.zero_grad()
                outputs = model(batch_X)
                # BCELoss needs matching shapes: (batch, 1) output vs (batch,) target.
                loss = criterion(outputs, batch_y.unsqueeze(1))
                loss.backward()
                optimizer.step()
                epoch_loss += loss.item()

            self.training_progress.emit({
                'epoch': epoch,
                'loss': epoch_loss / len(train_loader)
            })

        # Keep the trained network so predict() can dispatch to it.
        self.model = model

        model.eval()  # disable dropout for deterministic evaluation
        with torch.no_grad():
            y_pred_classes = (model(X_test) > 0.5).numpy().astype(int)

        metrics = self._classification_metrics(y_test, y_pred_classes)
        self.training_completed.emit(metrics)
        return metrics

    def predict(self, X):
        """Predict probabilities for X with whichever backend was trained.

        Raises:
            ValueError: if neither training method has been run yet.
        """
        if self.model is None:
            raise ValueError("Model not trained yet")

        X_scaled = self.scaler.transform(X)
        if isinstance(self.model, nn.Module):
            # PyTorch path: nn.Module has no .predict(); run a forward pass
            # in eval mode with gradients disabled.
            self.model.eval()
            with torch.no_grad():
                return self.model(torch.FloatTensor(X_scaled)).numpy()
        return self.model.predict(X_scaled)
        
class RealTimeTrainingVisualizer(QObject):
    """Accumulates per-epoch training metrics and broadcasts them for plotting.

    Signals:
        update_plot: the full accumulated history dict after each update.
    """
    update_plot = pyqtSignal(dict)

    def __init__(self):
        super().__init__()
        # One list per tracked metric; unknown keys are ignored on update.
        self.training_history = {
            name: [] for name in ('loss', 'val_loss', 'accuracy', 'val_accuracy')
        }

    def update(self, metrics):
        """Append any tracked values from ``metrics``, then emit the history."""
        for name, value in metrics.items():
            series = self.training_history.get(name)
            if series is not None:
                series.append(value)

        self.update_plot.emit(self.training_history)