import argparse
import os

from sklearn.metrics import confusion_matrix
from torch.optim.lr_scheduler import StepLR

from openpyxl import writer
from torch.ao.pruning import scheduler
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from sklearn.model_selection import KFold

gpus = [0, 1]
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(map(str, gpus))
import numpy as np
import math
import glob
import random
import itertools
import datetime
import time
import datetime
import sys
import scipy.io
import torchvision.transforms as transforms
from torchvision.utils import save_image, make_grid

from torch.utils.data import DataLoader
from torch.autograd import Variable
from torchsummary import summary
import torch.autograd as autograd
from torchvision.models import vgg19

import torch.nn as nn
import torch.nn.functional as F
import torch
import torch.nn.init as init

from torch.utils.data import Dataset
from PIL import Image
import torchvision.transforms as transforms
from sklearn.decomposition import PCA

import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt
from torch.utils.data.dataset import random_split
from sklearn.model_selection import train_test_split

from torch import nn, optim, autocast
from torch import Tensor
from PIL import Image
from torchvision.transforms import Compose, Resize, ToTensor
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange, Reduce
# from common_spatial_pattern import csp
from torchviz import make_dot
import matplotlib.pyplot as plt
# from torch.utils.tensorboard import SummaryWriter
from torch.backends import cudnn

class DepthwiseSeparableConv(nn.Module):
    """
    Factorizes a standard 2D convolution into a per-channel (depthwise)
    convolution followed by a 1x1 (pointwise) channel mixer, reducing the
    parameter count relative to a full convolution.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0):
        super(DepthwiseSeparableConv, self).__init__()
        # groups=in_channels -> each input channel is filtered independently.
        self.depthwise = nn.Conv2d(in_channels, in_channels,
                                   kernel_size=kernel_size, stride=stride,
                                   padding=padding, groups=in_channels)
        # 1x1 convolution recombines channels into the requested output width.
        self.pointwise = nn.Conv2d(in_channels, out_channels, kernel_size=1)

    def forward(self, x):
        # Spatial filtering first, then channel mixing.
        return self.pointwise(self.depthwise(x))

class SEBlock(nn.Module):
    """
    Squeeze-and-Excitation block: global-average-pool each channel into a
    scalar, pass the channel descriptor through a small bottleneck MLP, and
    rescale the input feature map by the resulting per-channel gates.
    """

    def __init__(self, channels, reduction=8):
        super(SEBlock, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        hidden = channels // reduction
        self.fc = nn.Sequential(
            nn.Linear(channels, hidden, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(hidden, channels, bias=False),
            nn.Sigmoid(),
        )

    def forward(self, x):
        batch, channels = x.shape[0], x.shape[1]
        # Squeeze: one scalar per channel.
        squeezed = self.avg_pool(x).reshape(batch, channels)
        # Excite: per-channel gate in (0, 1), broadcast over H and W.
        gates = self.fc(squeezed).reshape(batch, channels, 1, 1)
        return x * gates.expand_as(x)

class StochasticDepth(nn.Module):
    """
    Stochastic depth (per-sample residual drop) for training regularization.

    During training each sample in the batch is independently kept with
    probability ``1 - drop_prob``; kept samples are rescaled by
    ``1 / (1 - drop_prob)`` so the expected activation is unchanged.
    In eval mode (or with drop_prob == 0) the module is the identity.

    Fix: the original ``return x`` on the identity path was not indented
    under its ``if`` statement, which made the file raise IndentationError
    at import time.
    """
    def __init__(self, drop_prob=0.0):
        super(StochasticDepth, self).__init__()
        self.drop_prob = drop_prob

    def forward(self, x):
        # Identity when not training or when dropping is disabled.
        if not self.training or self.drop_prob == 0.0:
            return x

        keep_prob = 1.0 - self.drop_prob
        # One mask value per sample, broadcast over all remaining dims.
        shape = (x.shape[0],) + (1,) * (x.ndim - 1)
        # floor(keep_prob + U[0,1)) is 1 with probability keep_prob, else 0.
        mask = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
        mask = mask.floor()

        # Inverted-dropout scaling keeps E[output] == input.
        return x * mask / keep_prob

class EEGConformer(nn.Module):
    """
    Enhanced EEG Conformer model with improved generalization capabilities
    for cross-subject EEG classification.

    Pipeline: BatchNorm on the raw input -> depthwise-separable temporal
    convolutions with SE recalibration -> spatial convolution collapsing the
    electrode axis -> learnable positional embedding -> pre-LN transformer
    encoder with per-layer stochastic depth -> mean-pooled linear classifier,
    plus an auxiliary classifier on the first time step for self-distillation.

    Args:
        input_channels: channels of the input "image" (1 for raw EEG).
        num_electrodes: number of EEG electrodes (height of the input).
        num_time_points: number of temporal samples (width of the input).
        output_dim: number of classes.
        emb_size: transformer embedding width.
        depth: number of transformer encoder layers.
        num_heads: attention heads per layer.
        dropout: dropout probability used throughout.
        device: optional torch.device; defaults to CUDA when available.
    """
    def __init__(self,
                 input_channels=1,
                 num_electrodes=22,
                 num_time_points=1000,
                 output_dim=2,
                 emb_size=64,
                 depth=3,           # Reduced depth to avoid overfitting
                 num_heads=4,       # Fewer heads for more stable attention
                 dropout=0.5,       # Increased dropout for better generalization
                 device=None):
        super(EEGConformer, self).__init__()
        # Stored for callers; the module itself is not moved here.
        self.device = device or torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.num_classes = output_dim
        
        # Initial normalization 
        self.input_norm = nn.BatchNorm2d(input_channels)
        
        # Feature extraction CNN with depthwise separable convolutions.
        # NOTE(review): self.input_norm is registered both as an attribute and
        # inside this Sequential; it is a single shared module, so parameters
        # are not duplicated, but it appears twice in the module tree.
        self.feature_extractor = nn.Sequential(
            # First block
            self.input_norm,
            DepthwiseSeparableConv(input_channels, 32, (1, 25), padding=(0, 12)),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.AvgPool2d((1, 4), stride=(1, 4)),
            nn.Dropout2d(0.3),
            
            # Second block with SE module
            DepthwiseSeparableConv(32, 64, (1, 15), padding=(0, 7)),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            SEBlock(64, reduction=8),
            nn.AvgPool2d((1, 4), stride=(1, 4)),
            nn.Dropout2d(0.4),
            
            # Spatial convolution: kernel spans all electrodes, collapsing the
            # height dimension to 1.
            nn.Conv2d(64, emb_size, (num_electrodes, 1), stride=1),
            nn.BatchNorm2d(emb_size),
            nn.ReLU(),
            nn.Dropout(dropout)
            )
        
        # Calculate output sequence length after convolutions
        self.seq_len = self._calculate_seq_len(num_time_points)
        
        # Learnable position embedding (small init, BERT-style 0.02 scale)
        self.pos_embedding = nn.Parameter(torch.randn(1, self.seq_len, emb_size) * 0.02)
        
        # Transformer encoder with pre-norm architecture for training stability
        encoder_layers = nn.TransformerEncoderLayer(
            d_model=emb_size,
            nhead=num_heads,
            dim_feedforward=emb_size * 2,  # Reduced size for simplicity
            dropout=dropout,
            activation='relu',
            batch_first=True,
            norm_first=True  # Pre-LN architecture for better stability
        )
        self.transformer_encoder = nn.TransformerEncoder(
            encoder_layers, num_layers=depth
        )
        
        # Apply stochastic depth to Transformer layers
        # (maximum drop probability; scaled linearly per layer in forward()).
        self.stochastic_depth_rate = 0.1
        
        # Final classifier with simpler architecture
        self.classifier = nn.Sequential(
            nn.LayerNorm(emb_size),
            nn.Dropout(dropout),
            nn.Linear(emb_size, output_dim)
        )
        
        # Self-distillation auxiliary classifier
        self.aux_classifier = nn.Sequential(
            nn.LayerNorm(emb_size),
            nn.Dropout(dropout * 0.7),
            nn.Linear(emb_size, output_dim)
        )
        
        # Mixup regularization parameters
        self.mixup_alpha = 0.2
        
        # Feature pooling: mean over the time axis -> one vector per sample.
        self.global_pool = nn.Sequential(
            Rearrange('b n c -> b c n'),
            nn.AdaptiveAvgPool1d(1),
            Rearrange('b c 1 -> b c')
        )
        
        # Initialize weights
        self._init_weights()
    
    def _calculate_seq_len(self, time_points):
        """Calculate the sequence length after convolution layers.

        Mirrors the two conv+avgpool stages of feature_extractor:
        conv (kernel 25, pad 12) preserves length, then pool divides by 4;
        conv (kernel 15, pad 7) preserves length, then pool divides by 4.
        Integer division matches the pooling layers' floor behavior.
        """
        # First convolution + pooling
        t1 = (time_points + 2*12 - 25 + 1) // 4
        # Second convolution + pooling
        t2 = (t1 + 2*7 - 15 + 1) // 4
        return t2
    
    def _init_weights(self):
        """Initialize weights with improved techniques for generalization.

        Kaiming-normal for convs (ReLU fan-out), ones/zeros for norms,
        Xavier-uniform for linear layers. Runs over all submodules, so it
        also re-initializes the transformer's internal linear layers.
        """
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, (nn.BatchNorm2d, nn.LayerNorm)):
                nn.init.constant_(m.weight, 1.0)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                # Use Glorot/Xavier initialization for linear layers
                nn.init.xavier_uniform_(m.weight, gain=1.0)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
    
    def apply_mixup(self, x, alpha=0.2):
        """Apply mixup augmentation during training.

        Returns (mixed_x, permutation_indices, lambda). In eval mode, or
        with alpha <= 0, returns the input unchanged with (None, 1.0).
        """
        if self.training and alpha > 0:
            batch_size = x.size(0)
            # Generate mixup parameters
            lam = np.random.beta(alpha, alpha)
            # Create shuffled indices
            indices = torch.randperm(batch_size, device=x.device)
            # Mix the data
            mixed_x = lam * x + (1 - lam) * x[indices]
            return mixed_x, indices, lam
        return x, None, 1.0
    
    def apply_stochastic_depth(self, x, drop_prob):
        """Apply stochastic depth during training.

        Per-sample boolean keep mask with inverted-dropout rescaling;
        identity in eval mode or when drop_prob == 0.
        """
        if self.training and drop_prob > 0:
            keep_prob = 1 - drop_prob
            mask = torch.rand(x.size(0), 1, 1, device=x.device) < keep_prob
            return x * mask / keep_prob
        return x
    
    def forward(self, x, return_features=False, apply_activation=True):
        """Run the full model.

        Args:
            x: input tensor; shape assumed (batch, input_channels,
               num_electrodes, num_time_points) — TODO confirm with callers.
            return_features: if True, return the pooled feature vector
                (batch, emb_size) instead of logits.
            apply_activation: in eval mode, apply sigmoid/softmax to logits.

        Returns:
            Training: (logits, aux_logits). Eval: probabilities, or raw
            logits when apply_activation is False, or pooled features when
            return_features is True.
        """
        batch_size = x.size(0)
        
        # Input validation and normalization: silently zero out NaN/Inf
        # rather than propagating them.
        if torch.isnan(x).any() or torch.isinf(x).any():
            x = torch.where(torch.isnan(x) | torch.isinf(x), torch.zeros_like(x), x)
        
        # Apply mixup augmentation during training.
        # NOTE(review): indices and lam are computed but never returned, so a
        # caller cannot build the corresponding mixed-label loss — confirm the
        # training loop accounts for this.
        indices, lam = None, 1.0
        if self.training and self.mixup_alpha > 0:
            x, indices, lam = self.apply_mixup(x, self.mixup_alpha)
        
        # CNN feature extraction
        x = self.feature_extractor(x)  # [batch, channels, 1, time]
        x = x.squeeze(2)  # [batch, channels, time]
        
        # Prepare for transformer - transpose dimensions
        x = x.transpose(1, 2)  # [batch, time, channels]
        
        # Add positional encoding
        if x.size(1) <= self.pos_embedding.size(1):
            x = x + self.pos_embedding[:, :x.size(1), :]
        else:
            # If sequence is longer than positional embedding, use interpolation
            pos_embed = F.interpolate(
                self.pos_embedding.transpose(1, 2), 
                size=x.size(1),
                mode='linear',
                align_corners=False
            ).transpose(1, 2)
            x = x + pos_embed
        
        # Numerical stability check before transformer
        if torch.isnan(x).any() or torch.isinf(x).any():
            x = torch.where(torch.isnan(x) | torch.isinf(x), torch.zeros_like(x), x)
        
        # Apply transformer layers manually (instead of calling
        # self.transformer_encoder) so stochastic depth can be interleaved.
        for i in range(len(self.transformer_encoder.layers)):
            # Get the current transformer layer
            layer = self.transformer_encoder.layers[i]
            
            # Calculate stochastic depth rate for this layer
            # Increase dropout linearly with depth
            layer_drop_prob = self.stochastic_depth_rate * (i + 1) / len(self.transformer_encoder.layers)
            
            # Forward pass through the layer
            x_residual = layer(x)
            
            # Apply stochastic depth
            if self.training:
                x_residual = self.apply_stochastic_depth(x_residual, layer_drop_prob)
            
            # Add residual connection.
            # NOTE(review): nn.TransformerEncoderLayer already contains its own
            # internal skip connections, so layer(x) ~ x + attn + ff; adding x
            # again here doubles the identity path each layer and grows the
            # activation scale with depth — confirm this is intentional.
            x = x + x_residual
        
        # "CLS" token for auxiliary classification.
        # NOTE(review): no dedicated CLS token is ever prepended; x[:, 0] is
        # simply the first temporal position of the sequence.
        cls_token = x[:, 0]
        aux_logits = self.aux_classifier(cls_token)
        
        # Global pooling for main classification
        x_pooled = self.global_pool(x)
        
        # Return features if requested (aux_logits above is discarded)
        if return_features:
            return x_pooled
        
        # Main classifier
        logits = self.classifier(x_pooled)
        
        # Handle numerical instabilities
        if torch.isnan(logits).any() or torch.isinf(logits).any():
            logits = torch.where(torch.isnan(logits) | torch.isinf(logits), 
                                torch.zeros_like(logits), logits)
        
        # During training, return both main and auxiliary logits
        if self.training:
            return logits, aux_logits
        
        # For inference, just return the main logits or probabilities
        if not apply_activation:
            return logits
            
        # Apply activation based on the task: sigmoid for single-logit binary
        # output, softmax otherwise.
        if self.num_classes == 1 or (self.num_classes == 2 and logits.shape[1] == 1):
            return torch.sigmoid(logits)
        else:
            return F.softmax(logits, dim=1)
