import argparse
import os

from sklearn.metrics import confusion_matrix
from torch.optim.lr_scheduler import StepLR

from openpyxl import writer

from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from sklearn.model_selection import KFold

gpus = [0, 1]
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(map(str, gpus))
import numpy as np
import math
import glob
import random
import itertools
import datetime
import time
import datetime
import sys
import scipy.io
import torchvision.transforms as transforms
from torchvision.utils import save_image, make_grid

from torch.utils.data import DataLoader
from torch.autograd import Variable
from torchsummary import summary
import torch.autograd as autograd
from torchvision.models import vgg19

import torch.nn as nn
import torch.nn.functional as F
import torch
import torch.nn.init as init
from torch.ao.pruning import scheduler
from torch.utils.data import Dataset
from PIL import Image
import torchvision.transforms as transforms
from sklearn.decomposition import PCA

import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt
from torch.utils.data.dataset import random_split
from sklearn.model_selection import train_test_split

from torch import nn, optim, autocast
from torch import Tensor
from PIL import Image
from torchvision.transforms import Compose, Resize, ToTensor
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange, Reduce
# from common_spatial_pattern import csp
from torchviz import make_dot
import matplotlib.pyplot as plt
# from torch.utils.tensorboard import SummaryWriter
from torch.backends import cudnn

class PatchEmbedding(nn.Module):
    """Embed an EEG-style input [batch, 1, num_electrodes, time] into a token
    sequence [batch, seq_len, emb_size] via temporal + spatial convolutions,
    pooling, and a 1x1 projection.
    """

    def __init__(self, emb_size=40):
        super().__init__()
        # Temporal convolution: 1x25 kernel slides along the time axis only.
        self.time_conv = nn.Sequential(
            nn.Conv2d(1, 64, (1, 25), (1, 1)),
            nn.BatchNorm2d(64),
            nn.ELU()
        )
        
        # Spatial convolution is created dynamically in forward(), since the
        # electrode count is only known once the first input arrives.
        # NOTE(review): a module assigned in forward() is registered on this
        # Module, but an optimizer constructed BEFORE the first forward pass
        # will not include its parameters — confirm optimizer setup in the
        # training code.
        self.spatial_conv = None
        
        # Post-processing: pool along time, project 64 -> emb_size with a 1x1
        # conv, then flatten the (h, w) grid into a token sequence.
        self.post_process = nn.Sequential(
            nn.BatchNorm2d(64),
            nn.ELU(),
            nn.AvgPool2d((1, 75), (1, 15)),
            nn.Dropout(0.4),
            nn.Conv2d(64, emb_size, (1, 1), stride=(1, 1)),
            nn.BatchNorm2d(emb_size),
            nn.ELU(),
            Rearrange('b e (h) (w) -> b (h w) e'),
        )
        
        self.emb_size = emb_size

    def forward(self, x: Tensor) -> Tensor:
        """Return token embeddings [batch, seq_len, emb_size]; on any internal
        failure, print diagnostics and return zeros of an estimated shape."""
        batch_size, _, num_electrodes, time_samples = x.shape
        
        try:
            # Apply the temporal convolution.
            x = self.time_conv(x)  # [batch, 64, num_electrodes, time]
            
            # (Re)create the spatial convolution whenever the electrode count
            # changes; its kernel height spans all electrodes at once.
            if self.spatial_conv is None or self.spatial_conv[0].weight.size(2) != num_electrodes:
                self.spatial_conv = nn.Sequential(
                    nn.Conv2d(64, 64, (num_electrodes, 1), (1, 1)),
                    nn.BatchNorm2d(64),
                    nn.ELU()
                ).to(x.device)
            
            # Apply the spatial convolution (collapses the electrode axis).
            x = self.spatial_conv(x)  # [batch, 64, 1, time]
            
            # Apply post-processing (pool, project, flatten to tokens).
            x = self.post_process(x)  # [batch, seq_len, emb_size]
            
            return x
            
        except Exception as e:
            # Deliberate best-effort fallback: report the error and return a
            # zero tensor of a plausible shape instead of raising.
            print(f"PatchEmbedding forward传播错误: {str(e)}")
            print(f"输入x形状: {x.shape}")
            
            # Estimate a reasonable sequence length (the AvgPool2d time
            # stride above is 15).
            seq_len = max(1, time_samples // 15)
            # Return a sensible default output.
            return torch.zeros(batch_size, seq_len, self.emb_size, device=x.device)


class FeedForwardBlock(nn.Sequential):
    """Position-wise MLP: expand to ``expansion * emb_size``, apply LayerNorm
    and GELU, then project back down to ``emb_size``, with dropout after both
    the activation and the output projection.
    """

    def __init__(self, emb_size, expansion, drop_p=0.3):
        hidden = expansion * emb_size
        layers = (
            nn.Linear(emb_size, hidden),
            nn.LayerNorm(hidden),
            nn.GELU(),
            nn.Dropout(drop_p),
            nn.Linear(hidden, emb_size),
            nn.Dropout(drop_p),
        )
        super().__init__(*layers)


class MultiHeadAttention(nn.Module):
    """Standard multi-head self-attention.

    Projects the input to per-head queries/keys/values, computes scaled
    dot-product attention (optionally masked), and projects the concatenated
    head outputs back to ``emb_size``.
    """

    def __init__(self, emb_size, num_heads, dropout):
        super().__init__()
        self.emb_size = emb_size
        self.num_heads = num_heads
        # Per-head width; emb_size is assumed divisible by num_heads.
        self.key_dim = emb_size // num_heads

        self.keys = nn.Linear(emb_size, self.key_dim * num_heads)
        self.queries = nn.Linear(emb_size, self.key_dim * num_heads)
        self.values = nn.Linear(emb_size, self.key_dim * num_heads)

        self.att_drop = nn.Dropout(dropout)
        self.projection = nn.Linear(self.key_dim * num_heads, emb_size)

    def forward(self, x: Tensor, mask: Tensor = None) -> Tensor:
        """Attend over x of shape [batch, tokens, emb_size].

        mask, if given, is a boolean tensor broadcastable to
        [batch, heads, tokens, tokens]; False positions are suppressed.
        Returns a tensor of shape [batch, tokens, emb_size].
        """
        b, n, _ = x.shape
        h, d = self.num_heads, self.key_dim

        # Split heads: [b, n, h*d] -> [b, h, n, d].
        queries = self.queries(x).view(b, n, h, d).permute(0, 2, 1, 3)
        keys = self.keys(x).view(b, n, h, d).permute(0, 2, 1, 3)
        values = self.values(x).view(b, n, h, d).permute(0, 2, 1, 3)

        # Raw attention scores: [b, h, n_query, n_key].
        energy = torch.einsum('bhqd, bhkd -> bhqk', queries, keys)
        if mask is not None:
            fill_value = torch.finfo(torch.float32).min
            # BUG FIX: the original called the non-existent Tensor method
            # `mask_fill` AND discarded its return value; `masked_fill`
            # returns the masked copy, which we keep.
            energy = energy.masked_fill(~mask, fill_value)

        scaling = self.key_dim ** (1 / 2)
        att = F.softmax(energy / scaling, dim=-1)
        att = self.att_drop(att)

        # Weighted sum over keys, then merge heads back: [b, n, h*d].
        out = torch.einsum('bhal, bhlv -> bhav', att, values)
        out = out.permute(0, 2, 1, 3).reshape(b, n, h * d)
        out = self.projection(out)
        return out


class ResidualAdd(nn.Module):
    """Skip connection: run the wrapped module and add its input back to the
    result (out = fn(x) + x, added in place on fn's output)."""

    def __init__(self, fn):
        super().__init__()
        self.fn = fn

    def forward(self, x, **kwargs):
        shortcut = x
        out = self.fn(x, **kwargs)
        out += shortcut
        return out


class GELU(nn.Module):
    """Exact (erf-based) Gaussian Error Linear Unit: x * Phi(x)."""

    def forward(self, input: Tensor) -> Tensor:
        # Phi(x) = 0.5 * (1 + erf(x / sqrt(2))), the standard normal CDF.
        gate = 1.0 + torch.erf(input / math.sqrt(2.0))
        return input * 0.5 * gate


class TransformerEncoderBlock(nn.Sequential):
    """Pre-norm transformer block: a residual multi-head-attention sublayer
    followed by a residual feed-forward sublayer."""

    def __init__(self,
                 emb_size,
                 num_heads=8,
                 drop_p=0.3,
                 forward_expansion=4,
                 forward_drop_p=0.1):
        # LayerNorm -> attention -> dropout, wrapped with a skip connection.
        attention_sublayer = ResidualAdd(nn.Sequential(
            nn.LayerNorm(emb_size),
            MultiHeadAttention(emb_size, num_heads, drop_p),
            nn.Dropout(drop_p),
        ))
        # LayerNorm -> MLP -> dropout, wrapped with a skip connection.
        feedforward_sublayer = ResidualAdd(nn.Sequential(
            nn.LayerNorm(emb_size),
            FeedForwardBlock(
                emb_size, expansion=forward_expansion, drop_p=forward_drop_p),
            nn.Dropout(drop_p),
        ))
        super().__init__(attention_sublayer, feedforward_sublayer)


class TransformerEncoder(nn.Sequential):
    """Stack of ``depth`` identical TransformerEncoderBlocks applied in order."""

    def __init__(self, depth, emb_size):
        blocks = (TransformerEncoderBlock(emb_size) for _ in range(depth))
        super().__init__(*blocks)


class ClassificationHead(nn.Module):
    """Mean-pool the token dimension, then map to class logits via a small
    two-layer MLP with LayerNorm, GELU, and dropout."""

    def __init__(self, emb_size, n_classes=2):
        super().__init__()

        hidden = emb_size // 2
        layers = [
            Reduce('b n e -> b e', reduction='mean'),  # average over tokens
            nn.LayerNorm(emb_size),
            nn.Linear(emb_size, hidden),
            nn.GELU(),
            nn.Dropout(0.2),
            nn.LayerNorm(hidden),
            nn.Linear(hidden, n_classes),
        ]
        self.clshead = nn.Sequential(*layers)

    def forward(self, x):
        return self.clshead(x)


class Conformer(nn.Module):
    """EEG Conformer: PatchEmbedding -> TransformerEncoder -> ClassificationHead.

    Args:
        emb_size: token embedding width shared by all stages.
        depth: number of transformer encoder blocks.
        n_classes: number of output classes (1 = single-logit binary head).
        device: device used for fallback outputs; defaults to CUDA if available.
    """

    def __init__(self, emb_size=40, depth=3, n_classes=2, device=None):
        super().__init__()
        self.device = device or torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.emb_size = emb_size
        self.n_classes = n_classes
        self.patch_embedding = PatchEmbedding(emb_size=emb_size)
        self.transformer_encoder = TransformerEncoder(depth=depth, emb_size=emb_size)
        self.classification_head = ClassificationHead(emb_size=emb_size, n_classes=n_classes)

        self.apply(self._init_weights)

    def _init_weights(self, m):
        """Xavier init for linear layers, Kaiming for convs, unit scale /
        zero shift for normalization layers."""
        if isinstance(m, nn.Linear):
            nn.init.xavier_uniform_(m.weight)
            if m.bias is not None:
                nn.init.zeros_(m.bias)
        elif isinstance(m, nn.Conv2d):
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        elif isinstance(m, (nn.BatchNorm2d, nn.LayerNorm)):
            nn.init.ones_(m.weight)
            nn.init.zeros_(m.bias)

    def forward(self, x, return_features=False, apply_activation=True):
        """Run the full model on x of shape [batch, 1, electrodes, time].

        Returns the transformer features when ``return_features`` is True,
        raw logits when ``apply_activation`` is False, otherwise sigmoid
        (single-logit head) or softmax probabilities. On internal failure,
        prints diagnostics and returns a default tensor of matching shape.
        """
        try:
            # Embedding stage.
            x = self.patch_embedding(x)

            # Transformer encoder stage.
            transformer_features = self.transformer_encoder(x)

            # Return features or the classification result.
            if return_features:
                return transformer_features

            # Classification stage.
            logits = self.classification_head(transformer_features)

            # Raw logits (e.g. for CrossEntropyLoss / BCEWithLogitsLoss).
            if not apply_activation:
                return logits

            # Single-logit binary head uses sigmoid; otherwise softmax.
            if self.n_classes == 1 or logits.shape[1] == 1:
                return torch.sigmoid(logits)
            return F.softmax(logits, dim=1)

        except Exception as e:
            # Deliberate best-effort fallback: report and return a plausible
            # default instead of raising.
            print(f"Conformer forward传播错误: {str(e)}")
            print(f"输入x形状: {x.shape}")

            batch_size = x.size(0)
            if return_features:
                seq_len = max(1, x.size(3) // 15)  # estimated sequence length
                return torch.zeros(batch_size, seq_len, self.emb_size, device=self.device)
            if self.n_classes == 1:
                # Mirrors the sigmoid path's (batch, 1) output shape.
                return torch.zeros(batch_size, 1, device=self.device)
            # BUG FIX: the old fallback returned shape (batch, 1) when
            # n_classes == 2, while the normal path produces a (batch, 2)
            # softmax; return a uniform distribution of the correct shape.
            return torch.ones(batch_size, self.n_classes, device=self.device) / self.n_classes

