import os
import numpy as np
import matplotlib.pyplot as plt
import random
import mne
from mne.datasets.sleep_physionet.age import fetch_data
from datetime import date

import torch
from torchvision import transforms, datasets
from torch.utils import data
from torch.utils.data import Dataset, DataLoader

from pylab import mpl

import warnings

from einops import rearrange
from torch import Tensor
import torch.nn.functional as F
import torch.nn as nn
import math

from matplotlib.collections import LineCollection

from matplotlib.colors import ListedColormap, BoundaryNorm

from models.sequence_cmt import Seq_Cross_Transformer_Network  # as Seq_Cross_Transformer_Network
from models.model_blocks import PositionalEncoding, Window_Embedding, Intra_modal_atten, Cross_modal_atten, Feed_forward

from torch.nn import LayerNorm
from torch.nn import MultiheadAttention
from torch.nn import Dropout
from einops.layers.torch import Rearrange
from einops import repeat
from torch.nn import Linear

import json

class PositionalEncoding(nn.Module):
    """Sinusoidal positional encoding with dropout (Vaswani et al., 2017).

    A fixed sin/cos position signal is added to the input, which is expected
    in shape [seq_len, batch_size, embedding_dim], and dropout is applied.
    """

    def __init__(self, d_model: int, dropout: float = 0.1, max_len: int = 5000):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)

        # Pre-compute the encoding table once for every position up to max_len.
        positions = torch.arange(max_len).unsqueeze(1)
        inv_freq = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
        table = torch.zeros(max_len, 1, d_model)
        table[:, 0, 0::2] = torch.sin(positions * inv_freq)
        table[:, 0, 1::2] = torch.cos(positions * inv_freq)
        # Registered as a buffer: moves with the module's device, not trained.
        self.register_buffer('pe', table)

    def forward(self, x: Tensor) -> Tensor:
        """Add positional encodings for the first x.size(0) positions, then dropout.

        Args:
            x: Tensor of shape [seq_len, batch_size, embedding_dim].
        """
        return self.dropout(x + self.pe[:x.size(0)])

class Window_Embedding(nn.Module):
    """Tokenize a raw 1-D signal epoch into a sequence of window embeddings.

    Three parallel Conv1d branches extract features at different temporal
    scales; their outputs are concatenated along the channel axis to a total
    of ``emb_size`` channels, fused by a 1x1 conv, prepended with a learnable
    CLS token, and augmented with sinusoidal positional encodings.

    NOTE(review): the branches downsample by ``window_size``, 5*5*2 = 50 and
    25*2 = 50 respectively, so their sequence lengths (and the channel-wise
    ``torch.cat`` in ``forward``) only line up when window_size == 50 —
    confirm the window_size actually used at construction time.
    """
    def __init__(self, in_channels: int = 1, window_size: int = 50, emb_size: int = 64):
        super(Window_Embedding, self).__init__()

        # Branch 1: one wide conv — a single token per non-overlapping window.
        self.projection_1 = nn.Sequential(
            # using a conv layer instead of a linear one -> performance gains, in=>B,1,3000 out=>B,64,60
            nn.Conv1d(in_channels, emb_size // 4, kernel_size=window_size, stride=window_size),
            nn.LeakyReLU(),
            nn.BatchNorm1d(emb_size // 4),
        )
        # Branch 2: stacked small-kernel convs, total stride 5*5*2 = 50.
        self.projection_2 = nn.Sequential(
            nn.Conv1d(in_channels, emb_size // 8, kernel_size=5, stride=5),
            nn.LeakyReLU(),
            nn.Conv1d(emb_size // 8, emb_size // 4, kernel_size=5, stride=5),
            nn.LeakyReLU(),
            nn.Conv1d(emb_size // 4, (emb_size - emb_size // 4) // 2, kernel_size=2, stride=2),
            nn.LeakyReLU(),
            nn.BatchNorm1d((emb_size - emb_size // 4) // 2),
        )

        # Branch 3: medium-kernel convs, total stride 25*2 = 50.
        self.projection_3 = nn.Sequential(
            nn.Conv1d(in_channels, emb_size // 4, kernel_size=25, stride=25),
            nn.LeakyReLU(),
            nn.Conv1d(emb_size // 4, (emb_size - emb_size // 4) // 2, kernel_size=2, stride=2),
            nn.LeakyReLU(),
            nn.BatchNorm1d((emb_size - emb_size // 4) // 2),
        )

        # 1x1 conv fuses the concatenated branch channels, then moves the
        # channel axis last: (B, emb, S) -> (B, S, emb).
        self.projection_4 = nn.Sequential(
            nn.Conv1d(emb_size, emb_size, kernel_size=1, stride=1),
            nn.LeakyReLU(),
            nn.BatchNorm1d(emb_size),
            Rearrange('b e s -> b s e'), )

        # Learnable CLS token prepended in forward: (B, S, E) -> (B, S+1, E).
        self.cls_token = nn.Parameter(torch.randn(1, 1, emb_size))
        # PositionalEncoding expects (seq, batch, emb); swap axes in and out.
        self.arrange1 = Rearrange('b s e -> s b e')
        self.pos = PositionalEncoding(d_model=emb_size)
        self.arrange2 = Rearrange('s b e -> b s e ')

    def forward(self, x: Tensor) -> Tensor:
        # NOTE(review): squeeze() drops every singleton dim; this reshaping
        # appears to assume a (B, 1, T)-like input with B > 1 — confirm the
        # intended layout for batch size 1, which skips the reshape entirely.
        if x.shape[0] != 1:
            x = x.squeeze().unsqueeze(dim=1)
        b, _, _ = x.shape
        # Multi-scale features from the three parallel branches.
        x_1 = self.projection_1(x)
        x_2 = self.projection_2(x)
        x_3 = self.projection_3(x)
        # Concatenate along channels (requires equal sequence lengths — see
        # class NOTE), then fuse with the 1x1 conv and move channels last.
        x = torch.cat([x_1, x_2, x_3], dim=1)
        x = self.projection_4(x)
        cls_tokens = repeat(self.cls_token, '() s e -> b s e', b=b)
        # prepend the cls token to the input
        x = torch.cat([cls_tokens, x], dim=1)
        # add position embedding
        x = self.arrange1(x)
        x = self.pos(x)
        x = self.arrange2(x)
        return x

class Intra_modal_atten(nn.Module):
    def __init__(self, d_model=64, nhead=8, dropout=0.1,
                 layer_norm_eps=1e-5, window_size=25, First=True,
                 device=None, dtype=None) -> None:
        super(Intra_modal_atten, self).__init__()
        factory_kwargs = {'device': device, 'dtype': dtype}

        if First == True:
            self.window_embed = Window_Embedding(in_channels=1, window_size=window_size, emb_size=d_model)
        self.norm = LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
        self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=True,
                                            **factory_kwargs)
        self.dropout = Dropout(dropout)
        self.First = First

    def forward(self, x: Tensor) -> Tensor:
        if self.First == True:
            src = self.window_embed(x)
        else:
            src = x

        src2 = self.self_attn(src, src, src)[0]
        out = src + self.dropout(src2)
        out = self.norm(out)  ########
        return out

class Cross_modal_atten(nn.Module):
    def __init__(self, d_model=64, nhead=8, dropout=0.1,
                 layer_norm_eps=1e-5, First=False,
                 device=None, dtype=None) -> None:

        super(Cross_modal_atten, self).__init__()
        factory_kwargs = {'device': device, 'dtype': dtype}

        if First == True:
            self.cls_token = nn.Parameter(torch.randn(1, 1, d_model))  ######
        self.norm = LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
        self.cross_attn = MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=True,
                                             **factory_kwargs)
        self.dropout = Dropout(dropout)
        self.First = First

    def forward(self, x1: Tensor, x2: Tensor) -> Tensor:
        if len(x1.shape) == 2:
            x = torch.cat([x1.unsqueeze(dim=1), x2.unsqueeze(dim=1)], dim=1)
        else:
            x = torch.cat([x1, x2.unsqueeze(dim=1)], dim=1)
        b, _, _ = x.shape
        if self.First == True:
            cls_tokens = repeat(self.cls_token, '() s e -> b s e', b=b)  ######
            # prepend the cls token to the input
            src = torch.cat([cls_tokens, x], dim=1)  #####
        else:
            src = x
        src2 = self.cross_attn(src, src, src)[0]
        out = src + self.dropout(src2)
        out = self.norm(out)
        return out

class Feed_forward(nn.Module):
    def __init__(self, d_model=64, dropout=0.1, dim_feedforward=512,
                 layer_norm_eps=1e-5,
                 device=None, dtype=None) -> None:
        super(Feed_forward, self).__init__()
        factory_kwargs = {'device': device, 'dtype': dtype}

        self.norm = LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
        self.linear1 = Linear(d_model, dim_feedforward, **factory_kwargs)
        self.relu = nn.ReLU()
        self.dropout1 = Dropout(dropout)
        self.linear2 = Linear(dim_feedforward, d_model, **factory_kwargs)
        self.dropout2 = Dropout(dropout)

    def forward(self, x: Tensor) -> Tensor:
        src = x
        src2 = self.linear2(self.dropout1(self.relu(self.linear1(src))))
        out = src + self.dropout2(src2)
        out = self.norm(out)
        return out

class Epoch_Cross_Transformer(nn.Module):
    """Single-epoch encoder: per-modality self-attention plus EEG/EOG fusion.

    Each modality is embedded and self-attended independently; the two
    modality CLS vectors are then fused by a cross-modal attention stage that
    prepends its own CLS token.
    """

    def __init__(self, d_model=64, dim_feedforward=512, window_size=25):
        super(Epoch_Cross_Transformer, self).__init__()

        # Intra-modal encoders (First=True: raw signals are window-embedded).
        self.eeg_atten = Intra_modal_atten(d_model=d_model, nhead=8, dropout=0.1,
                                           window_size=window_size, First=True)
        self.eog_atten = Intra_modal_atten(d_model=d_model, nhead=8, dropout=0.1,
                                           window_size=window_size, First=True)

        # Cross-modal fusion over the two modality CLS vectors.
        self.cross_atten = Cross_modal_atten(d_model=d_model, nhead=8, dropout=0.1, First=True)

    def forward(self, eeg: Tensor, eog: Tensor):
        """Encode one EEG/EOG epoch pair.

        Returns:
            (cls_vec, features) — the fused cross-modal CLS embedding of shape
            (B, 1, d_model), and [self_eeg, self_eog, cross] intermediate
            feature tensors for interpretation.
        """
        eeg_encoded = self.eeg_atten(eeg)
        eog_encoded = self.eog_atten(eog)

        # Fuse the per-modality CLS vectors (sequence position 0).
        fused = self.cross_atten(eeg_encoded[:, 0, :], eog_encoded[:, 0, :])

        # Position 0 of the fused sequence is the cross-modal CLS summary.
        cls_vec = fused[:, 0, :].unsqueeze(dim=1)

        features = [eeg_encoded, eog_encoded, fused]
        return cls_vec, features

class SleepEDF_Seq_MultiChan_Dataset_Inference(Dataset):
    """Sliding-window inference dataset over paired EEG / EOG epoch arrays.

    Item ``idx`` is a window of ``num_seq`` consecutive 30-s epochs from each
    modality, optionally normalized either subject-wise (per-epoch mean/sd
    arrays) or with global [eeg, eog] statistics.
    """

    def __init__(self, eeg_file, eog_file, device, mean_eeg_l=None, sd_eeg_l=None,
                 mean_eog_l=None, sd_eog_l=None, mean_eeg2_l=None, sd_eeg2_l=None, transform=None,
                 target_transform=None, sub_wise_norm=False, num_seq=5,
                 mean_l=None, sd_l=None):
        """
        Args:
            eeg_file, eog_file: array-like of shape (n_epochs, ...), one entry
                per 30-s epoch for the respective modality.
            device: torch device (stored only; indexing happens on CPU arrays).
            mean_eeg_l, sd_eeg_l, mean_eog_l, sd_eog_l: per-epoch statistics
                used when ``sub_wise_norm`` is True.
            mean_eeg2_l, sd_eeg2_l: kept for interface compatibility (unused).
            transform: optional transform applied to both modalities.
            target_transform: kept for interface compatibility (unused).
            sub_wise_norm: normalize with the subject-wise statistics above.
            num_seq: number of consecutive epochs per item.
            mean_l, sd_l: optional global statistics as [eeg_stat, eog_stat];
                used only when ``sub_wise_norm`` is False.
        """
        self.eeg = eeg_file
        self.eog = eog_file

        print(f"Shape of EEG : {self.eeg.shape} , EOG : {self.eog.shape}")

        if sub_wise_norm == True:
            print(f"Reading Subject wise mean and sd")

            self.mean_eeg = mean_eeg_l
            self.sd_eeg = sd_eeg_l
            self.mean_eog = mean_eog_l
            self.sd_eog = sd_eog_l

        # Bug fix: __getitem__ referenced self.mean / self.sd, which were
        # never assigned, raising AttributeError whenever sub_wise_norm was
        # False. They are now always set (None disables global normalization).
        self.mean = mean_l
        self.sd = sd_l

        self.sub_wise_norm = sub_wise_norm
        self.device = device
        self.transform = transform
        self.target_transform = target_transform
        self.num_seq = num_seq

    def __len__(self):
        # NOTE(review): the last valid window start is n - num_seq, so this
        # count drops one window (n - num_seq + 1 would cover all). Kept
        # as-is to preserve existing indexing behavior.
        return self.eeg.shape[0] - self.num_seq

    def __getitem__(self, idx):
        """Return (eeg_window, eog_window) for epochs [idx, idx + num_seq)."""
        eeg_data = self.eeg[idx:idx + self.num_seq].squeeze()
        eog_data = self.eog[idx:idx + self.num_seq].squeeze()

        if self.sub_wise_norm == True:
            # Normalize with the statistics of the window's first epoch.
            eeg_data = (eeg_data - self.mean_eeg[idx]) / self.sd_eeg[idx]
            eog_data = (eog_data - self.mean_eog[idx]) / self.sd_eog[idx]
        elif self.mean is not None and self.sd is not None:
            # Global statistics: index 0 for EEG, index 1 for EOG.
            eeg_data = (eeg_data - self.mean[0]) / self.sd[0]
            eog_data = (eog_data - self.mean[1]) / self.sd[1]
        if self.transform:
            eeg_data = self.transform(eeg_data)
            eog_data = self.transform(eog_data)
        return eeg_data, eog_data

class Seq_Cross_Transformer_Network(nn.Module):
    """Sequence-level sleep-staging network.

    Five independent per-epoch cross-modal encoders produce one CLS embedding
    each; an inter-epoch attention stage and a feed-forward block refine the
    five embeddings; one linear head per epoch position emits 5-class logits.
    """

    def __init__(self, d_model=128, dim_feedforward=512, window_size=25):
        super(Seq_Cross_Transformer_Network, self).__init__()

        # One independent encoder per epoch position. Attribute names
        # epoch_1 .. epoch_5 are preserved for checkpoint compatibility.
        for pos in range(1, 6):
            setattr(self, f'epoch_{pos}',
                    Epoch_Cross_Transformer(d_model=d_model,
                                            dim_feedforward=dim_feedforward,
                                            window_size=window_size))

        # Inter-epoch self-attention over the stacked CLS embeddings
        # (First=False: inputs are already embedded).
        self.seq_atten = Intra_modal_atten(d_model=d_model, nhead=8, dropout=0.1,
                                           window_size=window_size, First=False)

        self.ff_net = Feed_forward(d_model=d_model, dropout=0.1,
                                   dim_feedforward=dim_feedforward)

        # One 5-class classifier head per epoch position (mlp_1 .. mlp_5).
        for pos in range(1, 6):
            setattr(self, f'mlp_{pos}',
                    nn.Sequential(nn.Flatten(), nn.Linear(d_model, 5)))

    def forward(self, eeg: Tensor, eog: Tensor, num_seg=5):
        """Classify five consecutive epochs.

        Args:
            eeg, eog: tensors indexed as [batch, channel, epoch, time].
            num_seg: kept for interface compatibility; the network is fixed
                at five epochs per sequence.

        Returns:
            ([out_1 .. out_5], [feat_1 .. feat_5, seq]) — one logits tensor
            per epoch position, plus each encoder's feature list and the
            final inter-epoch sequence representation.
        """
        cls_vectors = []
        feature_lists = []
        for pos in range(5):
            encoder = getattr(self, f'epoch_{pos + 1}')
            cls_vec, feats = encoder(eeg[:, :, pos, :], eog[:, :, pos, :])
            cls_vectors.append(cls_vec)
            feature_lists.append(feats)

        # Inter-epoch attention and feed-forward over the stacked CLS tokens.
        seq = torch.cat(cls_vectors, dim=1)
        seq = self.seq_atten(seq)
        seq = self.ff_net(seq)

        # One classifier head per epoch position.
        logits = [getattr(self, f'mlp_{pos + 1}')(seq[:, pos, :]) for pos in range(5)]

        feature_lists.append(seq)
        return logits, feature_lists

def plot_interpret(i, x, y, dydx, fig, axs, axs_no, signal_type="EEG"):
    """Draw the signal (x, y) on axs[axs_no] as a line colored by attention.

    Args:
        i: epoch index (0-based; shown 1-based in the title).
        x, y: 1-D arrays with the signal's time axis and amplitude.
        dydx: per-point attention values used to color the trace.
        fig: the enclosing figure (unused here; kept for interface parity).
        axs: 2-D array of axes; axs_no: (row, col) pair selecting one.
        signal_type: modality label shown in the title.
    """
    # Build the (n-1, 2, 2) segment array LineCollection expects.
    pts = np.array([x, y]).T.reshape(-1, 1, 2)
    segs = np.concatenate([pts[:-1], pts[1:]], axis=1)

    ax = axs[axs_no[0]][axs_no[1]]

    # Map attention values onto the 'Reds' colormap over their full range.
    norm = plt.Normalize(dydx.min(), dydx.max())
    lc = LineCollection(segs, cmap='Reds', norm=norm)
    lc.set_array(dydx)
    lc.set_linewidth(15)
    ax.add_collection(lc)

    ax.set_title(f'Epoch {i + 1} {signal_type}', fontsize=100)

    # Hide tick labels and tick marks; only the colored trace remains.
    ax.xaxis.set_tick_params(labelbottom=False)
    ax.yaxis.set_tick_params(labelleft=False)
    ax.set_xticks([])
    ax.set_yticks([])
    ax.set_xlim(x.min(), x.max())
    ax.set_ylim(y.min() - 0.2, y.max() + 0.2)

def atten_interpret(q, k):
    """Return scaled dot-product attention weights between query q and key k.

    Computes softmax(q @ k^T / sqrt(d)) over the last axis, where d is the
    query's feature dimension.
    """
    scale = math.sqrt(q.size(-1))
    scores = q @ k.transpose(-2, -1) / scale
    return torch.softmax(scores, dim=-1)

def new_signal_extract_2(path_1, channel='eeg1', filter=True, freq=[0.2, 40]):
    """Load one channel from a Sleep-EDF EDF recording and cut it into 30-s epochs.

    Args:
        path_1: path to the .edf file.
        channel: which signal to keep — 'eeg1', 'eeg2', 'eog' or 'emg'
            (indexes into the fixed Sleep-EDF channel layout below).
        filter: apply a band-pass filter when True.
            NOTE(review): shadows the builtin ``filter``; the mutable default
            ``freq=[0.2, 40]`` is shared across calls — harmless here since it
            is never mutated, but worth cleaning up.
        freq: [low, high] band-pass cut-off frequencies in Hz.

    Returns:
        (epochs_data, subject_length, mean, std): the epochs array from MNE,
        a 1-element array with the epoch count, and the scalar signal
        statistics tiled once per epoch.
    """
    all_channels = ('EEG Fpz-Cz', 'EEG Pz-Oz', 'EOG horizontal', 'EMG submental', 'Resp oro-nasal', 'Temp rectal', 'Event marker')

    data = [path_1]
    # Map the channel argument onto the Sleep-EDF channel ordering above.
    signal2idx = {"eeg1": 0, "eeg2": 1, "eog": 2, "emg": 3}

    # Exclude every channel except the requested one at read time.
    all_channels_list = list(all_channels)
    all_channels_list.remove(all_channels[signal2idx[channel]])
    exclude_channels = tuple(all_channels_list)

    sleep_signals = mne.io.read_raw_edf(data[0], verbose=True, exclude=exclude_channels, preload=True)

    # Filtering
    # NOTE(review): tmax is computed but never used below — confirm whether a
    # crop/epoching step was meant to consume it.
    tmax = 30. - 1. / sleep_signals.info['sfreq']

    if filter == True:
        sleep_signals = sleep_signals.copy().filter(l_freq=freq[0], h_freq=freq[1])

    # Cut the continuous recording into fixed 30-second epochs.
    duration = 30
    epochs = mne.make_fixed_length_epochs(sleep_signals, duration=duration, preload=True)

    # Calculate mean and std of the signal epochs.
    # NOTE(review): np.mean/np.std are applied to the MNE Epochs object
    # directly — presumably reducing over its underlying data array; confirm
    # this matches the intended per-subject statistics.
    signal_mean = np.mean(epochs)
    signal_std = np.std(epochs)

    main_ext_raw_data = epochs.get_data()
    main_sub_len = np.array([len(epochs)])
    # Repeat the scalar statistics once per epoch so downstream per-epoch
    # indexing (mean[idx], sd[idx]) works uniformly.
    main_mean = np.tile(signal_mean, (len(epochs), 1)).squeeze()
    main_std = np.tile(signal_std, (len(epochs), 1)).squeeze()

    return main_ext_raw_data, main_sub_len, main_mean, main_std

def main(file_name, start_time = 0):
    # 创建picture文件夹保存图片
    current_directory = os.getcwd()
    new_folder_name = "static/picture"
    full_folder_path = os.path.join(current_directory, new_folder_name)
    os.makedirs(full_folder_path, exist_ok=True)

    # 设置显示中文字体
    mpl.rcParams["font.sans-serif"] = ["SimHei"]
    # start_time_point = 38820  ### Set the time point in the signal to visualize.  ( For figure 6 in the paper Subject 38 Day 1 Start time point 38820)
    file_name = file_name

    path_1 = f'{file_name}'

    eeg1_1,  len_1, eeg1_m1, eeg1_std1 = new_signal_extract_2(path_1, channel='eeg1', filter=True, freq=[0.2, 40])
    eog_1, _, eog_m1, eog_std1 = new_signal_extract_2(path_1,  channel='eog', filter=True, freq=[0.2, 40])

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    num_seq = 5
    # up_of_t = len(eeg1_1) - num_seq - 1
    infer_dataset = SleepEDF_Seq_MultiChan_Dataset_Inference(eeg_file=eeg1_1,
                                                             eog_file=eog_1,
                                                             # label_file=labels_1,
                                                             device=device, mean_eeg_l=eeg1_m1, sd_eeg_l=eeg1_std1,
                                                             mean_eog_l=eog_m1, sd_eog_l=eog_std1,
                                                             sub_wise_norm=True, num_seq=num_seq, # wait for change
                                                             transform=transforms.Compose([
                                                                 transforms.ToTensor()
                                                             ]))

    infer_data_loader = data.DataLoader(infer_dataset, batch_size=1, shuffle=False)  # 16
    # len(infer_data_loader)

    eeg_data, eog_data= next(iter(infer_data_loader))


    # print(f"EEG batch shape: {eeg_data.size()}")
    # print(f"EOG batch shape: {eog_data.size()}")
    # print(f"EMG batch shape: {eeg2_data.size()}")
    # print(f"Labels batch shape: {label.size()}")

    eeg_data_temp = eeg_data[0].squeeze()  # (0)
    eog_data_temp = eog_data[0].squeeze()  # (0)

    # print(eeg_data_temp.shape)

    t = np.arange(0, 30, 1 / 100)
    plt.figure(figsize=(10, 5))
    plt.plot(eeg_data_temp[0].squeeze())
    plt.plot(eog_data_temp[0].squeeze() + 5)
    plt.title(f"EEG & EOG表格")

    save_dir = "static/picture"
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    save_path = os.path.join(save_dir, "picture_1.jpg")
    plt.savefig(save_path, dpi=300)

    # plt.show()# show放在savefig前会输出空白图片,这里修改了

    test_model = torch.load('./checkpoint_model_best_acc.pth.tar', map_location=device, weights_only=False)
    test_model.eval()
    # print(sum(p.numel() for p in test_model.parameters() if p.requires_grad))


    """# Get Predictions for the Subject"""
    warnings.filterwarnings("ignore")

    batch_size = len(infer_data_loader)
    infer_data_loader = data.DataLoader(infer_dataset, batch_size=batch_size, shuffle=False)  # 16

    t = start_time

    eeg_data, eog_data = next(iter(infer_data_loader))

    # l = eeg_data.shape[0] // num_seq

    # new_pred = []

    # for i in range(l):
    #     pred, feat_list = test_model(eeg_data[i*num_seq].unsqueeze(0).float().to(device), eog_data[i*num_seq].unsqueeze(0).float().to(device))
    #     pred = np.array([i.argmax(-1).item() for i in pred])
    #     for j in range(num_seq):
    #         new_pred.append(pred[j])

    pred, feat_list = test_model(eeg_data[t].unsqueeze(0).float().to(device), eog_data[t].unsqueeze(0).float().to(device))
    pred = np.array([i.argmax(-1).item() for i in pred])


    # for i in feat_list[:-1]:
    #     print(i[0].shape, i[1].shape, i[2].shape)

    # print(feat_list[-1].shape)

    label_dict = ['Wake', 'N1', 'N2', 'N3', 'REM']
    pred_list = [label_dict[i] for i in pred]
    # print("pred_list",pred_list)
    # print("new_pred_list",new_pred)

    # data_new = [
    #     {'name': 'N1期', 'value': 0},
    #     {'name': 'N2期', 'value': 0},
    #     {'name': 'N3期', 'value': 0},
    #     {'name': 'REM期', 'value': 0}
    # ]
    #
    # for i in new_pred:
    #     if i == 1:
    #         data_new[0]['value'] += 0.5
    #     elif i == 2:
    #         data_new[1]['value'] += 0.5
    #     elif i == 3:
    #         data_new[2]['value'] += 0.5
    #     elif i == 4:
    #         data_new[3]['value'] += 0.5

    # 定义文件夹和文件名
    # directory = "sleep_proportion"  # 存储数据的文件夹名称
    # file_name = "sleep_data.json"  # 存储数据的文件名称
    # file_path = os.path.join(directory, file_name)  # 拼接完整的文件路径
    #
    # # 如果文件夹不存在，则创建文件夹
    # if not os.path.exists(directory):
    #     os.makedirs(directory)  # 创建文件夹
    #
    # # 将data_new数据保存到JSON文件中
    # with open(file_path, 'w') as file:  # 打开文件用于写入
    #     json.dump(data_new, file)  # 将data_new转换为JSON格式并写入文件

#########################################################################################################################################
    # ## 画图 ##
    # ###### Interpreting inter-epoch relationships  ##########
    # plt.rcParams['axes.linewidth'] = 2
    seq_features = feat_list[-1]  ##extracting learned inter-epoch features
    # # seq_atten = atten_interpret(seq_features.squeeze(),seq_features.squeeze()).squeeze().detach().cpu().numpy()
    # # print(seq_atten.shape)
    # # plt.figure()
    # # plt.imshow(seq_atten)
    #
    # fig, axs = plt.subplots(5, 1, figsize=(1 * 5, 15 * 8))
    seq_atten_list = []
    for i in range(num_seq):
        seq_atten = atten_interpret(seq_features.squeeze()[i].unsqueeze(0),
                                    seq_features.squeeze()).squeeze().detach().cpu().numpy()
    #
    #     rgba_colors = np.zeros((num_seq, 4))
    #     rgba_colors[:, 0] = 0  # value of red intensity divided by 256
    #     rgba_colors[i, 0] = 0.4  # value of red intensity divided by 256
    #     rgba_colors[:, 1] = 0  # value of green intensity divided by 256
    #     rgba_colors[:, 2] = 0.4  # value of blue intensity divided by 256
    #     rgba_colors[i, 2] = 0
        seq_atten = seq_atten / seq_atten.max()
    #
        seq_atten_list.append(seq_atten)  #
    #     rgba_colors[:, -1] = seq_atten
    #     ###############################################################################
    #     # axs[i].bar(np.arange(1, 16), seq_atten / seq_atten.max(),  # color ='blue',
    #     #            color=rgba_colors, align='center', width=0.8)
    #     axs[i].bar(np.arange(1, 6), seq_atten / seq_atten.max(),  # color ='blue',
    #                color=rgba_colors, align='center', width=0.8)
    #     ###############################################################################
    #     # axs[i//5][i%5].set_title('')
    #     axs[i].tick_params(axis='x', labelsize=30)  # ,which = 'both')
    #     axs[i].tick_params(axis='y', labelsize=30)
    #     axs[i].set_xlabel('Epochs', fontsize=30)
    #     yticks = axs[i].yaxis.get_major_ticks()
    #     yticks[0].label1.set_visible(False)
    #
    # save_dir = "static/picture"
    # if not os.path.exists(save_dir):
    #     os.makedirs(save_dir)
    # save_path = os.path.join(save_dir, "picture_2.jpg")
    # plt.savefig(save_path, dpi=300)
    #
    # ###### Interpreting cross-modal relationships  ##########
    # ###############################################################################
    # # fig, axs = plt.subplots(15, 1, figsize=(1 * 5, 15 * 10))
    # fig, axs = plt.subplots(5, 1, figsize=(1 * 5, 15 * 10))
    # ###############################################################################
    #
    cross_atten_list = []  #
    # from matplotlib.font_manager import FontProperties
    # my_font = FontProperties(fname='env/simhei.ttf')
    #
    for i in range(num_seq):
        cross_features = feat_list[i][-1]  ##extracting learned cross-modal features
        cross_atten = atten_interpret(cross_features.squeeze()[0].unsqueeze(0),
                                      cross_features.squeeze()[1:]).squeeze().detach().cpu().numpy()
        cross_atten_list.append(cross_atten)  #
    #
    #     rgba_colors = np.zeros((2, 4))
    #     rgba_colors[:, 0] = 0.4  # value of red intensity divided by 256
    #     rgba_colors[:, 1] = 0  # value of green intensity divided by 256
    #     rgba_colors[:, 2] = 0  # value of blue intensity divided by 256
    #     rgba_colors[:, -1] = cross_atten + 0.1
    #     axs[i].bar(['EEG', 'EOG'], cross_atten,  # color ='red',
    #                color=rgba_colors, align='center', width=0.9)
    #     axs[i].tick_params(axis='x', labelsize=30)  # ,which = 'both')
    #     axs[i].tick_params(axis='y', labelsize=30)
    #     axs[i].set_ylim(0, 1.02)
    #     axs[i].set_xlabel('注意力占比', fontsize=30, fontproperties=my_font)
    # # Build the file name, including the current date and time
    #
    # save_dir = "static/picture"
    # if not os.path.exists(save_dir):
    #     os.makedirs(save_dir)
    # save_path = os.path.join(save_dir, "picture_3.jpg")
    # plt.savefig(save_path, dpi=300)
    # # plt.savefig(f'/content/cross_modal_sub_{subject_no}_day_{days}_t_{t}_part_1.pdf',dpi = 300)
    #
    # ###### Interpreting intra-modal relationships  ##########
    #     ###### Interpreting intra-modal relationships  ##########
    # plt.rcParams['axes.linewidth'] = 20
    #
    # ###############################################################################
    # # fig, axs = plt.subplots(15, 2, figsize=(2 * 50, 15 * 20))
    # fig, axs = plt.subplots(5, 2, figsize=(2 * 50, 15 * 20))
    # ###############################################################################
    # # seq_features = feat_list[-1]
    # Intra-modal attention per epoch: how strongly the fused cross-modal
    # summary token attends to each window of the EEG / EOG epoch.
    eeg_atten_list = []
    eog_atten_list = []
    for i in range(num_seq):
        eeg_features = feat_list[i][0]     # learned intra-modal EEG features
        eog_features = feat_list[i][1]     # learned intra-modal EOG features
        cross_features = feat_list[i][-1]  # learned cross-modal features

        eeg_atten = atten_interpret(cross_features.squeeze()[0].unsqueeze(0),
                                    eeg_features.squeeze()[1:])
        eog_atten = atten_interpret(cross_features.squeeze()[0].unsqueeze(0),
                                    eog_features.squeeze()[1:])

        # Upsample the window-level weights to the 3000-sample epoch so they can
        # be overlaid on the raw signal (presumably 60 windows per epoch — the
        # 3000 // 60 factor; confirm against Window_Embedding). F.interpolate
        # replaces the deprecated F.upsample; with mode='nearest' the output is
        # identical, without the deprecation warning.
        eeg_atten = F.interpolate(eeg_atten.unsqueeze(0), scale_factor=3000 // 60,
                                  mode='nearest').squeeze().detach().cpu().numpy()
        eog_atten = F.interpolate(eog_atten.unsqueeze(0), scale_factor=3000 // 60,
                                  mode='nearest').squeeze().detach().cpu().numpy()

        eeg_atten_list.append(eeg_atten)
        eog_atten_list.append(eog_atten)
    #
    #     t1 = np.arange(0, 30, 1 / 256)
    #     plot_interpret(i, t1, eeg_data[t, 0, i, :].squeeze().cpu().numpy(), eeg_atten, fig, axs, [i, 0], signal_type="EEG")
    #     plot_interpret(i, t1, eog_data[t, 0, i, :].squeeze().cpu().numpy(), eog_atten, fig, axs, [i, 1], signal_type="EOG")
    #
    # save_dir = "static/picture"
    # if not os.path.exists(save_dir):
    #     os.makedirs(save_dir)
    # save_path = os.path.join(save_dir, "picture_4.jpg")
    # plt.savefig(save_path, dpi=200)
#########################################################################################################################################


    """# Final plot similar to the paper"""
    # fig, axs = plt.subplots(num_seq, 4,figsize=(200, 20*num_seq),gridspec_kw={'width_ratios': [2,2,10,10]}) # for more clear figure
    fig, axs = plt.subplots(num_seq, 4, figsize=(100, 15 * num_seq), gridspec_kw={'width_ratios': [2, 2, 10, 10]})  # 增加整体高度
    plt.subplots_adjust(hspace=0.4)  # 增加子图之间的垂直间距
    title_font_size = fig.dpi * 0.8  # 增大标题字体大小
    label_font_size = fig.dpi * 0.6  # 增大标签字体大小
    for i in range(num_seq):
        # Plotting inter-epoch attention ##############################
        rgba_colors = np.zeros((num_seq, 4))
        rgba_colors[:, 0] = 0  # value of red intensity divided by 256
        rgba_colors[i, 0] = 0.4  # value of red intensity divided by 256
        rgba_colors[:, 1] = 0  # value of green intensity divided by 256
        rgba_colors[:, 2] = 0.4  # value of blue intensity divided by 256
        rgba_colors[i, 2] = 0
        rgba_colors[:, -1] = seq_atten_list[i]
        axs[i][0].bar(np.arange(1, num_seq + 1), seq_atten_list[i] / seq_atten_list[i].max(),
                      # /seq_attn[i].max(),# color ='blue',
                      color=rgba_colors, align='center')
        # axs[i//5][i%5].set_title('')
        axs[i][0].tick_params(axis='x', labelsize=label_font_size)
        axs[i][0].tick_params(axis='y', labelsize=label_font_size)
        axs[i][0].set_xlabel('Epochs', fontsize=title_font_size)
        yticks = axs[i][0].yaxis.get_major_ticks()
        yticks[0].label1.set_visible(False)

        # Plotting cross-modal attention ##############################
        rgba_colors = np.zeros((2, 4))
        rgba_colors[:, 0] = 0.4  # value of red intensity divided by 256
        rgba_colors[:, 1] = 0  # value of green intensity divided by 256
        rgba_colors[:, 2] = 0  # value of blue intensity divided by 256
        rgba_colors[:, -1] = cross_atten_list[i]
        axs[i][1].bar(['EEG', 'EOG'], cross_atten_list[i],  # color ='red',
                      color=rgba_colors, align='center')
        axs[i][1].tick_params(axis='x', labelsize=label_font_size)
        axs[i][1].tick_params(axis='y', labelsize=label_font_size)
        axs[i][1].set_ylim(0, 1.02)
        axs[i][1].set_xlabel('Signal', fontsize=title_font_size)

        # # Plotting EEG attention ##############################
        eeg_atten_epoch = eeg_atten_list[i]
        ###############################################################################
        t1 = np.arange(0, 3000, 1)
        ###############################################################################
        plot_interpret(i, t1, eeg_data[t, 0, i, :].squeeze().cpu().numpy(), eeg_atten, fig, axs, [i, 2],
                       signal_type=f"EEG Class:{pred_list[i]}")

        # plot_interpret(t1,eog_data[t,0,i,:].squeeze().cpu().numpy(),eog_atten,fig,[i,1],signal_type = "EOG")

        # # Plotting EOG attention #
        eog_atten_epoch = eog_atten_list[i]
        plot_interpret(i, t1, eog_data[t, 0, i, :].squeeze().cpu().numpy(), eog_atten, fig, axs, [i, 3],
                       signal_type=f"EOG Class:{pred_list[i]}")

    # time = [int(record_id.split('-')[1].split('_')[i]) for i in range(num_epoch_seq)]
    # plt.subplots_adjust(wspace=0.2)
    # fig.suptitle('Interpretation for patient '+str([38])+' for 30s epochs from '+str(start_time_point)+'s',fontsize = title_font_size*2)

    save_dir = "static/picture"
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    save_path = os.path.join(save_dir, "picture_5.jpg")
    plt.savefig(save_path, dpi=100)

if __name__ == '__main__':
    # Script entry point: only run the pipeline when executed directly,
    # not when this module is imported.
    main()