import matplotlib.pyplot as plt
from scipy import signal
import numpy as np
import torch
import torch.nn as nn

class TimeAttention(nn.Module):
    """Temporal attention gate.

    Normalizes the input by its mean over the time axis (dim=2) and passes the
    result through a sigmoid, yielding per-element attention weights in (0, 1)
    with the same shape as the input.
    """

    def __init__(self):
        super(TimeAttention, self).__init__()
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Mean over the time dimension; keepdim=True so it broadcasts against x.
        mean_x = x.mean(dim=2, keepdim=True)

        # Guard against division by zero. The original code used an in-place
        # masked assignment (mean_x[mean_x == 0] = 1e-8), which mutates a
        # tensor that participates in the autograd graph; torch.where builds
        # the guarded tensor functionally and is safe under backprop.
        safe_mean = torch.where(mean_x == 0,
                                torch.full_like(mean_x, 1e-8),
                                mean_x)

        normalized_x = x / safe_mean

        # Attention weights A_t in (0, 1).
        A_t = self.sigmoid(normalized_x)

        return A_t

class BiLSTM(nn.Module):
    """Bidirectional LSTM preceded by a temporal-attention gating of the input.

    The input is multiplied element-wise by the weights produced by
    TimeAttention before being fed to the LSTM; only the per-step outputs are
    returned (the final hidden/cell states are discarded).
    """

    def __init__(self, L, hidden_size):
        super(BiLSTM, self).__init__()
        # Bidirectional LSTM: output feature size is 2 * hidden_size.
        self.bilstm = nn.LSTM(L, hidden_size, bidirectional=True)
        self.TimeAttention = TimeAttention()

    def forward(self, x):
        # Gate the input by its temporal attention weights.
        attn_weights = self.TimeAttention(x)
        gated = attn_weights * x
        # Discard the (h, c) state tuple; only the sequence output is used.
        seq_out, _ = self.bilstm(gated)
        return seq_out

class ChannelAttention(nn.Module):
    """Channel attention gate.

    Squeezes the spatial dimensions with both average and max pooling,
    concatenates the two descriptors, passes them through a 1x1-conv
    bottleneck (down-project, ReLU, up-project, ReLU), splits the result back
    into its avg- and max-derived halves, sums them, and applies a sigmoid to
    produce per-channel weights of shape (B, in_channels, 1, 1).
    """

    def __init__(self, in_channels, reduction_ratio=16):
        super(ChannelAttention, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.max_pool = nn.AdaptiveMaxPool2d(1)

        # Down-projection weights plus an extra learnable bias term.
        self.qd = nn.Conv2d(in_channels*2, in_channels*2 // reduction_ratio, kernel_size=1)
        self.bd = nn.Parameter(torch.zeros(1, in_channels*2 // reduction_ratio, 1, 1))

        # Up-projection weights plus an extra learnable bias term.
        self.qu = nn.Conv2d(in_channels*2 // reduction_ratio, in_channels*2, kernel_size=1)
        self.bu = nn.Parameter(torch.zeros(1, in_channels*2, 1, 1))

        self.sigmoid = nn.Sigmoid()
        self.relu = nn.ReLU()

    def forward(self, x):
        # Squeeze spatial dims: both descriptors are (B, C, 1, 1).
        pooled_avg = self.avg_pool(x)
        pooled_max = self.max_pool(x)

        stacked = torch.cat([pooled_avg, pooled_max], dim=1)  # (B, 2C, 1, 1)

        # Bottleneck: down-project then up-project, ReLU after each step.
        hidden = self.relu(self.qd(stacked) + self.bd)
        expanded = self.relu(self.qu(hidden) + self.bu)

        # Split back into the avg- and max-derived halves and fuse them.
        half = pooled_avg.size(1)
        avg_part, max_part = expanded.split(half, dim=1)

        # Channel attention weights A_c in (0, 1).
        return self.sigmoid(avg_part + max_part)

class SpatialAttention(nn.Module):
    """Spatial attention gate.

    Computes per-pixel max and mean across the channel dimension, stacks them
    into a 2-channel map, applies a 3x3 convolution plus bias and ReLU, and
    squashes with a sigmoid to get a (N, 1, w, h) attention map.
    """

    def __init__(self, in_channels):
        super(SpatialAttention, self).__init__()

        # 3x3 conv (qs) mapping the 2-channel [max, mean] stack to one map;
        # bs is an extra scalar bias initialized to zero.
        self.qs = nn.Conv2d(2, 1, kernel_size=3, padding=1)
        self.bs = nn.Parameter(torch.zeros(1))

        self.sigmoid = nn.Sigmoid()
        self.relu = nn.ReLU()

    def forward(self, x):
        # Per-pixel channel statistics, each of shape (N, 1, w, h).
        max_map = torch.max(x, dim=1, keepdim=True)[0]
        mean_map = torch.mean(x, dim=1, keepdim=True)

        stacked = torch.cat((max_map, mean_map), dim=1)  # (N, 2, w, h)

        # Conv + bias + ReLU, then sigmoid -> weights in (0, 1).
        response = self.relu(self.qs(stacked) + self.bs)
        return self.sigmoid(response)

class AFNetLayer(nn.Module):
    """Three-stage attention CNN over a 2-D input (e.g. a spectrogram).

    Each stage is: 3x3 conv (+ learnable bias, LeakyReLU), then the feature
    map is reweighted by channel and spatial attention (sum of the two gated
    maps) and downsampled by 2x2 max-pooling. The final map is flattened and
    projected to a 128-dim feature vector. The fc layer assumes the final map
    flattens to 128*2*2 = 512 values, which constrains the input spatial size.
    """

    def __init__(self, in_channels):
        super(AFNetLayer, self).__init__()
        # Input projection to 32 channels: (N, in, w, h) -> (N, 32, w-2, h-2).
        self.input = nn.Conv2d(in_channels, 32, kernel_size=3, stride=1)

        # Stage 1: 32 -> 32 channels.
        self.channel_attention1 = ChannelAttention(in_channels=32)
        self.spatial_attention1 = SpatialAttention(in_channels=32)
        self.conv1 = nn.Conv2d(32, 32, kernel_size=3, stride=1)
        self.bias1 = nn.Parameter(torch.zeros(1, 32, 1, 1))

        # Stage 2: 32 -> 64 channels.
        self.channel_attention2 = ChannelAttention(in_channels=64)
        self.spatial_attention2 = SpatialAttention(in_channels=64)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, stride=1)
        self.bias2 = nn.Parameter(torch.zeros(1, 64, 1, 1))

        # Stage 3: 64 -> 128 channels.
        self.channel_attention3 = ChannelAttention(in_channels=128)
        self.spatial_attention3 = SpatialAttention(in_channels=128)
        self.conv3 = nn.Conv2d(64, 128, kernel_size=3, stride=1)
        self.bias3 = nn.Parameter(torch.zeros(1, 128, 1, 1))

        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)

        # Output projection: flattened final map -> 128-dim feature.
        self.fc = nn.Linear(128 * 2 * 2, 128)

        self.activation = nn.LeakyReLU(negative_slope=0.01)

    def _attend_and_pool(self, features, channel_att, spatial_att):
        """Reweight features by channel + spatial attention, then max-pool."""
        gated = features * channel_att(features) + features * spatial_att(features)
        return self.maxpool(gated)

    def forward(self, x):
        projected = self.input(x)

        stage1 = self.activation(self.conv1(projected) + self.bias1)
        stage1 = self._attend_and_pool(stage1, self.channel_attention1, self.spatial_attention1)

        stage2 = self.activation(self.conv2(stage1) + self.bias2)
        stage2 = self._attend_and_pool(stage2, self.channel_attention2, self.spatial_attention2)

        stage3 = self.activation(self.conv3(stage2) + self.bias3)
        stage3 = self._attend_and_pool(stage3, self.channel_attention3, self.spatial_attention3)

        # Flatten and project to the 128-dim output feature.
        flat = stage3.view(stage3.size(0), -1)
        return self.fc(flat)

class Classify(nn.Module):
    """Final classifier head: flatten, then project 384 features to 2 logits."""

    def __init__(self):
        super(Classify, self).__init__()
        self.flatten = nn.Flatten()
        self.fc = nn.Linear(384, 2)

    def forward(self, x):
        # Flatten everything after the batch dim, then the linear projection.
        return self.fc(self.flatten(x))


# ---------------------------------------------------------------------------
# Driver script: load a raw single-channel signal, run it through the BiLSTM
# (time-domain) branch and the AFNet (STFT/spectrogram) branch, concatenate
# the two feature vectors, and classify into 2 classes.
# NOTE(review): hard-coded Google Drive path — this only runs in Colab with
# that file mounted.
# ---------------------------------------------------------------------------

# Input data
fs = 200.0  # sampling frequency (Hz)
# t = np.linspace(0, 120, 24000, endpoint=False)  # time vector (200 samples/s)
# x = np.sin(2 * np.pi * 10 * t) +  np.sin(2 * np.pi * 50 * t)  # synthetic test signal
d1 = np.loadtxt('/content/drive/MyDrive/Data as txt Files/sub1.txt')
# 24000 samples (120 s at 200 Hz) from the first column, skipping the first 10.
data = d1[10:24010,0]

# High-pass filter (8th-order Butterworth)
b2, a2 = signal.butter(8, 0.15, 'highpass')
# Cutoff 15 Hz at fs = 200 Hz -> normalized Wn = 2*15/200 = 0.15
filtedData2 = signal.filtfilt(b2, a2, data)

# Reshape into 20 segments of 1200 samples each (6 s per row), 1-D per row.
x = filtedData2.reshape(1,-1)
x = x.reshape(20,1200)
x_copy = x.copy()
y = torch.from_numpy(x_copy)
y = y.unsqueeze(1)
print(y.shape)  # [20, 1, 1200], i.e. [N, 1, L]
y = y.to(torch.float32)

# ATNet (time-domain branch)
hidden_size = 128
L = 1200
bilstm = BiLSTM(L,hidden_size=128)

output = bilstm(y)
ft = output.squeeze(1)
print(ft.size())  # torch.Size([N, 256]) — 2 * hidden_size from the BiLSTM

# STFT per segment (2-D time-frequency branch)
# NOTE(review): same filter design as above — b2/a2 are recomputed unchanged.
b2, a2 = signal.butter(8, 0.15, 'highpass')
# Cutoff 15 Hz at fs = 200 Hz -> normalized Wn = 2*15/200 = 0.15
Zxx_list = []
for i in range(20):
  # NOTE(review): x[i] was already high-pass filtered before reshaping, so
  # this filters each segment a second time — confirm that is intended.
  filtedData2 = signal.filtfilt(b2, a2, x[i])
  f,t,Zxx = signal.stft(filtedData2, fs,nperseg=100,noverlap=50)
  Zxx_list.append(Zxx)

Zxx_array = np.array(Zxx_list)
AFin = Zxx_array.copy()
AFin = torch.from_numpy(AFin)
AFin = AFin.unsqueeze(1)
# NOTE(review): Zxx is complex; casting to float32 silently keeps only the
# real part — confirm whether the magnitude (np.abs(Zxx)) was intended.
AFin = AFin.to(torch.float32)
print(AFin.shape)  # [20, 1, 51, 25] — (segments, channel, freq bins, frames)


# AFNet (spectrogram branch)
in_channels = 1
afnet_layer = AFNetLayer(in_channels)

fp = afnet_layer(AFin)
print("Output shape:", fp.shape)  # [N, 128]

# Concatenate the two branch features
fm = torch.cat((fp, ft), dim=1)
print(fm.shape)  # [N, 384] = 128 (AFNet) + 256 (BiLSTM)

classify = Classify()
fpp = classify(fm)
print(fpp.shape)