#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# Siamese network + CNN for CSI-based human activity recognition

import os
import warnings
import time

import numpy as np
import pywt
from scipy.io import loadmat
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import argparse
from tqdm import tqdm
from torch.utils.data import Dataset, DataLoader
from sklearn.model_selection import train_test_split
from torch.optim.lr_scheduler import StepLR
from werkzeug.debug import console

from scipy.signal import medfilt, filtfilt, cheby2

# Suppress the warning raised when complex FFT values are cast to real tensors
warnings.filterwarnings("ignore", category=UserWarning, message="Casting complex values to real discards the imaginary part.*")


# Prefer GPU when available, otherwise fall back to CPU so the script still
# runs on machines without CUDA (was hard-coded to "cuda:0").
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# CSI dataset definition
class CSIDataset(Dataset):
    """Dataset of complex CSI samples that yields random Siamese pairs.

    Each ``__getitem__`` returns ``(csi_a, csi_b, same_class)`` where
    ``same_class`` is 1.0 when both samples share an activity label and
    0.0 otherwise, so the pair can train a same/different classifier.
    """

    def __init__(self, path, transform=None):
        # csiData: complex ndarray of raw samples; activities: folder name
        # (activity id string) per sample; labelNum: integer class index
        # per sample.
        self.csiData, self.activities, self.labelNum = self.merge_csi_DataAndLabel(path)
        self.transform = transform

        if self.csiData.size == 0:
            # Raise instead of exit() so callers (and tests) can handle the
            # failure; exiting the interpreter from a library class hid the
            # error from any surrounding code.
            raise ValueError("csiData is empty. Please check the data loading process.")

    def __len__(self):
        return len(self.csiData)

    @staticmethod
    def _hampel_filter(data, window_size=5, n_sigmas=3):
        """Replace outliers with the local median (Hampel identifier).

        NOTE(review): ``len(data)`` is the size of the *first* axis; when it
        is shorter than ``2 * window_size`` the loop body never runs and the
        data is returned unchanged — confirm this matches the intended use.
        """
        n = len(data)
        filtered = data.copy()
        for i in range(window_size, n - window_size):
            window = data[i - window_size: i + window_size + 1]
            median = np.median(window)
            threshold = n_sigmas * np.std(window)
            # Replace values farther than n_sigmas stddevs from the median.
            if abs(data[i] - median) > threshold:
                filtered[i] = median
        return filtered

    @staticmethod
    def _chebyshev2_filter(data, filter_type='low', cutoff=0.3, order=4, rs=20, fs=1.0):
        """Zero-phase Chebyshev type-II filtering along the last axis.

        Args:
            data: signal to filter.
            filter_type: 'low', 'high', 'bandpass' or 'bandstop'.
            cutoff: cutoff frequency (fraction of Nyquist when fs=1.0).
            order: filter order; higher means a steeper transition band.
            rs: stop-band attenuation in dB.
            fs: sampling rate.

        Returns:
            The filtered data; filtfilt runs forward and backward so no
            phase shift is introduced.
        """
        b, a = cheby2(order, rs, cutoff, btype=filter_type, fs=fs)
        return filtfilt(b, a, data)

    @classmethod
    def _process(cls, csiData):
        """Amplitude -> Hampel outlier removal -> Chebyshev-II low-pass -> FFT.

        Returns a complex ndarray (FFT output); the caller casts it to a
        real float tensor, discarding the imaginary part (the matching
        warning is filtered at module level).
        """
        amplitude = abs(csiData)
        amplitude = cls._hampel_filter(amplitude)
        amplitude = cls._chebyshev2_filter(amplitude, filter_type='low', cutoff=0.3, order=4, rs=20)
        return np.fft.fft(amplitude)

    def __getitem__(self, idx):
        """Return a (sample, partner, same_class) Siamese pair for ``idx``."""
        # First sample of the pair.
        processed = self._process(self.csiData[idx])
        csi = torch.from_numpy(processed).float().unsqueeze(0)
        # Reshape to (1, 200, 90): assumes each raw sample holds 18000
        # values (e.g. 3 antennas x 30 subcarriers x 200 packets) — TODO
        # confirm against the .mat files. The original torch.cat([csi],
        # dim=0) was a no-op and was removed; from_numpy tensors already
        # have requires_grad=False.
        csi = csi.view(1, 200, 90)

        label1 = self.labelNum[idx].item()

        # Randomly decide whether the partner is same-class (1) or
        # different-class (0).
        same_class = np.random.randint(2)
        indices_same = np.where(self.labelNum == label1)[0]
        indices_diff = np.where(self.labelNum != label1)[0]

        # Draw the partner index from the chosen pool, if it is non-empty.
        if same_class and len(indices_same) > 0:
            idx2 = np.random.choice(indices_same)
        elif not same_class and len(indices_diff) > 0:
            idx2 = np.random.choice(indices_diff)
        else:
            raise ValueError(
                f"No valid sample found for {'same' if same_class else 'different'} class comparison at index {idx}.")

        # Second sample of the pair, processed identically.
        processed2 = self._process(self.csiData[idx2])
        csi2 = torch.from_numpy(processed2).float().unsqueeze(0)
        csi2 = csi2.view(1, 200, 90)

        if self.transform:
            csi = self.transform(csi)
            csi2 = self.transform(csi2)

        return csi, csi2, np.array([same_class], dtype=np.float32)

    @staticmethod
    def merge_csi_DataAndLabel(path):
        """Walk ``path`` and load every .mat 'csi' array from digit-named
        activity folders.

        Returns:
            csiData: complex ndarray, one entry per .mat file.
            activity: folder name (activity id as string) per sample.
            labelNum: per-sample index into the order folders were first seen.

        NOTE(review): os.walk(topdown=False) gives no guaranteed folder
        order, so label indices depend on traversal order — confirm that
        downstream code only needs labels consistent within one run.
        """
        listDir = []
        csiData = []
        labelNum = []
        activity = []

        for root, dirs, files in os.walk(path, topdown=False):
            for name in dirs:
                # Skip non-numeric folders: the original built whole_file for
                # them too and then crashed on int(name) when labelling.
                if not name.isdigit():
                    continue

                dir_path = os.path.join(root, name)
                listDir.append(int(name))

                # Every .mat sample inside this activity folder.
                whole_file = [os.path.join(dir_path, f) for f in os.listdir(dir_path) if f.endswith('.mat')]
                for w in whole_file:
                    csiData.append(loadmat(w)['csi'])
                    activity.append(name)
                    # Label = position of this folder in discovery order.
                    labelNum.append(listDir.index(int(name)))

        return np.array(csiData, dtype=complex), activity, np.array(labelNum)



# Split the dataset into training and test sets
def create_dataloader(path, batch_size=16, num_workers=1, test_size=0.2):
    """Build train/test DataLoaders over a CSIDataset rooted at ``path``.

    Splits by *index* (via torch.utils.data.Subset) instead of passing the
    Dataset object to train_test_split, which materialised every sample —
    running the full filtering pipeline and freezing the random pairs — up
    front. The seed (random_state=40) is kept so the split stays reproducible.

    Args:
        path: root folder of the processed CSI data.
        batch_size: samples per batch for both loaders.
        num_workers: DataLoader worker processes.
        test_size: fraction of samples reserved for the test set.

    Returns:
        (train_loader, test_loader) tuple of DataLoaders.
    """
    from torch.utils.data import Subset

    dataset = CSIDataset(path)
    n_total = len(dataset)

    # Number of samples held out for testing.
    n_test = int(n_total * test_size)

    # Shuffle/split indices only; samples stay lazily produced by __getitem__.
    train_idx, test_idx = train_test_split(
        np.arange(n_total), test_size=n_test, random_state=40, shuffle=True)

    train_loader = DataLoader(Subset(dataset, train_idx), batch_size=batch_size,
                              shuffle=True, num_workers=num_workers)
    test_loader = DataLoader(Subset(dataset, test_idx), batch_size=batch_size,
                             shuffle=False, num_workers=num_workers)

    return train_loader, test_loader


# CNN feature-extraction network
class CNN_net(nn.Module):
    """Feature extractor: two convolutional blocks, each ending in a 2x2
    max-pool and an SE channel-attention module, followed by flattening.

    Note: ``num_classes`` is accepted for interface compatibility but is
    not used by the extractor itself.
    """

    def __init__(self, num_classes):
        super(CNN_net, self).__init__()

        def conv_unit(in_ch, out_ch):
            # 3x3 conv -> batch-norm -> ReLU; spatial size preserved.
            return [
                nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=1, padding=1),
                nn.BatchNorm2d(out_ch),
                nn.ReLU(inplace=True),
            ]

        # Block A: 1 -> 128 channels, halve spatial dims, SE attention.
        self.conv_block_a = nn.Sequential(
            *conv_unit(1, 128),
            *conv_unit(128, 128),
            nn.MaxPool2d(kernel_size=2, stride=2),
            SEBlock(128),
        )

        # Block B: 128 -> 256 channels, halve spatial dims, SE attention.
        self.conv_block_b = nn.Sequential(
            *conv_unit(128, 256),
            *conv_unit(256, 256),
            nn.MaxPool2d(kernel_size=2, stride=2),
            SEBlock(256),
        )

        self.flatten = nn.Flatten()

    def forward(self, x):
        features = self.conv_block_b(self.conv_block_a(x))
        return self.flatten(features)
class SEBlock(nn.Module):
    """Squeeze-and-Excitation channel attention.

    Global-average-pools each channel to a scalar, passes the pooled
    vector through a bottleneck MLP with sigmoid gating, and rescales
    the input channels by the resulting (0, 1) gates.
    """

    def __init__(self, channel, reduction=16):
        super(SEBlock, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        bottleneck = channel // reduction
        self.fc = nn.Sequential(
            nn.Linear(channel, bottleneck),
            nn.ReLU(inplace=True),
            nn.Linear(bottleneck, channel),
            nn.Sigmoid(),
        )

    def forward(self, x):
        batch, channels = x.size(0), x.size(1)
        # Squeeze: (B, C, H, W) -> (B, C).
        squeezed = self.avg_pool(x).view(batch, channels)
        # Excite: per-channel gate, broadcast back over H and W.
        gate = self.fc(squeezed).view(batch, channels, 1, 1)
        return x * gate


class SimilarityNetwork(nn.Module):
    """MLP head mapping a feature-difference vector to class logits.

    During Siamese training num_classes is 2 (same / different pair).
    Raw logits are returned: nn.CrossEntropyLoss applies the softmax.
    """

    def __init__(self, input_size, num_classes):
        super(SimilarityNetwork, self).__init__()
        # input_size must match the flattened CNN output (e.g. 281600).
        self.fc1 = nn.Linear(input_size, 1024)
        self.fc2 = nn.Linear(1024, 512)
        self.fc3 = nn.Linear(512, num_classes)
        self.relu = nn.ReLU()

    def forward(self, x):
        hidden = self.relu(self.fc1(x))
        hidden = self.relu(self.fc2(hidden))
        # No softmax here: the loss function handles it.
        return self.fc3(hidden)

# Siamese similarity-computation network
class SiameseNetwork(nn.Module):
    """Siamese model: one shared CNN feature extractor plus an MLP head.

    Both inputs pass through the same CNN_net; the element-wise absolute
    difference of the two embeddings is scored by SimilarityNetwork.
    """

    def __init__(self, num_classes):
        super(SiameseNetwork, self).__init__()
        # Shared weights: a single extractor serves both branches.
        self.feature_extraction = CNN_net(num_classes=num_classes)
        # input_size matches the flattened CNN output for (1, 200, 90) input.
        self.similarity_computation = SimilarityNetwork(input_size=281600, num_classes=num_classes)

    def forward(self, x1, x2):
        feats_a = self.feature_extraction(x1)
        feats_b = self.feature_extraction(x2)
        # Element-wise distance between the twin embeddings feeds the head.
        return self.similarity_computation(torch.abs(feats_a - feats_b))


def train(model, train_loader, criterion, optimizer, num_epochs=20, device=None,
          test_loader=None):
    """Train the Siamese model, checkpointing the best-accuracy epoch.

    Args:
        model: module taking two inputs and returning logits.
        train_loader: iterable of (input1, input2, label) batches.
        criterion: loss over (logits, long labels), e.g. CrossEntropyLoss.
        optimizer: optimizer built over model.parameters().
        num_epochs: number of passes over train_loader.
        device: target device; auto-selected when None.
        test_loader: optional evaluation loader, run after epoch 4 onward.
            (The original read a module-level global ``test_loader``, which
            was fragile; pass it explicitly instead. When None, the
            mid-training evaluation is skipped.)

    Returns:
        Best training accuracy observed, as a fraction in [0, 1].
    """
    if device is None:
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model.to(device)
    model.train()

    # Decay the learning rate by 10x every 10 epochs.
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)

    best_accuracy = 0.0

    for epoch in range(num_epochs):
        # Per-epoch timer: the original measured cumulative time since the
        # call started while printing it as "one epoch's time".
        epoch_start = time.time()
        total_loss = 0.0
        correct_predictions = 0
        total_samples = 0

        for inputs1, inputs2, labels in train_loader:
            inputs1, inputs2 = inputs1.to(device), inputs2.to(device)
            # view(-1) instead of squeeze(): squeeze() collapses a size-1
            # batch to a 0-d tensor and breaks CrossEntropyLoss.
            labels = labels.to(device).view(-1).long()

            # Reset gradients before each step.
            optimizer.zero_grad()

            outputs = model(inputs1, inputs2)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            total_loss += loss.item()
            # Predicted class = highest-logit index per sample.
            _, predicted = torch.max(outputs, 1)
            correct_predictions += (predicted == labels).sum().item()
            total_samples += labels.size(0)

        accuracy = correct_predictions / total_samples
        epoch_time = time.time() - epoch_start
        print(f"一轮的训练时间: {epoch_time // 60} 分钟 {epoch_time % 60:.2f} 秒")
        print(
            f"Epoch {epoch + 1}/{num_epochs}, Loss: {total_loss / len(train_loader):.4f}, Accuracy: {accuracy * 100:.2f}%")

        # Advance the learning-rate schedule.
        scheduler.step()

        # Checkpoint whenever training accuracy improves.
        if accuracy > best_accuracy:
            best_accuracy = accuracy
            model_out_name = 'models/baseline_best_model' + '.pt'
            # exist_ok avoids a race between the check and the mkdir.
            os.makedirs(os.path.dirname(model_out_name), exist_ok=True)
            torch.save(model.state_dict(), model_out_name)

        # Periodic evaluation on the held-out set, once past warm-up.
        if epoch > 3 and test_loader is not None:
            evaluate(model, test_loader)

    return best_accuracy
def evaluate(model, test_loader, device='cuda'):
    """Evaluate pair-classification accuracy on ``test_loader``.

    Args:
        model: module taking two inputs and returning logits.
        test_loader: iterable of (input1, input2, label) batches.
        device: device to run evaluation on.

    Returns:
        Accuracy as a percentage; 0.0 for an empty loader (the original
        divided by zero in that case). Also prints the accuracy.
    """
    model.to(device)
    model.eval()  # evaluation mode: disables dropout/BN updates
    correct = 0
    total = 0

    # No gradients needed during evaluation.
    with torch.no_grad():
        for inputs1, inputs2, labels in test_loader:
            inputs1, inputs2 = inputs1.to(device), inputs2.to(device)
            # view(-1) keeps a 1-D label tensor even for batch size 1.
            labels = labels.to(device).view(-1).long()

            outputs = model(inputs1, inputs2)
            # Predicted class = highest-logit index per sample.
            _, predicted = torch.max(outputs, 1)

            total += labels.size(0)
            correct += (predicted == labels).sum().item()

    # Guard against an empty loader instead of dividing by zero.
    accuracy = 100 * correct / total if total else 0.0
    print(f"Accuracy: {accuracy:.4f}%")
    return accuracy


def get_args():
    """Parse command-line hyper-parameters for the Transformer-BLS setup."""
    parser = argparse.ArgumentParser(description='Transformer-BLS')
    # (flag, type, default, help) — identical flags/defaults to before.
    options = [
        ('--sample', int, 1, 'sample length on temporal side'),
        ('--batch', int, 8, 'batch size [default: 16]'),
        ('--lr', float, 0.001, 'learning rate [default: 0.001]'),
        ('--epoch', int, 10, 'number of epoch [default: 20]'),
        ('--hlayers', int, 6, 'horizontal transformer layers [default: 6]'),
        ('--hheads', int, 9, 'horizontal transformer head [default: 9]'),
        ('--vlayers', int, 1, 'vertical transformer layers [default: 1]'),
        ('--vheads', int, 200, 'vertical transformer head [default: 200]'),
        ('--com_dim', int, 50, 'compressor vertical transformer layers [default: 50]'),
    ]
    for flag, kind, default, text in options:
        parser.add_argument(flag, type=kind, default=default, help=text)
    return parser.parse_args()


if __name__ == "__main__":
    # Record wall-clock start for the total-time report at the end.
    start_time = time.time()

    args = get_args()

    # Data location (switch to meetingroom as needed).
    path = r'./Processed Data/bedroom/'
    # path = r'./Processed Data/meetingroom/'

    # Activity labels; 'sit down' fixed to match CSIDataset's list (was the
    # typo 'site down'). Only the length (11) is used below.
    labelList = ['lying', 'sitting', 'running', 'walking', 'pick up', 'wave hand', 'jump', 'squat', 'sit down',
                 'stand up', 'fall down']

    num_classes = len(labelList)  # 11 classes

    model = SiameseNetwork(num_classes).to(device)
    # Cross-entropy over the same/different pair logits.
    criterion = nn.CrossEntropyLoss()

    # Use the CLI learning rate (previously hard-coded to 0.0001 while the
    # comment claimed 0.001).
    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    # Use the CLI batch size (previously hard-coded to 8).
    train_loader, test_loader = create_dataloader(path, batch_size=args.batch, num_workers=1)

    # Use the CLI epoch count: num_epochs was computed before but a
    # hard-coded 40 was passed to train().
    num_epochs = args.epoch
    train(model, train_loader, criterion, optimizer, num_epochs=num_epochs)

    # Save the final model state.
    model_out_name = 'models/model_siam_resnet' + '.pt'
    os.makedirs(os.path.dirname(model_out_name), exist_ok=True)
    torch.save(model.state_dict(), model_out_name)

    # Final held-out evaluation.
    evaluate(model, test_loader)

    end_time = time.time()
    total_time = end_time - start_time
    print(f"总训练时间: {total_time // 60} 分钟 {total_time % 60:.2f} 秒")