#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import os
import warnings
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from sklearn.model_selection import train_test_split
from scipy.io import loadmat
from scipy.signal import medfilt, filtfilt, cheby2
from argparse import ArgumentParser
from torch.cuda.amp import autocast, GradScaler
from MyNewModel import TransformerM  # 使用MyNewModel中的TransformerM

# Suppress the UserWarning raised when the complex FFT output is converted to a
# float tensor in CSIDataset.__getitem__ (the imaginary part is discarded there).
warnings.filterwarnings("ignore", category=UserWarning, message="Casting complex values to real discards the imaginary part.*")

# Use the first CUDA GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# CSI dataset definition
class CSIDataset(Dataset):
    """Dataset of CSI (channel state information) samples for Siamese training.

    Each item is a tuple (csi, csi2, same_class): two preprocessed samples and a
    binary float flag telling whether they come from the same activity label.
    """

    def __init__(self, path, transform=None):
        # csiData: complex array of raw CSI samples loaded from .mat files.
        # activities is currently unused (merge_csi_DataAndLabel returns []).
        # labelNum: integer class index per sample.
        self.csiData, self.activities, self.labelNum = self.merge_csi_DataAndLabel(path)
        self.transform = transform
        if self.csiData.size == 0:
            print("csiData is empty. Please check the data loading process.")
            exit()

    def __len__(self):
        # Number of loaded CSI samples.
        return len(self.csiData)

    def __getitem__(self, idx):
        def hampel_filter(data, window_size=7, n_sigmas=3):
            # Hampel outlier filter: replace a point with the window median when
            # it deviates from that median by more than n_sigmas * window std-dev.
            # NOTE(review): the first/last `window_size` points are left untouched.
            n = len(data)
            filtered_data = data.copy()
            for i in range(window_size, n - window_size):
                window = data[i - window_size: i + window_size + 1]
                median = np.median(window)
                std_dev = np.std(window)
                threshold = n_sigmas * std_dev
                if abs(data[i] - median) > threshold:
                    filtered_data[i] = median
            return filtered_data

        def chebyshev2_filter(data, filter_type='low', cutoff=0.3, order=4, rs=20, fs=1.0):
            # Zero-phase Chebyshev type-II filtering (filtfilt) along the last axis.
            b, a = cheby2(order, rs, cutoff, btype=filter_type, fs=fs)
            filtered_data = filtfilt(b, a, data)
            return filtered_data

        def process(csiData):
            # Pipeline: amplitude -> Hampel outlier removal -> low-pass -> FFT.
            # The FFT output is complex; the caller keeps only the real part.
            amplitudeCSI = abs(csiData)
            amplitudeCSI = hampel_filter(amplitudeCSI)
            amplitudeCSI = chebyshev2_filter(amplitudeCSI, filter_type='low', cutoff=0.3, order=4, rs=20)
            fftData = np.fft.fft(amplitudeCSI)
            #fftData = fftData[:50]
            return fftData

        csiData = self.csiData[idx]
        dwtCSI = process(csiData)
        # .float() on the complex FFT output keeps only the real part (this is
        # what triggers the warning filtered at module top).
        csi = torch.from_numpy(dwtCSI).float().unsqueeze(0)
        csi.requires_grad = False
        #csi = csi.view(1, 50,90)
        # Assumes each sample holds exactly 200*90 values — TODO confirm against the .mat files.
        csi = csi.view(1, 200, 90)


        # Sample a second item: with p=0.5 from the same class, otherwise from a different one.
        label1 = self.labelNum[idx].item()
        same_class = np.random.randint(2)
        indices_same = np.where(self.labelNum == label1)[0]
        indices_diff = np.where(self.labelNum != label1)[0]

        if same_class and len(indices_same) > 0:
            idx2 = np.random.choice(indices_same)
        elif not same_class and len(indices_diff) > 0:
            idx2 = np.random.choice(indices_diff)
        else:
            raise ValueError("No valid sample found.")

        csiData2 = self.csiData[idx2]
        dwtCSI2 = process(csiData2)
        csi2 = torch.from_numpy(dwtCSI2).float().unsqueeze(0)
        csi2.requires_grad = False
        #csi2 = csi2.view(1, 50, 90)
        csi2 = csi2.view(1, 200, 90)

        if self.transform:
            csi = self.transform(csi)
            csi2 = self.transform(csi2)

        # same_class is the pair label: 1.0 -> same activity, 0.0 -> different.
        return csi, csi2, np.array([same_class], dtype=np.float32)

    @staticmethod
    def merge_csi_DataAndLabel(path):
        """Walk `path`, load each .mat file's 'csi' array, and derive labels
        from the numeric directory names.

        Returns (csiData, activities, labelNum); activities is always [].
        NOTE(review): a non-digit directory name would raise ValueError at
        int(name) below — assumes every data directory is named with digits only.
        """
        listDir, csiData, labelNum, activity = [], [], [], []
        for root, dirs, files in os.walk(path, topdown=False):
            for name in dirs:
                dir_path = os.path.join(root, name)
                if name.isdigit():
                    listDir.append(int(name))
                whole_file = [os.path.join(dir_path, file) for file in os.listdir(dir_path) if file.endswith('.mat')]
                for w in whole_file:
                    data = loadmat(w)['csi']
                    csiData.append(data)
                    labelNum.append(listDir.index(int(name)))
        return np.array(csiData, dtype=complex), [], np.array(labelNum)

def create_dataloader(path, batch_size=16, num_workers=1, test_size=0.2):
    """Build train/test DataLoaders over the CSI data found at `path`.

    Args:
        path: root directory scanned by CSIDataset.
        batch_size: batch size for both loaders.
        num_workers: DataLoader worker processes.
        test_size: fraction of the dataset reserved for testing.

    Returns:
        (train_loader, test_loader)
    """
    dataset = CSIDataset(path)
    # Split *indices* and wrap them in Subset instead of splitting the Dataset
    # object itself: passing the Dataset to train_test_split materializes every
    # sample up front (it calls __getitem__ for each index), which is slow and
    # freezes the random pair sampling at split time rather than per epoch.
    n_test = int(len(dataset) * test_size)
    train_idx, test_idx = train_test_split(
        list(range(len(dataset))), test_size=n_test, random_state=40, shuffle=True)
    train_dataset = torch.utils.data.Subset(dataset, train_idx)
    test_dataset = torch.utils.data.Subset(dataset, test_idx)
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
    return train_loader, test_loader

# Similarity Network
class SimilarityNetwork(nn.Module):
    """Three-layer MLP mapping a feature-distance vector to class logits."""

    def __init__(self, input_size, num_classes):
        super().__init__()
        # input -> 512 -> 256 -> num_classes, ReLU between hidden layers.
        self.fc1 = nn.Linear(input_size, 512)
        self.fc2 = nn.Linear(512, 256)
        self.fc3 = nn.Linear(256, num_classes)

    def forward(self, x):
        hidden = F.relu(self.fc1(x))
        hidden = F.relu(self.fc2(hidden))
        logits = self.fc3(hidden)
        return logits

# Siamese Network using Transformer
class SiameseNetworkWithTransformer(nn.Module):
    """Siamese pair: one weight-shared Transformer encoder + an MLP similarity head."""

    def __init__(self, args, num_classes=2):
        super().__init__()
        # Transformer-based feature extractor shared by both branches.
        self.feature_extraction = TransformerM(args)
        self.similarity_computation = SimilarityNetwork(input_size=90, num_classes=num_classes)

    def forward(self, x1, x2):
        # Encode both inputs with the same extractor (shared weights).
        emb_a = self.feature_extraction(x1)
        emb_b = self.feature_extraction(x2)
        # Element-wise absolute difference feeds the similarity classifier.
        return self.similarity_computation(torch.abs(emb_a - emb_b))

def train(model, train_loader, criterion, optimizer, num_epochs=20):
    """Train `model` with mixed precision, printing loss/accuracy per epoch.

    Args:
        model: module taking (inputs1, inputs2) and returning (batch, C) logits.
        train_loader: yields (inputs1, inputs2, labels) with labels shaped (batch, 1).
        criterion: classification loss over logits and int64 class targets.
        optimizer: optimizer over model.parameters().
        num_epochs: number of passes over train_loader.
    """
    # Same expression as the module-level default, resolved locally so the
    # function does not depend on an external global.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model.to(device)
    model.train()
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)
    scaler = GradScaler()  # loss scaler for mixed-precision training

    for epoch in range(num_epochs):
        total_loss, correct_predictions, total_samples = 0.0, 0, 0
        for inputs1, inputs2, labels in train_loader:
            inputs1, inputs2 = inputs1.to(device), inputs2.to(device)
            # Flatten the (batch, 1) float labels to (batch,) int64 class indices.
            # view(-1) — unlike the previous squeeze() — also works for batch
            # size 1, where squeeze() yields a 0-dim tensor and breaks the loss.
            targets = labels.to(device).view(-1).long()

            optimizer.zero_grad()  # clear gradients

            with autocast():  # mixed precision
                outputs = model(inputs1, inputs2)  # forward pass
                loss = criterion(outputs, targets)

            # Backward pass and optimizer step through the scaler.
            scaler.scale(loss).backward()
            scaler.step(optimizer)
            scaler.update()

            total_loss += loss.item()
            predicted = outputs.argmax(dim=1)
            # Compare (batch,) against (batch,): the previous code compared
            # against the (batch, 1) labels, which broadcast to a (batch, batch)
            # matrix and inflated the correct count.
            correct_predictions += (predicted == targets).sum().item()
            total_samples += targets.size(0)

        # Report epoch loss and accuracy, then advance the LR schedule.
        accuracy = correct_predictions / total_samples
        print(f"Epoch {epoch+1}/{num_epochs}, Loss: {total_loss:.4f}, Accuracy: {accuracy * 100:.2f}%")
        scheduler.step()

def evaluate(model, test_loader):
    """Evaluate classification accuracy on `test_loader` and print it.

    Args:
        model: module taking (inputs1, inputs2) and returning (batch, C) logits.
        test_loader: yields (inputs1, inputs2, labels) with labels shaped (batch, 1).

    Returns:
        Accuracy as a percentage in [0, 100]; 0.0 when the loader is empty.
    """
    # Same expression as the module-level default, resolved locally so the
    # function does not depend on an external global.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model.to(device)
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for inputs1, inputs2, labels in test_loader:
            inputs1, inputs2 = inputs1.to(device), inputs2.to(device)
            # Flatten (batch, 1) float labels to (batch,) int64 indices so the
            # comparison below stays element-wise. The previous code compared
            # (batch,) predictions against (batch, 1) labels, broadcasting to a
            # (batch, batch) matrix and inflating the correct count.
            targets = labels.to(device).view(-1).long()
            outputs = model(inputs1, inputs2)
            predicted = outputs.argmax(dim=1)
            total += targets.size(0)
            correct += (predicted == targets).sum().item()
    # Guard against an empty loader (previously a ZeroDivisionError).
    accuracy = 100 * correct / total if total else 0.0
    print(f"Test Accuracy: {accuracy:.2f}%")
    return accuracy

def get_args():
    """Parse command-line hyperparameters for training.

    Returns:
        argparse.Namespace with sample, batch, lr, epoch, hlayers, hheads, vlayers.
    """
    parser = ArgumentParser()
    parser.add_argument('--sample', type=int, default=1)
    parser.add_argument('--batch', type=int, default=1)  # small batch to limit memory use
    parser.add_argument('--lr', type=float, default=0.0001)
    parser.add_argument('--epoch', type=int, default=10)
    parser.add_argument('--hlayers', type=int, default=1)  # horizontal Transformer layers
    parser.add_argument('--hheads', type=int, default=1)  # attention heads
    # Help text corrected to match the actual default (was "[default: 1]").
    parser.add_argument('--vlayers', type=int, default=0, help='Vertical Transformer layers [default: 0]')
    # The duplicated, unreachable second `return parser.parse_args()` that
    # followed this function has been removed.
    return parser.parse_args()

if __name__ == "__main__":
    # Entry point: parse hyperparameters, build loaders, train, then evaluate.
    args = get_args()
    # Root folder of preprocessed .mat CSI recordings, one numeric subfolder per label.
    path = './Processed Data/bedroom/'
    train_loader, test_loader = create_dataloader(path, batch_size=args.batch, num_workers=1)
    model = SiameseNetworkWithTransformer(args, num_classes=2).to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    train(model, train_loader, criterion, optimizer, num_epochs=args.epoch)
    evaluate(model, test_loader)
