import os
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
import torch.nn.functional as F

# Fix the random seeds so results are reproducible across runs
torch.manual_seed(42)
np.random.seed(42)

# Device configuration: prefer CUDA when available
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f"使用设备: {device}")

# Attention modules
class ChannelAttention(nn.Module):
    """Channel attention (SE/CBAM style) for (batch, channels, length) tensors.

    Squeezes the temporal axis with both average and max pooling, passes each
    descriptor through a shared bottleneck MLP, and emits a per-channel gate
    in (0, 1) that broadcasts over time.
    """

    def __init__(self, in_channels, reduction_ratio=16):
        super(ChannelAttention, self).__init__()
        # Pooling layers collapse the temporal axis to one value per channel.
        self.avg_pool = nn.AdaptiveAvgPool1d(1)
        self.max_pool = nn.AdaptiveMaxPool1d(1)

        # Shared bottleneck MLP: in -> in/ratio -> in.
        hidden = in_channels // reduction_ratio
        self.fc = nn.Sequential(
            nn.Linear(in_channels, hidden),
            nn.ReLU(),
            nn.Linear(hidden, in_channels),
        )

    def forward(self, x):
        squeezed_avg = self.avg_pool(x).squeeze(-1)  # (batch, channels)
        squeezed_max = self.max_pool(x).squeeze(-1)  # (batch, channels)
        gate = self.fc(squeezed_avg) + self.fc(squeezed_max)
        # Restore the length axis so the gate broadcasts over time.
        return torch.sigmoid(gate).unsqueeze(-1)

class SpatialAttention(nn.Module):
    """Spatial attention: gates each time step using channel-wise mean/max maps."""

    def __init__(self, kernel_size=7):
        super(SpatialAttention, self).__init__()
        # 2 input channels (mean map + max map) -> 1 gate map; 'same' padding.
        self.conv = nn.Conv1d(2, 1, kernel_size=kernel_size, padding=kernel_size // 2)

    def forward(self, x):
        # Collapse the channel axis two ways, then stack the two maps.
        mean_map = torch.mean(x, dim=1, keepdim=True)
        max_map = torch.max(x, dim=1, keepdim=True)[0]
        stacked = torch.cat((mean_map, max_map), dim=1)
        # (batch, 1, length) gate in (0, 1).
        return torch.sigmoid(self.conv(stacked))

# Residual building block
class ResidualBlock(nn.Module):
    """1-D residual block with CBAM-style channel and spatial attention.

    conv-bn-relu-conv-bn, gated by channel then spatial attention, plus a
    (possibly projected) skip connection and a final ReLU.
    """

    def __init__(self, in_channels, out_channels, stride=1):
        super(ResidualBlock, self).__init__()
        self.conv1 = nn.Conv1d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm1d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv1d(out_channels, out_channels, kernel_size=3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm1d(out_channels)

        # Attention gates applied to the residual branch output.
        self.ca = ChannelAttention(out_channels)
        self.sa = SpatialAttention()

        # Project the identity path only when the shape changes.
        if stride != 1 or in_channels != out_channels:
            self.shortcut = nn.Sequential(
                nn.Conv1d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm1d(out_channels),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        branch = self.relu(self.bn1(self.conv1(x)))
        branch = self.bn2(self.conv2(branch))

        # Gate per-channel first, then per-time-step.
        branch = branch * self.ca(branch)
        branch = branch * self.sa(branch)

        # Skip connection, then the output activation.
        branch = branch + self.shortcut(x)
        return self.relu(branch)

# Residual encoder-decoder network
class ResNetWithAttention(nn.Module):
    """1-D ResNet encoder with attention blocks plus a transposed-conv decoder.

    For input of shape (batch, in_channels, length) the forward pass returns:
      - classification: (batch, num_classes) from globally pooled features
      - reconstructed:  (batch, in_channels, length) reconstructed signal
      - features:       (batch, 512) bottleneck feature vector

    Args:
        block: residual block class, called as block(in_ch, out_ch, stride).
        num_blocks: list of four block counts, one per stage.
        in_channels: number of input signal channels.
        num_classes: size of the classification head output.
    """

    def __init__(self, block, num_blocks, in_channels=1, num_classes=1):
        super(ResNetWithAttention, self).__init__()
        self.in_channels = 64

        # Stem: stride-2 conv + stride-2 max-pool (4x temporal downsampling).
        self.conv1 = nn.Conv1d(in_channels, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm1d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)

        # Four residual stages; stages 2-4 each downsample by 2 (8x total),
        # so the encoder's overall downsampling factor is 32.
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)

        # Classification head.
        self.avgpool = nn.AdaptiveAvgPool1d(1)
        self.fc = nn.Linear(512, num_classes)

        # Simplified decoder for signal reconstruction:
        # five 2x upsampling stages, i.e. 32x total (matches the encoder).
        self.decoder = nn.Sequential(
            # Upsampling stage 1
            nn.ConvTranspose1d(512, 256, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm1d(256),
            nn.ReLU(inplace=True),

            # Upsampling stage 2
            nn.ConvTranspose1d(256, 128, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm1d(128),
            nn.ReLU(inplace=True),

            # Upsampling stage 3
            nn.ConvTranspose1d(128, 64, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm1d(64),
            nn.ReLU(inplace=True),

            # Upsampling stage 4
            nn.ConvTranspose1d(64, 32, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm1d(32),
            nn.ReLU(inplace=True),

            # Final output layer -- no Tanh, so the output range is unrestricted
            nn.ConvTranspose1d(32, in_channels, kernel_size=4, stride=2, padding=1)
        )

    def _make_layer(self, block, out_channels, num_blocks, stride):
        # First block of a stage may downsample; the rest keep stride 1.
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_channels, out_channels, stride))
            self.in_channels = out_channels
        return nn.Sequential(*layers)

    def forward(self, x):
        # BUG FIX: remember the *original* input length here. The previous
        # code compared against x.size(2) * 32 after x had been reassigned to
        # the encoded feature map, so inputs whose length is not a multiple of
        # 32 were returned with a different length than they came in with.
        input_length = x.size(2)

        # Encoder
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        # Feature extraction for classification.
        features = self.avgpool(x)
        features = features.view(features.size(0), -1)
        classification = self.fc(features)

        # Decoder reconstructs the signal from the bottleneck features.
        reconstructed = self.decoder(x)

        # Resize to exactly the input length when the encoder's floor
        # divisions made the decoder output come out a different size.
        if reconstructed.size(2) != input_length:
            reconstructed = F.interpolate(reconstructed, size=input_length, mode='linear', align_corners=False)

        return classification, reconstructed, features

# ResNet-18 factory
def ResNet18(in_channels=1, num_classes=1):
    """Build a ResNet-18-style (2-2-2-2) attention network for 1-D signals."""
    stage_depths = [2, 2, 2, 2]
    return ResNetWithAttention(ResidualBlock, stage_depths, in_channels, num_classes)

# Dataset definition
class InterferenceDataset(Dataset):
    """Dataset that turns PNG images into (noisy, pseudo-clean) 1-D signal pairs.

    Each item contains a signal extracted from the image, a surrogate "clean"
    target produced by moving-average filtering, and the source filename.
    NOTE(review): the extraction below is a stand-in; a real pipeline would
    decode the signal according to the actual data format.
    """

    def __init__(self, data_dir, transform=None, signal_length=1024):
        self.data_dir = data_dir
        self.transform = transform
        self.signal_length = signal_length
        # Only PNG files in the directory count as samples.
        self.file_list = [name for name in os.listdir(data_dir) if name.endswith('.png')]

    def __len__(self):
        return len(self.file_list)

    def __getitem__(self, idx):
        fname = self.file_list[idx]
        img_path = os.path.join(self.data_dir, fname)
        try:
            image = Image.open(img_path).convert('L')  # grayscale
            if self.transform:
                image = self.transform(image)

            # Simulated signal/target extraction from the image content.
            noisy = self._extract_signal_from_image(image)
            target = self._simulate_clean_signal(noisy)

            return {
                'signal_with_noise': noisy,
                'clean_signal': target,
                'filename': fname
            }
        except Exception as e:
            # Best-effort fallback: keep the loader alive with random data.
            print(f"Error processing {img_path}: {e}")
            return {
                'signal_with_noise': torch.randn(1, self.signal_length),
                'clean_signal': torch.randn(1, self.signal_length),
                'filename': fname
            }

    def _extract_signal_from_image(self, img):
        # Flatten the image into a 1-D signal by sampling its middle row.
        arr = np.array(img)
        mid = arr.shape[0] // 2
        if arr.ndim > 2:
            row = arr[mid, :, 0]
        else:
            row = arr[mid, :]

        # Force a fixed signal length: truncate or zero-pad on the right.
        if len(row) > self.signal_length:
            row = row[:self.signal_length]
        else:
            row = np.concatenate([row, np.zeros(self.signal_length - len(row))])

        # Standardize (zero mean, unit variance, epsilon-guarded).
        row = (row - np.mean(row)) / (np.std(row) + 1e-8)

        return torch.FloatTensor(row).unsqueeze(0)  # add channel dimension

    def _simulate_clean_signal(self, signal_with_noise):
        # Surrogate clean target: 5-tap moving average of the noisy signal.
        raw = signal_with_noise.squeeze().numpy()
        window = 5
        smoothed = np.convolve(raw, np.ones(window) / window, mode='same')
        return torch.FloatTensor(smoothed).unsqueeze(0)  # add channel dimension

# Traditional interference suppression baseline
class TraditionalInterferenceSuppression:
    """Classical baseline: a simple moving-average low-pass filter."""

    def __init__(self):
        pass

    def suppress(self, signal_with_noise):
        """Suppress interference with a 7-tap moving-average low-pass filter.

        Accepts a torch tensor or numpy array; returns a (1, length) tensor.
        """
        # Normalize the input to a flat numpy array.
        if isinstance(signal_with_noise, torch.Tensor):
            samples = signal_with_noise.squeeze().cpu().numpy()
        else:
            samples = signal_with_noise.squeeze()

        # Low-pass: convolve with a length-7 box kernel ('same' keeps length).
        kernel = np.ones(7) / 7
        smoothed = np.convolve(samples, kernel, mode='same')

        # Back to a torch tensor with a channel dimension.
        return torch.FloatTensor(smoothed).unsqueeze(0)

# Evaluation
def evaluate_model(model, test_loader, traditional_method=None):
    """Run the model (and optionally a classical baseline) over the test set.

    Returns a list with one dict per sample containing the noisy input, the
    clean target, the network reconstruction, and (when a traditional_method
    is supplied) the baseline's output; otherwise that entry is None.
    """
    model.eval()
    collected = []

    with torch.no_grad():
        for batch in test_loader:
            noisy = batch['signal_with_noise'].to(device)
            clean = batch['clean_signal']
            names = batch['filename']

            # Deep-learning suppression: second model output is the reconstruction.
            _, dl_output, _ = model(noisy)
            dl_output = dl_output.cpu()

            # Classical baseline, applied sample by sample on the CPU.
            baseline = None
            if traditional_method:
                baseline = torch.stack([
                    traditional_method.suppress(noisy[i].cpu())
                    for i in range(len(noisy))
                ])

            # One result record per sample in the batch.
            for i in range(len(noisy)):
                collected.append({
                    'filename': names[i],
                    'signal_with_noise': noisy[i].cpu(),
                    'clean_signal': clean[i],
                    'dl_suppressed': dl_output[i],
                    'traditional_suppressed': baseline[i] if baseline is not None else None
                })

    return collected

# Visualization
def visualize_results(results, save_dir='results', num_samples=3):
    """Plot noisy vs. suppressed signals for a few random samples, save as SVG.

    Top subplot: noisy signal vs. traditional suppression (skipped when the
    baseline result is absent). Bottom subplot: the same noisy signal vs. the
    network's output. Files are timestamped to avoid overwriting.
    """
    import time  # local: only needed for the filename timestamp

    os.makedirs(save_dir, exist_ok=True)

    # Fixed seed so the same samples are picked on every run.
    np.random.seed(42)

    # Chinese font support for matplotlib labels.
    plt.rcParams['font.sans-serif'] = ['SimHei']  # use SimHei
    plt.rcParams['axes.unicode_minus'] = False    # render minus signs correctly

    # Randomly choose a few samples to visualize.
    indices = np.random.choice(len(results), min(num_samples, len(results)), replace=False)

    for idx in indices:
        result = results[idx]
        filename = result['filename']
        signal_with_noise = result['signal_with_noise'].squeeze().numpy()
        dl_suppressed = result['dl_suppressed'].squeeze().numpy()
        traditional_suppressed = (
            result['traditional_suppressed'].squeeze().numpy()
            if result['traditional_suppressed'] is not None else None
        )

        # Time axis covering 65-75 us; only the 65-67.5 us window is shown.
        time_axis = np.linspace(65, 75, len(signal_with_noise))
        x_min, x_max = 65, 67.5

        plt.figure(figsize=(10, 8))

        # Subplot 1: noisy signal vs. traditional suppression.
        plt.subplot(2, 1, 1)
        plt.plot(time_axis, signal_with_noise, 'r-', label='信号+噪声(真实)')
        # BUG FIX: guard against a missing baseline instead of plotting None.
        if traditional_suppressed is not None:
            plt.plot(time_axis, traditional_suppressed, 'b-', label='传统干扰抑制')
        plt.grid(True)
        plt.legend()
        plt.ylabel('幅度')
        plt.xlim(x_min, x_max)  # force the x-axis range

        # Subplot 2: the identical noisy signal vs. the network output.
        plt.subplot(2, 1, 2)
        plt.plot(time_axis, signal_with_noise, 'r-', label='信号+噪声(真实)')
        plt.plot(time_axis, dl_suppressed, 'b-', label='网络干扰抑制')
        plt.grid(True)
        plt.legend()
        plt.xlabel('时间(μs)')
        plt.ylabel('幅度')
        plt.xlim(x_min, x_max)  # force the x-axis range

        plt.tight_layout()
        # BUG FIX: the save path and the log line below contained a literal
        # "(unknown)" placeholder while the extracted `filename` went unused;
        # embed the sample's actual filename instead.
        timestamp = time.strftime("%Y%m%d-%H%M%S")
        base_name = os.path.splitext(os.path.basename(filename))[0]
        save_path = os.path.join(save_dir, f'result_{base_name}_{timestamp}.svg')
        plt.savefig(save_path)
        plt.close()

        print(f'处理样本 {idx}，文件名: {filename}')
        print(f'已保存结果图像: {save_path}')

# Training loop
def train_model(model, train_loader, criterion, optimizer, num_epochs=10, device='cpu'):
    """Train the model to reconstruct clean signals; returns the trained model.

    Applies the criterion to the model's reconstruction output, clips gradient
    norms to 1.0, and halves the learning rate every 5 epochs (StepLR).
    """
    model.train()

    # Conservative learning-rate decay: halve every 5 epochs.
    lr_schedule = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.5)

    for epoch in range(num_epochs):
        accumulated = 0.0

        for step, batch in enumerate(train_loader):
            noisy = batch['signal_with_noise'].to(device)
            target = batch['clean_signal'].to(device)

            # Forward pass: the second model output is the reconstruction.
            _, reconstructed, _ = model(noisy)

            # Plain reconstruction loss (e.g. MSE).
            loss = criterion(reconstructed, target)

            # Backward pass and parameter update.
            optimizer.zero_grad()
            loss.backward()
            # Clip gradients to guard against explosion.
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
            optimizer.step()

            accumulated += loss.item()

            # Report the running average every 10 batches.
            if step % 10 == 9:
                print(f'Epoch {epoch+1}, Batch {step+1}, Loss: {accumulated/10:.4f}')
                accumulated = 0.0

        # Advance the learning-rate schedule once per epoch.
        lr_schedule.step()
        print(f'Epoch {epoch+1} completed')

    print('Training completed')
    return model

# Entry point
def main():
    """End-to-end pipeline: build data, train, evaluate, and visualize."""
    # Image preprocessing: resize, then convert to a tensor.
    transform = transforms.Compose([
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
    ])

    # Dataset of interference images.
    ci_dataset = InterferenceDataset(
        data_dir='e:/0研0/#out/优创-残差抑制/CI',
        transform=transform,
        signal_length=1024
    )

    # 80/20 train/test split.
    train_size = int(0.8 * len(ci_dataset))
    test_size = len(ci_dataset) - train_size
    train_dataset, test_dataset = torch.utils.data.random_split(ci_dataset, [train_size, test_size])

    # Data loaders.
    train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True, num_workers=4)
    test_loader = DataLoader(test_dataset, batch_size=32, shuffle=False, num_workers=4)

    # Model, loss, and optimizer.
    model = ResNet18(in_channels=1).to(device)
    criterion = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=0.0005, weight_decay=1e-6)

    # Train.
    print("开始训练模型...")
    model = train_model(model, train_loader, criterion, optimizer, num_epochs=10, device=device)

    # Classical baseline for comparison.
    traditional_method = TraditionalInterferenceSuppression()

    # Evaluate.
    print("开始评估模型...")
    results = evaluate_model(model, test_loader, traditional_method)

    # Visualize.
    print("开始可视化结果...")
    visualize_results(results, save_dir='interference_suppression_results', num_samples=3)

    print("处理完成！")


if __name__ == "__main__":
    main()
