# Provenance: commit 5abbc08 ("add AlexNet processes", author RRFRRF2) —
# scraped page header converted to a comment so the file parses.
# Read the dataset and save the images under ../dataset/raw_data in full
# dataset order: 1.png, 2.png, 3.png, ...
import os
import yaml
import numpy as np
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from tqdm import tqdm
def unpickle(file):
    """Load one pickled CIFAR-10 batch file.

    Args:
        file: Path to a CIFAR-10 python-version batch file.

    Returns:
        The unpickled batch dictionary, keyed by bytes
        (e.g. ``b'data'``, ``b'labels'``).
    """
    import pickle  # local import, matching the file's original style
    # NOTE: pickle.load can execute arbitrary code from a malicious file;
    # only use this on the official CIFAR-10 archive.
    with open(file, 'rb') as fo:
        # 'bytes' encoding keeps the original byte-string keys of the
        # Python-2-era CIFAR pickles. Avoid shadowing the builtin `dict`.
        batch = pickle.load(fo, encoding='bytes')
    return batch
def save_images_from_cifar10_with_backdoor(dataset_path, save_dir):
    """Export CIFAR-10 (train + test) as numbered PNGs, adding a trigger to poisoned samples.

    Images are written as ``<index>.png`` into ``save_dir``, where the index
    runs over the concatenation of the five training batches followed by the
    test batch. Samples whose global index appears in
    ``<repo>/dataset/backdoor_index.npy`` get a white square trigger stamped
    in the bottom-right corner; the trigger edge length comes from
    ``./train.yaml`` (key ``trigger_size``, default 4).

    Args:
        dataset_path: Directory containing the extracted CIFAR-10 python batches.
        save_dir: Output directory for the PNG files (created if missing).
    """
    os.makedirs(save_dir, exist_ok=True)

    # Load the poisoned-sample indices stored next to the dataset, if present.
    backdoor_index_path = os.path.join(
        os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
        'dataset', 'backdoor_index.npy')
    if os.path.exists(backdoor_index_path):
        backdoor_indices = np.load(backdoor_index_path)
        print(f"已加载{len(backdoor_indices)}个中毒样本索引")
    else:
        backdoor_indices = []
        print("未找到中毒索引文件,将不添加触发器")
    # A set gives O(1) membership tests in the per-image loop below;
    # `i in ndarray` would be a linear scan for every one of the 60k images.
    backdoor_index_set = set(int(idx) for idx in backdoor_indices)

    # Read whichever of the five training batches exist on disk.
    train_data = []
    train_labels = []
    for i in range(1, 6):
        batch_file = os.path.join(dataset_path, f'data_batch_{i}')
        if os.path.exists(batch_file):
            print(f"读取训练批次 {i}")
            batch = unpickle(batch_file)
            train_data.append(batch[b'data'])
            train_labels.extend(batch[b'labels'])
    if train_data:
        # (N, 3072) -> (N, 32, 32, 3) HWC uint8, the layout PIL expects.
        train_data = np.vstack(train_data)
        train_data = train_data.reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1)

    # Read the test batch, if present.
    test_file = os.path.join(dataset_path, 'test_batch')
    if os.path.exists(test_file):
        print("读取测试数据")
        test_batch = unpickle(test_file)
        test_data = test_batch[b'data']
        test_labels = test_batch[b'labels']
        test_data = test_data.reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1)
    else:
        test_data = []
        test_labels = []

    # Concatenate whichever splits were found (train first, then test), so
    # global indices match the backdoor index file's numbering.
    all_data = (np.concatenate([train_data, test_data])
                if len(test_data) > 0 and len(train_data) > 0
                else (train_data if len(train_data) > 0 else test_data))
    all_labels = (train_labels + test_labels
                  if len(test_labels) > 0 and len(train_labels) > 0
                  else (train_labels if len(train_labels) > 0 else test_labels))

    # Trigger edge length comes from the training config; defaults to 4 px.
    # NOTE(review): this raises if ./train.yaml is missing — confirm the
    # config file always exists before this script runs.
    config_path = './train.yaml'
    with open(config_path) as f:
        config = yaml.safe_load(f)
    trigger_size = config.get('trigger_size', 4)

    # Write out every image; poisoned indices get the trigger stamped first.
    print(f"保存 {len(all_data)} 张图像...")
    for i, (img, label) in enumerate(tqdm(zip(all_data, all_labels), total=len(all_data))):
        if i in backdoor_index_set:
            # Copy before mutating so the in-memory dataset stays clean,
            # then stamp a white square in the bottom-right corner.
            img_backdoor = img.copy()
            img_backdoor[-trigger_size:, -trigger_size:, :] = 255
            Image.fromarray(img_backdoor).save(os.path.join(save_dir, f"{i}.png"))
        else:
            # Clean sample: convert and save as-is. The PIL conversion is
            # done here (not before the branch) so poisoned samples don't
            # pay for an unused clean-image conversion.
            Image.fromarray(img).save(os.path.join(save_dir, f"{i}.png"))
    print(f"完成! {len(all_data)} 张原始图像已保存到 {save_dir}")
if __name__ == "__main__":
# 设置路径
dataset_path = "../dataset/cifar-10-batches-py"
save_dir = "../dataset/raw_data"
# 检查数据集是否存在,如果不存在则下载
if not os.path.exists(dataset_path):
print("数据集不存在,正在下载...")
os.makedirs("../dataset", exist_ok=True)
transform = transforms.Compose([transforms.ToTensor()])
trainset = torchvision.datasets.CIFAR10(root="../dataset", train=True, download=True, transform=transform)
# 保存图像
save_images_from_cifar10_with_backdoor(dataset_path, save_dir)