RRFRRF committed
Commit bd49e43 · 1 Parent(s): 870f4fc
Image/run_all_models.py CHANGED
@@ -1,3 +1,4 @@
+#bug
 import os
 import subprocess
 from pathlib import Path
Image/utils/dataset_utils.py CHANGED
@@ -3,7 +3,7 @@ import torchvision
 import torchvision.transforms as transforms
 import os
 
-def get_cifar10_dataloaders(batch_size=128, num_workers=2, local_dataset_path=None):
+def get_cifar10_dataloaders(batch_size=128, num_workers=2, local_dataset_path=None, shuffle=True):
     """Get data loaders for the CIFAR10 dataset
 
     Args:
@@ -45,16 +45,16 @@ def get_cifar10_dataloaders(batch_size=128, num_workers=2, local_dataset_path=None):
     trainset = torchvision.datasets.CIFAR10(
         root=dataset_path, train=True, download=download, transform=transform_train)
     trainloader = torch.utils.data.DataLoader(
-        trainset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
+        trainset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
 
     testset = torchvision.datasets.CIFAR10(
         root=dataset_path, train=False, download=download, transform=transform_test)
     testloader = torch.utils.data.DataLoader(
-        testset, batch_size=100, shuffle=False, num_workers=num_workers)
+        testset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
 
     return trainloader, testloader
 
-def get_mnist_dataloaders(batch_size=128, num_workers=2, local_dataset_path=None):
+def get_mnist_dataloaders(batch_size=128, num_workers=2, local_dataset_path=None, shuffle=True):
     """Get data loaders for the MNIST dataset
 
     Args:
@@ -100,11 +100,11 @@ def get_mnist_dataloaders(batch_size=128, num_workers=2, local_dataset_path=None):
     trainset = torchvision.datasets.MNIST(
         root=dataset_path, train=True, download=download, transform=transform_train)
     trainloader = torch.utils.data.DataLoader(
-        trainset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
+        trainset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
 
     testset = torchvision.datasets.MNIST(
         root=dataset_path, train=False, download=download, transform=transform_test)
     testloader = torch.utils.data.DataLoader(
-        testset, batch_size=100, shuffle=False, num_workers=num_workers)
+        testset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
 
     return trainloader, testloader
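The new shuffle parameter threads through both loaders, and the test loader now follows batch_size and shuffle as well, instead of the former fixed batch_size=100, shuffle=False. A minimal usage sketch, assuming these functions are importable from Image/utils/dataset_utils as utils.dataset_utils:

from utils.dataset_utils import get_cifar10_dataloaders, get_mnist_dataloaders

# Default call keeps the previous training behavior (shuffle=True).
trainloader, testloader = get_cifar10_dataloaders(batch_size=128, num_workers=2)

# Order-preserving loaders: with shuffle=False, sample order matches the
# dataset order, so stream positions can be mapped back to dataset indices.
ordered_train, ordered_test = get_mnist_dataloaders(
    batch_size=128, num_workers=2, shuffle=False)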
Image/utils/train_utils.py CHANGED
@@ -92,9 +92,10 @@ def collect_embeddings(model, dataloader, device):
         inputs = inputs.to(device)
         _ = model(inputs)
 
-    # Find the layer with the largest dimension
-    max_dim = 0
-    max_layer_name = None
+    # Find a layer whose dimension falls within the 512-1024 range
+    target_dim_range = (512, 1024)
+    suitable_layer_name = None
+    suitable_dim = None
 
     # Analyze the output dimensions of all layers
     for name, feat in activation.items():
@@ -102,9 +103,15 @@ def collect_embeddings(model, dataloader, device):
             continue
         # Compute the flattened dimension
         flat_dim = feat.numel() // feat.shape[0]  # feature dimension per sample
-        if flat_dim > max_dim:
-            max_dim = flat_dim
-            max_layer_name = name
+        if target_dim_range[0] <= flat_dim <= target_dim_range[1]:
+            suitable_layer_name = name
+            suitable_dim = flat_dim
+            break  # stop at the first layer that qualifies
+
+    if suitable_layer_name is None:
+        raise ValueError(f"No layer found with dimension in the {target_dim_range[0]}-{target_dim_range[1]} range")
+
+    print(f"Selected feature layer: {suitable_layer_name}, feature dimension: {suitable_dim}")
 
     # Clear the activations from the first run
     activation.clear()
@@ -115,7 +122,7 @@ def collect_embeddings(model, dataloader, device):
         _ = model(inputs)
 
         # Fetch and process the features
-        features = activation[max_layer_name]
+        features = activation[suitable_layer_name]
         flat_features = torch.flatten(features, start_dim=1)
         embeddings.append(flat_features.cpu().numpy())
         indices.extend(range(batch_idx * dataloader.batch_size,
@@ -271,8 +278,17 @@ def train_model(model, trainloader, testloader, epochs=200, lr=0.1, device='cuda'):
             model_path = os.path.join(epoch_dir, 'subject_model.pth')
             torch.save(model.state_dict(), model_path)
 
-            # Collect and save the embedding vectors
-            embeddings, indices = collect_embeddings(model, trainloader, device)
+            # Create a sequential dataloader dedicated to collecting embeddings
+            ordered_loader = torch.utils.data.DataLoader(
+                trainloader.dataset,  # use the same dataset
+                batch_size=trainloader.batch_size,
+                shuffle=False,  # ensure sequential loading
+                num_workers=trainloader.num_workers
+            )
+
+            # Collect and save the embedding vectors, using the sequential loader
+            embeddings, indices = collect_embeddings(model, ordered_loader, device)
+
            # Save the embedding vectors
            np.save(os.path.join(epoch_dir, 'train_data.npy'), embeddings)
 
92
  inputs = inputs.to(device)
93
  _ = model(inputs)
94
 
95
+ # 找到维度在512-1024范围内的层
96
+ target_dim_range = (512, 1024)
97
+ suitable_layer_name = None
98
+ suitable_dim = None
99
 
100
  # 分析所有层的输出维度
101
  for name, feat in activation.items():
 
103
  continue
104
  # 计算展平后的维度
105
  flat_dim = feat.numel() // feat.shape[0] # 每个样本的特征维度
106
+ if target_dim_range[0] <= flat_dim <= target_dim_range[1]:
107
+ suitable_layer_name = name
108
+ suitable_dim = flat_dim
109
+ break # 找到第一个符合条件的层就停止
110
+
111
+ if suitable_layer_name is None:
112
+ raise ValueError(f"没有找到维度在{target_dim_range[0]}-{target_dim_range[1]}范围内的层")
113
+
114
+ print(f"选择的特征层: {suitable_layer_name}, 特征维度: {suitable_dim}")
115
 
116
  # 清除第一次运行的激活值
117
  activation.clear()
 
122
  _ = model(inputs)
123
 
124
  # 获取并处理特征
125
+ features = activation[suitable_layer_name]
126
  flat_features = torch.flatten(features, start_dim=1)
127
  embeddings.append(flat_features.cpu().numpy())
128
  indices.extend(range(batch_idx * dataloader.batch_size,
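The selection logic in collect_embeddings assumes activation is a dict of per-layer outputs captured by forward hooks before the loop runs. The hook setup itself is outside this diff, so the register_activation_hooks helper below is a hypothetical reconstruction, a minimal sketch of the pattern rather than the repo's actual code:

import torch
import torch.nn as nn

def register_activation_hooks(model):
    # Hypothetical helper: record each leaf module's output, keyed by name.
    activation = {}
    handles = []
    for name, module in model.named_modules():
        if len(list(module.children())) == 0:  # leaf modules only
            def hook(mod, inp, out, name=name):
                if isinstance(out, torch.Tensor):
                    activation[name] = out.detach()
            handles.append(module.register_forward_hook(hook))
    return activation, handles

model = nn.Sequential(
    nn.Flatten(), nn.Linear(3 * 32 * 32, 768), nn.ReLU(), nn.Linear(768, 10))
activation, handles = register_activation_hooks(model)
_ = model(torch.randn(4, 3, 32, 32))

# Same rule as the diff: take the first layer whose per-sample flattened
# dimension lands in [512, 1024], replacing the old largest-dimension rule.
target_dim_range = (512, 1024)
suitable_layer_name, suitable_dim = None, None
for name, feat in activation.items():
    flat_dim = feat.numel() // feat.shape[0]
    if target_dim_range[0] <= flat_dim <= target_dim_range[1]:
        suitable_layer_name, suitable_dim = name, flat_dim
        break
print(suitable_layer_name, suitable_dim)  # "1" (the 768-unit Linear), 768
for h in handles:
    h.remove()

Raising ValueError when nothing matches, as the diff does, is a sensible guard: unlike the old largest-layer rule, a ranged match can legitimately come up empty.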
 
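The ordered loader matters because collect_embeddings derives its indices arithmetically, as batch_idx * dataloader.batch_size plus an in-batch offset; that arithmetic only names the right samples when the loader yields them in dataset order. A small self-contained illustration of the invariant, using a toy TensorDataset rather than the repo's data:

import torch
from torch.utils.data import DataLoader, TensorDataset

# Each sample's value equals its dataset index, so the check below is direct.
dataset = TensorDataset(torch.arange(10).float().unsqueeze(1))
loader = DataLoader(dataset, batch_size=4, shuffle=False)

for batch_idx, (x,) in enumerate(loader):
    start = batch_idx * loader.batch_size
    recovered = list(range(start, start + x.shape[0]))
    # Holds only because shuffle=False; with a shuffling loader the saved
    # embeddings would no longer line up with these computed indices.
    assert [int(v) for v in x.squeeze(1)] == recovered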