```python
# define holdout sets from the ground-truth spatial clusters; clusters 6 and 7 overlap geographically
holdout_sets = [[0], [1], [2], [4], [5], [6, 7], [8]]

set_0 = ds.filter(lambda example: example['cluster'] in holdout_sets[0])
unq_vals = list(set(set_0['cluster']))
print(f'Unique cluster values in set 0: {unq_vals}')
```
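As a quick sanity check on cluster membership, you can also tally how many observations fall in each cluster. This is a small sketch of our own (not part of the original walkthrough), assuming the `cluster` column as loaded above:

```python
from collections import Counter

# tally observations per ground-truth cluster (illustrative check)
cluster_counts = Counter(ds['cluster'])
print(sorted(cluster_counts.items()))
```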

# Example cross-validation loop

We will use the geographic cluster feature to cross-validate performance. First, let's set the format of the dataset to torch and define some functions for our training loop:

```python
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision.models import resnet50, ResNet50_Weights
from tqdm import tqdm
import pandas as pd

ds = ds.with_format("torch")

def train_one_epoch(model, train_loader, criterion, optimizer, device):
    model.train()
    running_loss = 0.0
    correct_predictions = 0
    total_predictions = 0
    for i, batch in enumerate(train_loader):
        # images arrive channels-last; permute to NCHW for the model
        inputs = batch['pixel_values'].permute(0, 3, 1, 2).float().to(device)
        labels = batch['label'].to(device)

        optimizer.zero_grad()

        outputs = model(inputs)
        _, predicted = torch.max(outputs.data, 1)
        total_predictions += labels.size(0)
        correct_predictions += (predicted == labels).sum().item()

        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()

    train_accuracy = correct_predictions / total_predictions
    train_loss = running_loss / len(train_loader)

    return train_loss, train_accuracy

def evaluate_one_epoch(model, test_loader, criterion, device):
    model.eval()
    running_loss = 0.0
    correct_predictions = 0
    total_predictions = 0
    with torch.no_grad():
        for batch in test_loader:
            inputs = batch['pixel_values'].permute(0, 3, 1, 2).float().to(device)
            labels = batch['label'].to(device)

            outputs = model(inputs)
            _, predicted = torch.max(outputs.data, 1)
            total_predictions += labels.size(0)
            correct_predictions += (predicted == labels).sum().item()

            loss = criterion(outputs, labels)
            running_loss += loss.item()

    test_accuracy = correct_predictions / total_predictions
    test_loss = running_loss / len(test_loader)

    return test_loss, test_accuracy

def cross_val(ds, holdout_set):
    # train on all clusters except the held-out one(s); test on the holdout
    train = ds.filter(lambda example: example['cluster'] not in holdout_set)
    test = ds.filter(lambda example: example['cluster'] in holdout_set)

    # ImageNet-pretrained backbone with a fresh classification head
    model = resnet50(weights=ResNet50_Weights.DEFAULT)
    num_classes = len(ds['label'].unique())
    model.fc = nn.Linear(2048, num_classes)

    # Define the loss function and optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    # Define the data loaders
    train_loader = torch.utils.data.DataLoader(train, batch_size=32, shuffle=True)
    test_loader = torch.utils.data.DataLoader(test, batch_size=32, shuffle=False)

    # Train the model
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)

    results = []

    for epoch in range(5):
        train_loss, train_accuracy = train_one_epoch(model, train_loader, criterion, optimizer, device)
        test_loss, test_accuracy = evaluate_one_epoch(model, test_loader, criterion, device)

        results.append({
            'epoch': epoch + 1,
            'train_loss': train_loss,
            'train_accuracy': train_accuracy,
            'test_loss': test_loss,
            'test_accuracy': test_accuracy,
            'holdout_set': holdout_set
        })

    results_df = pd.DataFrame(results)

    return results_df
```
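Note that `cross_val` builds a fresh pretrained model on each call, so every holdout set is evaluated from the same starting weights rather than continuing training across folds.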

Next we'll sequentially hold out each geographic cluster set and store performance:

```python
results = []
pbar_holdout = tqdm(holdout_sets, desc="Holdout Sets")
for holdout_set in pbar_holdout:
    results.append(cross_val(ds, holdout_set))
    pbar_holdout.set_postfix_str(f"Completed holdout set {holdout_set}")

results_df = pd.concat(results)
```
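At this point `results_df` holds one row per epoch per holdout set (5 epochs × 7 sets), which the plotting code below aggregates by epoch.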

Finally, we plot the results of geographic cross-validation:

```python
import matplotlib.pyplot as plt

# Group the results by epoch
grouped_results = results_df.groupby('epoch')

# Compute the mean and standard deviation of the test accuracy at each epoch
mean_test_accuracy = grouped_results['test_accuracy'].mean()
std_test_accuracy = grouped_results['test_accuracy'].std()

# Approximate 68% confidence interval: mean +/- 1 standard deviation across holdout sets
lower_bound = mean_test_accuracy - std_test_accuracy
upper_bound = mean_test_accuracy + std_test_accuracy

# Plot the mean test accuracy
plt.plot(mean_test_accuracy.index, mean_test_accuracy)

# Plot the error ribbon
plt.fill_between(lower_bound.index, lower_bound, upper_bound, color='b', alpha=.1)

# Set the axis labels
plt.xlabel('Epoch')
plt.ylabel('Cross-validated Accuracy')

# Show the plot
plt.show()
```

<img src="https://huggingface.co/datasets/mpg-ranch/leafy_spurge/resolve/main/doc_figures/cluster_cv_fig.png" width="75%" height="75%">
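
Beyond the epoch-averaged curve, it can be informative to compare held-out sites directly. Here is a minimal sketch (our addition, assuming the `results_df` built above) that prints final-epoch test accuracy for each held-out cluster set:

```python
# final-epoch test accuracy per held-out cluster set (illustrative addition)
final_epoch = results_df[results_df['epoch'] == results_df['epoch'].max()]
print(final_epoch[['holdout_set', 'test_accuracy']].to_string(index=False))
```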