filipzawadka committed
Commit 4873b31 · 1 parent: 6390edb

inference pipeline

Files changed:
- app.py +58 -4
- requirements.txt +3 -0

app.py
CHANGED
@@ -1,8 +1,62 @@
 import gradio as gr
 import os
+import requests
+from PIL import Image
+from torchvision import transforms
+import torch
+import torchvision.models as models
+import torch.nn as nn
+import io

+class FarmNet(nn.Module):
+    def __init__(self):
+        super(FarmNet, self).__init__()
+        self.conv1 = nn.Conv2d(3, 16, 3, padding=1)
+        self.conv2 = nn.Conv2d(16, 32, 3, padding=1)
+        self.conv3 = nn.Conv2d(32, 64, 3, padding=1)
+        self.pool = nn.MaxPool2d(2, 2)
+        self.fc1 = nn.Linear(64 * 50 * 50, 512)
+        self.fc2 = nn.Linear(512, 2)
+        self.relu = nn.ReLU()
+
+    def forward(self, x):
+        x = self.pool(self.relu(self.conv1(x)))
+        x = self.pool(self.relu(self.conv2(x)))
+        x = self.pool(self.relu(self.conv3(x)))
+        x = x.view(-1, 64 * 50 * 50)  # flatten for the classifier head
+        x = self.relu(self.fc1(x))
+        x = self.fc2(x)
+        return x
+
+model = FarmNet()  # Replace with your model and architecture
+model.load_state_dict(torch.load('farmnet_model.pth', map_location='cpu'))  # load on CPU for the Space
+model.eval()  # Set the model to evaluation mode
+
+# Preprocess the image
+transform = transforms.Compose([
+    transforms.Resize((400, 400)),  # Adjust according to your model's input size
+    transforms.ToTensor(),
+])
+
+# Class labels for the prediction
+classes = ['not farm', 'farm']  # Adjust according to your classes
+
-
-
+# Example coordinates: 64.777466, -147.489792
+def greet(latitude, longitude):
+    image_url = f"https://maps.googleapis.com/maps/api/staticmap?center={latitude},{longitude}&zoom=17&size=400x400&maptype=satellite&key={os.environ['GOOGLE_API_KEY']}"
+    response = requests.get(image_url)
+    img_data = response.content
+    pil_img = Image.open(io.BytesIO(img_data)).convert('RGB')
+    img = transform(pil_img)
+    img = img.unsqueeze(0)  # Add batch dimension
+
+    # Make an inference
+    with torch.no_grad():
+        outputs = model(img)
+        _, predicted = torch.max(outputs, 1)
+
+    return pil_img, classes[predicted.item()]  # return the PIL image directly; Gradio renders it

-iface = gr.Interface(fn=greet, inputs="text", outputs="image")
-iface.launch()
+iface = gr.Interface(fn=greet, inputs=["number", "number"], outputs=["image", "label"])
+iface.launch()
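The fc1 sizing above follows directly from the input resolution: three 2x2 max-pools take a 400x400 tile to 50x50, so the flattened feature map has 64 * 50 * 50 = 160,000 values. A minimal standalone check of that arithmetic (a random tensor stands in for a real map tile; the Sequential stack mirrors FarmNet's conv/pool layers):

import torch
import torch.nn as nn

# Mirror FarmNet's feature extractor to confirm the size fc1 expects.
stack = nn.Sequential(
    nn.Conv2d(3, 16, 3, padding=1), nn.ReLU(), nn.MaxPool2d(2, 2),
    nn.Conv2d(16, 32, 3, padding=1), nn.ReLU(), nn.MaxPool2d(2, 2),
    nn.Conv2d(32, 64, 3, padding=1), nn.ReLU(), nn.MaxPool2d(2, 2),
)
x = torch.randn(1, 3, 400, 400)  # one RGB tile at the 400x400 Static Maps size
with torch.no_grad():
    feats = stack(x)
print(feats.shape)             # torch.Size([1, 64, 50, 50])
print(feats.flatten(1).shape)  # torch.Size([1, 160000]) == 64 * 50 * 50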
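Because greet() needs a live GOOGLE_API_KEY, it is easier to smoke-test the inference path locally with a synthetic tile in place of a Static Maps response. A sketch, assuming the model, transform, and classes objects defined in app.py above are in scope and farmnet_model.pth has been loaded:

import torch
from PIL import Image

# Same preprocessing + inference steps as greet(), minus the HTTP fetch.
# `model`, `transform`, and `classes` are the objects defined in app.py.
pil_img = Image.new('RGB', (400, 400), color=(34, 139, 34))  # flat green square
img = transform(pil_img).unsqueeze(0)  # add batch dimension, as greet() does
with torch.no_grad():
    outputs = model(img)
    _, predicted = torch.max(outputs, 1)
print(classes[predicted.item()])  # 'farm' or 'not farm'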
requirements.txt
ADDED
@@ -0,0 +1,3 @@
+torch
+torchvision
+Pillow  # installable name for the PIL module imported in app.py
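One detail worth noting: the installable package is Pillow, while app.py imports it as PIL (a bare "PIL" line would fail to resolve on pip). gradio and requests are not pinned here, presumably because the Spaces Gradio image already ships them. A quick post-install import check (a sketch; run after pip install -r requirements.txt):

# Confirm the pinned dependencies import under the names app.py uses.
import torch
import torchvision
from PIL import Image  # provided by the Pillow distribution

print(torch.__version__)
print(torchvision.__version__)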