import io
import os

import gradio as gr
import requests
import torch
import torch.nn as nn
import wandb
from PIL import Image
from torchvision import transforms

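# Fetch the trained model weights from W&B: a short inference-type run pulls the
# latest version of the 'farmnet_model_1' artifact and downloads it locally.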
run = wandb.init(project="farmnet", job_type='inference')
artifact = run.use_artifact("farmnet_model_1:latest", type='model')
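# download() returns the local directory that holds the artifact's files.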
artifact_dir = artifact.download()
wandb.finish()



class FarmNet(nn.Module):
    """Small 3-block CNN for binary farm / not-farm classification of 400x400 RGB tiles."""

    def __init__(self):
        super(FarmNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 16, 3, padding=1)
        self.conv2 = nn.Conv2d(16, 32, 3, padding=1)
        self.conv3 = nn.Conv2d(32, 64, 3, padding=1)
        self.pool = nn.MaxPool2d(2, 2)
        # Three 2x2 poolings shrink a 400x400 input to 50x50, hence 64 * 50 * 50 features.
        self.fc1 = nn.Linear(64 * 50 * 50, 512)
        self.fc2 = nn.Linear(512, 2)
        self.relu = nn.ReLU()

    def forward(self, x):
        x = self.pool(self.relu(self.conv1(x)))  # -> (N, 16, 200, 200)
        x = self.pool(self.relu(self.conv2(x)))  # -> (N, 32, 100, 100)
        x = self.pool(self.relu(self.conv3(x)))  # -> (N, 64, 50, 50)
        x = x.view(-1, 64 * 50 * 50)             # flatten
        x = self.relu(self.fc1(x))
        x = self.fc2(x)                          # raw logits; argmax below needs no softmax
        return x

model = FarmNet()
# Load the weights from the downloaded artifact directory rather than a hardcoded
# version path, so the code keeps working when :latest resolves to a newer version.
model.load_state_dict(torch.load(os.path.join(artifact_dir, 'farmnet_model.pth'), map_location='cpu'))
model.eval()  # Evaluation mode: disables dropout/batchnorm training behavior
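
# Quick shape sanity check (a minimal sketch): a 400x400 RGB input should yield
# a (1, 2) logit tensor, confirming the flattened size wired into fc1.
with torch.no_grad():
    assert model(torch.randn(1, 3, 400, 400)).shape == (1, 2)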

# Preprocessing: match the 400x400 tiles returned by the Static Maps request below.
transform = transforms.Compose([
    transforms.Resize((400, 400)),
    transforms.ToTensor(),
])
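# ToTensor() scales pixel values to [0, 1]; no mean/std normalization is applied,
# which assumes the model was trained on unnormalized [0, 1] tensors.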


# Class labels, indexed by the argmax of the model's output logits.
classes = ['not farm', 'farm']

def classify_location(latitude, longitude):
    """Fetch a satellite tile for the given coordinates and classify it as farm or not farm."""
    # Requires a GOOGLE_API_KEY environment variable with Static Maps access.
    image_url = (
        f"https://maps.googleapis.com/maps/api/staticmap"
        f"?center={latitude},{longitude}&zoom=17&size=400x400&maptype=satellite"
        f"&key={os.environ['GOOGLE_API_KEY']}"
    )
    response = requests.get(image_url)
    response.raise_for_status()  # Surface bad-key or quota errors instead of feeding HTML to PIL
    pil_img = Image.open(io.BytesIO(response.content)).convert('RGB')
    img = transform(pil_img).unsqueeze(0)  # Add batch dimension: (1, 3, 400, 400)

    # Run inference without tracking gradients
    with torch.no_grad():
        outputs = model(img)
    _, predicted = torch.max(outputs, 1)

    return pil_img, classes[predicted.item()]
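
# Example direct call (assumes GOOGLE_API_KEY is set in the environment):
#   tile, label = classify_location(64.777466, -147.489792)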
iface = gr.Interface(
    fn=classify_location,
    inputs=[gr.Number(label="Latitude"), gr.Number(label="Longitude")],
    outputs=[gr.Image(label="Satellite tile"), gr.Label(label="Prediction")],
)
iface.launch()
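# launch() starts the web server and blocks; pass share=True for a temporary public URL.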