import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
from fastapi import FastAPI, UploadFile, File
from sklearn.metrics import mean_squared_error
import pandas as pd
import io

# from joblib import load, dump


# Define the DNN model: a fully connected regressor with two hidden ReLU layers
class DNN(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(DNN, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.relu2 = nn.ReLU()
        self.fc3 = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        x = self.fc1(x)
        x = self.relu1(x)
        x = self.fc2(x)
        x = self.relu2(x)
        x = self.fc3(x)
        return x
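
# Shape sanity check (illustrative comment only; the batch size of 4 below is
# an arbitrary assumption):
#   DNN(input_size=6, hidden_size=256, output_size=1)(torch.zeros(4, 6)).shape
#   -> torch.Size([4, 1]), i.e. one predicted yield per row of 6 features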


# Instantiate the model and load pre-trained weights ("model_weights.pth" must
# already exist in the working directory, otherwise startup fails)
model = DNN(input_size=6, hidden_size=256, output_size=1)

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = model.to(device)
model.load_state_dict(torch.load("model_weights.pth", map_location=device))

# Create a new FastAPI app instance
app = FastAPI(docs_url="/", redoc_url="/new_redoc")


# Create a GET endpoint that predicts yield from the six path parameters
@app.get(
    "/generate/{Soil_Quality}/{Seed_Variety}/{Fertilizer_Amount_kg_per_hectare}/{Sunny_Days}/{Rainfall_mm}/{Irrigation_Schedule}"
)
def generate(
    Soil_Quality: float,
    Seed_Variety: float,
    Fertilizer_Amount_kg_per_hectare: float,
    Sunny_Days: float,
    Rainfall_mm: float,
    Irrigation_Schedule: float,
):
    global model

    # Combine all inputs
    input_data = [
        Soil_Quality,
        Seed_Variety,
        Fertilizer_Amount_kg_per_hectare,
        Sunny_Days,
        Rainfall_mm,
        Irrigation_Schedule,
    ]

    input_data = torch.tensor([input_data], dtype=torch.float32).to(device)

    # Run inference without tracking gradients
    with torch.no_grad():
        prediction = model(input_data)

    return {"prediction": prediction.item()}


@app.post("/train")
async def train(
    trainDatafile: UploadFile = File(...),
    testDatafile: UploadFile = File(...),
    epochs: int = 100,
):
    global model

    contents1 = await trainDatafile.read()
    train_data = pd.read_csv(io.StringIO(contents1.decode("utf-8")))

    contents2 = await testDatafile.read()
    test_data = pd.read_csv(io.StringIO(contents2.decode("utf-8")))

    # Split features and target into numpy arrays. Both CSVs must contain a
    # "Yield_kg_per_hectare" column; the remaining feature columns should match
    # the order of the inputs expected by the /generate endpoint.
    X_train = train_data.drop("Yield_kg_per_hectare", axis=1).values
    y_train = train_data["Yield_kg_per_hectare"].values
    X_test = test_data.drop("Yield_kg_per_hectare", axis=1).values
    y_test = test_data["Yield_kg_per_hectare"].values

    # Convert data to torch tensors on the training device
    # (y_test stays on the CPU; it is only used for the RMSE computation)
    X_train = torch.tensor(X_train, dtype=torch.float32).to(device)
    y_train = torch.tensor(y_train, dtype=torch.float32).to(device)

    X_test = torch.tensor(X_test, dtype=torch.float32).to(device)
    y_test = torch.tensor(y_test, dtype=torch.float32)

    # Define loss function and optimizer
    criterion = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    rmseList = []

    for epoch in range(epochs):
        optimizer.zero_grad()

        # Forward pass
        outputs = model(X_train)
        loss = criterion(outputs, y_train.unsqueeze(1))

        # Backward pass and optimization
        loss.backward()
        optimizer.step()

        # Evaluate on the held-out test set without tracking gradients
        with torch.no_grad():
            predictions = model(X_test)
        rmse = np.sqrt(
            mean_squared_error(
                y_test.cpu().numpy(), predictions.cpu().numpy()
            )
        )
        print(f"Epoch: {epoch+1}, RMSE: {float(rmse)}")
        rmseList.append(float(rmse))

    # Persist the updated weights so later /generate calls use the retrained model
    torch.save(model.state_dict(), "model_weights.pth")

    return {"rmse": rmseList}
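

# --- Usage sketch (illustrative assumptions: this file is saved as main.py,
# the server runs on localhost:8000, and the CSVs are named train.csv /
# test.csv) ---
#
# Start the API:
#   uvicorn main:app --host 0.0.0.0 --port 8000
#
# Predict a yield via the GET endpoint (path parameters in declaration order):
#   import requests
#   r = requests.get("http://localhost:8000/generate/7.5/1/120.0/25/600.0/3")
#   print(r.json())  # {"prediction": ...}
#
# Retrain via the POST endpoint with two CSV uploads and an optional "epochs"
# query parameter:
#   with open("train.csv", "rb") as tr, open("test.csv", "rb") as te:
#       r = requests.post(
#           "http://localhost:8000/train",
#           params={"epochs": 50},
#           files={"trainDatafile": tr, "testDatafile": te},
#       )
#   print(r.json()["rmse"][-1])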