import torch
from ray import serve
from starlette.requests import Request
from torch import nn
from torch.nn import functional as F


class ModelNet(nn.Module):
    """Small fully-connected classifier: 4 input features -> 3 class probabilities."""

    def __init__(self):
        super().__init__()
        # Three stacked linear layers with no activations in between —
        # kept exactly as-is so existing checkpoints remain loadable.
        layers = [
            nn.Linear(4, 10),
            nn.Linear(10, 10),
            nn.Linear(10, 3),
        ]
        self.network = nn.Sequential(*layers)

    def forward(self, x):
        """Run the linear stack and normalize each row into a probability distribution."""
        logits = self.network(x)
        return F.softmax(logits, dim=1)


@serve.deployment(
    num_replicas=2, ray_actor_options={
        'num_cpus': 0.2,
        'num_gpus': 0
    })
class Translator:
    """Ray Serve deployment that classifies 4-feature rows with ModelNet."""

    def __init__(self, name):
        # `name` is only logged; the parameter is kept for caller compatibility.
        print(name)
        # Load model weights from the fixed checkpoint path.
        self.model = ModelNet()
        # The replica is pinned to num_gpus=0, so force the checkpoint onto
        # CPU; without map_location a GPU-saved checkpoint fails to load here.
        model_data = torch.load(
            '/data2/project/coco/components/horzdnn/model.pth',
            map_location='cpu')
        self.model.load_state_dict(model_data['model_state_dict'])
        self.model.eval()

    def predict(self, row: list) -> list:
        """Return the argmax class index for each input row.

        Args:
            row: nested list of floats, presumably shaped (batch, 4)
                 to match ModelNet's input layer — TODO confirm with callers.

        Returns:
            List of plain Python ints (one predicted class per row).
        """
        with torch.no_grad():
            outputs = self.model(torch.Tensor(row))
            # `.data` is unnecessary (and discouraged) under no_grad().
            _, predicted = torch.max(outputs, 1)
        # tolist() yields plain Python ints; numpy int64 scalars are not
        # JSON-serializable by the HTTP response layer.
        return predicted.tolist()

    async def __call__(self, http_request: Request) -> list:
        # Request body is expected to be a JSON array of feature rows.
        row_li: list = await http_request.json()
        return self.predict(row_li)


if __name__ == '__main__':
    # Bind the deployment (123 is forwarded to Translator.__init__ as `name`)
    # and expose it under /test1 on all interfaces, port 5004.
    translator_app = Translator.options(route_prefix='/test1').bind(123)
    serve.run(translator_app, host='0.0.0.0', port=5004)
