import os
import tempfile

import mlflow
import ray
import requests
import torch
from ray import serve
from starlette.requests import Request


@serve.deployment(num_replicas=1)
class Translator:
    """Ray Serve deployment that downloads a PyTorch checkpoint from an
    MLflow tracking server at startup and serves argmax classification
    predictions over HTTP."""

    def __init__(self, mlflow_address, mlflow_model_address):
        """Fetch the model artifact from MLflow and load it for inference.

        Args:
            mlflow_address: Base URL of the MLflow tracking server,
                including the trailing slash (e.g. 'http://host:5002/').
            mlflow_model_address: MLflow model URI of the form
                'runs:/<run_id>/<artifact_path>'.

        Raises:
            requests.HTTPError: If the artifact download fails.
        """
        # 'runs:/<run_id>/<artifact_path>' ->
        # ['runs:', '<run_id>', '<artifact_path>']
        model_info = mlflow_model_address.split('/')

        model_url = (
            f'{mlflow_address}get-artifact'
            f'?path={model_info[-1]}%2Fdata%2Fmodel.pth'
            f'&run_uuid={model_info[1]}'
        )
        response = requests.get(model_url)
        # Fail fast on a bad download instead of silently writing an HTTP
        # error page into model.pth and crashing later in torch.load.
        response.raise_for_status()

        mlflow.set_tracking_uri(mlflow_address)

        # The checkpoint only needs to exist on disk while torch.load reads
        # it, so let TemporaryDirectory clean it up automatically (the old
        # mkdtemp dir was never removed).
        with tempfile.TemporaryDirectory() as temp_dir:
            model_address = os.path.join(temp_dir, 'model.pth')
            with open(model_address, 'wb') as f:
                f.write(response.content)

            # SECURITY: torch.load unpickles arbitrary objects — only load
            # checkpoints from a trusted MLflow server.
            self.model = torch.load(model_address)
        self.model.eval()

    def predict(self, row: list) -> list:
        """Run inference on a batch of feature rows.

        Args:
            row: Nested list of floats, one inner list per sample.

        Returns:
            List of predicted class indices, one per sample.
        """
        with torch.no_grad():
            outputs = self.model(torch.Tensor(row))
            # Highest-scoring class per row; the scores are discarded.
            # (no_grad already detaches, so the deprecated `.data` is gone.)
            _, predicted = torch.max(outputs, 1)

        # .cpu() so this also works when the checkpoint was saved on GPU.
        return list(predicted.cpu().numpy())

    async def __call__(self, http_request: Request) -> list:
        """HTTP entry point: the JSON request body is the batch of rows."""
        row_li: list = await http_request.json()
        return self.predict(row_li)


@serve.deployment()
def greet():
    """Trivial demo endpoint that always responds with a fixed greeting."""
    message = 'Good morning!'
    return message


if __name__ == '__main__':
    # Connect to the remote Ray cluster and start Serve, listening on all
    # interfaces; detached=True keeps deployments alive after this script
    # exits.
    ray.init(address='ray://192.168.10.192:10001')
    http_options = {'host': '0.0.0.0', 'port': 5003}
    serve.start(http_options=http_options, detached=True)

    # Simple greeting endpoint at /app2.
    greet.options(route_prefix='/app2', name='greet').deploy()

    # Model-serving endpoint at /app1, backed by an MLflow run artifact.
    mlflow_addr = 'http://192.168.10.28:5002/'
    model_addr = 'runs:/3b8fc0b67cf84ab299421f31ea9be972/my_model'
    Translator.options(
        route_prefix='/app1',
        name='predict',
        ray_actor_options={
            'num_gpus': 4
        }).deploy(mlflow_addr, model_addr)
