import os, csv, torch, numpy, scipy.io, PIL.Image, torchvision.transforms

import time
import uvicorn
from fastapi import FastAPI, UploadFile, File
from fastapi.responses import StreamingResponse, FileResponse

import jittor as jt
from scipy.io import loadmat
from mit_utils import colorEncode
from encoder_decoder import EncoderDecoder

# FastAPI application instance; served by uvicorn (see __main__ guard below).
app = FastAPI()
# Module-level shared state populated by startup_event(): holds the loaded
# model, the color palette, the input image path, and the class-name table.
data = {}

@app.on_event("startup")
async def startup_event():
    """Load the segmentation model and its lookup tables once at server start.

    Populates the module-level ``data`` dict with:
      * "model"    - the CCNet encoder/decoder in eval mode,
      * "colors"   - the per-class RGB palette from color150.mat,
      * "img_path" - the fixed input image the endpoint processes,
      * "names"    - class id -> short human-readable name.
    """
    # Run inference on CUDA device 1.
    jt.flags.use_cuda = 1  
    jt.flags.device_id = 1

    checkpoint = "/data/share/leixy/ccnet_jittor/ckpt/ade20k-resnet101-new-cca_deepsup/epoch_40.pkl"
    model = EncoderDecoder(resume=checkpoint)
    model.eval()
    data["model"] = model

    # Palette: one RGB triple per class, loaded from a MATLAB .mat file.
    data["colors"] = scipy.io.loadmat('data/color150.mat')['colors']
    data["img_path"] = "./api_data/test.jpg"

    # Build the class-name table; column 0 is the class id, column 5 holds
    # semicolon-separated aliases of which we keep only the first.
    data["names"] = {}
    with open('data/object150_info.csv') as csv_file:
        records = csv.reader(csv_file)
        next(records)  # drop the header row
        for record in records:
            data["names"][int(record[0])] = record[5].split(";")[0]
    print("model established! ")

def visualize_result(pred, index=None):
    """Colorize a per-pixel class prediction and save it next to the input.

    Args:
        pred: 2-D numpy array of predicted class indices.
        index: optional class index; when given, every pixel that is NOT
            that class is masked to -1 before colorizing, so only that
            class is rendered.

    Returns:
        The path of the written PNG (data["img_path"] with .jpg -> .png).
    """
    # Filter prediction to a single class if requested.
    if index is not None:
        # Copy so the caller's array is not mutated.
        pred = pred.copy()
        pred[pred != index] = -1
        # "names" appears keyed by 1-based class id while pred is 0-based
        # (see the id column read at startup) — hence the +1.
        print(f'{data["names"][index+1]}:')

    # Map each class index to its RGB color from the loaded palette.
    pred_color = colorEncode(pred, data["colors"]).astype(numpy.uint8)

    # Save the colorized prediction. The original wrapped this path in a
    # one-argument os.path.join(), which is a no-op — build it directly.
    out_path = data["img_path"].replace('.jpg', '.png')
    PIL.Image.fromarray(pred_color).save(out_path)
    return out_path

@app.post("/api/ccnet")
async def make_reply():
    """Segment the fixed test image and report elapsed wall-clock time.

    Reads the image at data["img_path"], runs the model loaded at startup,
    and writes the colorized prediction alongside the input as a .png.
    Returns a small JSON status payload including the processing time.
    """
    started = time.time()

    # Standard ImageNet mean/std normalization expected by the backbone.
    preprocess = torchvision.transforms.Compose([
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225]),
    ])

    image = PIL.Image.open(data["img_path"]).convert('RGB')
    tensor = preprocess(image)

    # The model consumes a batch dict; seg_label is a dummy placeholder
    # and segSize asks for scores at the input's spatial resolution.
    batch = {'img_data': tensor[None], 'seg_label': torch.tensor([1])}
    scores = data["model"](batch, segSize=tensor.shape[1:])

    # jt.argmax yields a pair — presumably (indices, values) per the jittor
    # API; [0] selects the indices, the second [0] the first batch sample.
    prediction = jt.argmax(scores, dim=1)[0][0].numpy()
    visualize_result(prediction)

    return {"message":"ok", "code":200, "time":time.time() - started}
    # Alternative: stream the rendered PNG back to the client instead.
    # return StreamingResponse(open('./api_data/test.png', mode="rb"), media_type="image/png")

if __name__=="__main__":
    # Serve on all interfaces; the 'api:app' import string implies this
    # module is expected to be saved as api.py.
    uvicorn.run(app='api:app', host="0.0.0.0", port=19666)