EMO_demo_00 / app.py
KLeedrug's picture
add comment & change model into pretrained to see if it works
cbfb9df
raw
history blame contribute delete
No virus
1.55 kB
# -*- coding: utf-8 -*-
"""gradio_app_0
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/lalalalalal_HAHAHHAH!@#$%^&
"""
from EMO_AI.all import *
"""
# should add kwargs in get_model(), time to update our lib
from pathlib import Path
# return a dict of options
def load_weight(f="emo_0.pt"):
p = Path(f)
if p.is_file():
return {"PATH": f, "inference_only": False}
return {"pretrained":False}
"""
# Build the emotion model via EMO_AI; presumably loads local weights from
# "emo_0.pt" when present — TODO confirm get_model()'s path handling.
# NOTE(review): runs at import time, so module import is slow / can fail here.
model = get_model("emo_0.pt", inference_only=False)
# stable: model = get_model(pretrained=False)
import gradio as gr
"""# TODO
## model
### put our model (and pretrained weight) to huggingface space
## App
### store users result there (and pass to firebase?)
### enable to send request from the mobile to space, and send result back from space to mobile
### use socket?
"""
"for creating filename that won't collide as much"
import hashlib
from pathlib import Path
"add write data function"
def get_filename(text):
    """Derive a low-collision filename for *text* via its MD5 hex digest.

    MD5 is used purely for filename dedup here, not for security.
    """
    digest = hashlib.md5(text.encode("utf-8"))
    return digest.hexdigest()
def write_result(text, content):
    """Persist *content* to a file named after the hash of *text*.

    The filename comes from get_filename(), so identical inputs map to the
    same file; an already-existing file is left untouched (write-once cache).

    Args:
        text: the input string whose hash names the result file.
        content: the string written to that file.

    Returns:
        Path of the (possibly pre-existing) result file — the caller may
        pass it onward (e.g. over a socket, per the TODO notes above).
    """
    filename = Path(get_filename(text))
    if not filename.is_file():
        # Explicit encoding: the write_text() default is locale-dependent
        # and can mangle non-ASCII model output on some platforms.
        filename.write_text(content, encoding="utf-8")
    return filename
def fn2(text, model=model):
    """Gradio handler: run the model on *text* and return its output.

    As a side effect the output is cached to a hash-named file via
    write_result(); the returned Path was previously bound to an unused
    local, which is now dropped.

    Args:
        text: user input string from the Gradio textbox.
        model: defaults to the module-level model loaded above.

    Returns:
        The model output string shown in the UI.
    """
    out = get_output(text, model)
    # Side effect only: persist the result to disk.
    write_result(text, out)
    return out
# Wire the handler into a minimal text-in / text-out Gradio UI and serve it.
interface = gr.Interface(fn=fn2, inputs="text", outputs="text")
# launch() blocks and serves the app (locally unless sharing is enabled).
interface.launch()