# File size: 2,165 Bytes
# 5e1960c 88c0ccd 52909e5 88c0ccd c8b3974 88c0ccd c8b3974 88c0ccd 1553b91 88c0ccd 1553b91 52909e5 88c0ccd 13b24be 88c0ccd
# NOTE(review): the two lines above (and the line-number gutter that followed) are
# extraction artifacts (git blame hashes / viewer chrome), not source code.
import os, json, logging
import requests
from transformers import pipeline
from flask import Flask, request, jsonify
app=Flask(__name__)
@app.route("/")
def hello():
return "welcome!"
def parse_params():
    """Extract the 'input' and 'kargs' query parameters from the current request.

    Both parameters are best-effort JSON-decoded:
      - 'input' falls back to its raw string (or None) when it is not valid JSON.
      - 'kargs' falls back to an empty dict, and is forced to a dict because
        callers splat it into ``pipeline(**kargs)``.

    Returns:
        tuple: (decoded input value, kwargs dict)
    """
    # Avoid shadowing the builtin ``input``.
    raw_input = request.args.get('input')
    raw_kargs = request.args.get('kargs')
    # BUGFIX: json.loads decodes a string; json.load expects a file object, so
    # the original ``json.load(input)`` always raised and never decoded anything.
    try:
        raw_input = json.loads(raw_input)
    except (TypeError, ValueError):
        # Deliberate best-effort: keep the raw string (or None) as-is.
        pass
    try:
        kargs = json.loads(raw_kargs)
        if not isinstance(kargs, dict):
            # A non-dict (e.g. a JSON list) cannot be used as **kwargs.
            kargs = {}
    except (TypeError, ValueError):
        kargs = {}
    return raw_input, kargs
from huggingface_hub import HfApi
api=HfApi()
@app.route("/search")
def search():
args=request.args.to_dict()
models = api.list_models(**args)
return jsonify(models)
@app.route("/task_list")
def tasks():
return [item.strip() for item in
'''audio-classification
automatic-speech-recognition
conversational
depth-estimation
document-question-answering
feature-extraction
fill-mask
image-classification
image-feature-extraction
image-segmentation
image-to-image
image-to-text
mask-generation
object-detection
question-answering
summarization
table-question-answering
text2text-generation
text-classification (alias sentiment-analysis available)
text-generation
text-to-audio (alias text-to-speech available)
token-classification (alias ner available)
translation
translation_xx_to_yy
video-classification
visual-question-answering
zero-shot-classification
zero-shot-image-classification
zero-shot-audio-classification
zero-shot-object-detection'''.split("\n")
]
@app.route("/<task>")
def run_task(task):
(input, kargs)=parse_params()
pipe=pipeline(task, **kargs)
return pipe(input)
@app.route("/<user>/<model>")
def run_model():
(input, kargs)=parse_params()
pipe=pipeline(model=f'{user}/{model}', **kargs)
return pipe(input)
@app.route("/<task>/<user>/<model>")
def run_task_model():
(input, kargs)=parse_params()
pipe=pipeline(task, model=f'{user}/{model}', **kargs)
return pipe(input)
# BUGFIX: removed trailing ' |' extraction residue that made this line a
# syntax error.
# NOTE(review): no logging handler is configured here, so this INFO record is
# dropped unless the embedding application configures logging — confirm intent.
logging.info("xtts is ready")

if __name__ == "__main__":
    # Allow running this file directly as a development server.
    app.run()