File size: 1,783 Bytes
e48ab6b
 
 
 
 
cf7a07e
5c54d1b
669a4c0
e48ab6b
24fbd15
 
e48ab6b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
96906d9
7b0437a
 
 
 
 
 
 
 
 
96906d9
 
 
 
 
7b0437a
 
 
 
 
 
 
 
 
 
 
96906d9
 
 
e48ab6b
 
 
 
f0e04ff
e48ab6b
24fbd15
1418034
e48ab6b
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
from threading import Thread
import gradio as gr
import inspect
from gradio import routes
from typing import List, Type

import requests, os, re, asyncio, json

# NOTE(review): `loop` is never used anywhere visible in this file, and
# asyncio.get_event_loop() outside a running loop is deprecated since
# Python 3.10 — consider removing. TODO confirm no other module reads it.
loop = asyncio.get_event_loop()

# init code
def get_types(cls_set: List[Type], component: str):
    """Extract doc snippets and type names from gradio component docstrings.

    For each class in *cls_set*, reads one line of its docstring — the
    second line for ``"input"`` components, the last line otherwise — and
    parses out:

    * the description text after the last ``:`` on that line
    * the type name found inside the ``(...)`` parentheses

    Args:
        cls_set: gradio component classes to inspect.
        component: ``"input"`` to read docstring line 2; any other value
            reads the last docstring line.

    Returns:
        ``(docset, types)``: two parallel lists of strings, one entry per
        class in *cls_set*.
    """
    # Inputs document their value on the second docstring line, outputs on
    # the last one — the only difference between the two original branches.
    line_idx = 1 if component == "input" else -1
    docset = []
    types = []
    for cls in cls_set:
        # getdoc() returns None for undocumented classes and the docstring
        # may be shorter than expected; fall back to empty strings so the
        # result lists stay parallel to cls_set instead of raising.
        doc_lines = (inspect.getdoc(cls) or "").split("\n")
        try:
            line = doc_lines[line_idx]
        except IndexError:
            line = ""
        docset.append(line.split(":")[-1])
        types.append(line.split(")")[0].split("(")[-1])
    return docset, types
# Monkey-patch gradio's route helper so the auto-generated API docs page
# parses component docstrings with the function defined above.
routes.get_types = get_types

# App code

def chat(id, npc, prompt):
    """Check the player's coin balance, produce a reply, and log the exchange.

    Args:
        id: player identifier forwarded to the remote API.
        npc: NPC identifier (currently unused by this body).
        prompt: the user's message.

    Returns:
        The AI response string, or ``"no coin"`` when the balance is zero.
    """
    # get_coin endpoint — fetch the player's remaining balance.
    # timeout= so a dead remote can never hang this worker forever.
    response = requests.post(
        "https://ldhldh-api-for-unity.hf.space/run/predict_6",
        json={"data": [id]},
        timeout=30,
    ).json()

    coin = response["data"]
    # NOTE(review): gradio endpoints usually wrap results as {"data": [value]};
    # if that holds here, this should be response["data"][0] — int() on a
    # list raises TypeError. Confirm the remote payload shape before changing.
    if int(coin) == 0:
        return "no coin"

    # model inference (placeholder response for now)
    output = "AI μ‘λ‹΅μž…λ‹ˆλ‹€."

    # add_transaction endpoint — record the prompt/output pair.
    # Fire-and-forget: the response body was read and discarded before, so
    # it is intentionally not parsed here.
    requests.post(
        "https://ldhldh-api-for-unity.hf.space/run/predict_5",
        json={
            "data": [
                id,
                "inference",
                {"prompt": prompt, "output": output},
            ]
        },
        timeout=30,
    )

    return output


# App wiring: expose chat() as a gradio sub-interface (served at /run/predict).
with gr.Blocks() as demo:
    count = 0  # NOTE(review): never read anywhere visible — likely dead code.
    aa = gr.Interface(
      fn=chat,
      inputs=["text","text","text"],
      outputs="text",
      description="chat, ai 응닡을 λ°˜ν™˜ν•©λ‹ˆλ‹€. λ‚΄λΆ€μ μœΌλ‘œ νŠΈλžœμž­μ…˜ 생성. \n /run/predict",
    )
    
    # NOTE(review): launch() is conventionally called after the `with` block,
    # not inside it, and enable_queue= is redundant with .queue() (the kwarg
    # was removed in newer gradio releases) — confirm against the installed
    # gradio version before touching.
    demo.queue(max_size=32).launch(enable_queue=True)