ldhldh committed on
Commit
f223851
โ€ข
1 Parent(s): 75f4dd5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -27
app.py CHANGED
@@ -10,6 +10,9 @@ import time
10
  import datetime
11
  import requests, json
12
 
 
 
 
13
  loop = asyncio.get_event_loop()
14
  # Monkey patch
15
  def get_types(cls_set: List[Type], component: str):
@@ -30,43 +33,25 @@ def get_types(cls_set: List[Type], component: str):
30
  return docset, types
31
  routes.get_types = get_types
32
 
33
-
34
- q = queue.Queue()
35
-
36
- arrr = []
37
-
38
- from huggingface_hub import hf_hub_download
39
  hf_hub_download(repo_id='StarFox7/Llama-2-ko-7B-chat-gguf', filename='Llama-2-ko-7B-chat-gguf-q4_0.bin', local_dir='./')
40
 
 
 
 
41
  # App code
42
- def res(x, id, cdata, url):
43
- global q
44
-
45
- arr = [x, id, str(cdata.split(",", 1)[0]), url]
46
- q.put(arr)
47
-
48
- arrr.append(x)
49
- print("\n_Done\n\n")
50
- return "Done"
51
 
52
- def rese(x):
53
-
54
- print(f"{arrr}")
55
- return "Done"
56
 
57
  with gr.Blocks() as demo:
58
  count = 0
59
  aa = gr.Interface(
60
- fn=res,
61
- inputs=["text","text", "text", "text"],
62
- outputs="text",
63
- description="call",
64
- )
65
-
66
- bb = gr.Interface(
67
- fn=rese,
68
  inputs=["text"],
69
  outputs="text",
70
  description="call",
71
  )
 
 
72
  demo.queue(max_size=32).launch(enable_queue=True)
 
10
  import datetime
11
  import requests, json
12
 
13
+ from huggingface_hub import hf_hub_download
14
+ from llama_cpp import Llama
15
+
16
  loop = asyncio.get_event_loop()
17
  # Monkey patch
18
  def get_types(cls_set: List[Type], component: str):
 
33
  return docset, types
34
  routes.get_types = get_types
35
 
 
 
 
 
 
 
36
  hf_hub_download(repo_id='StarFox7/Llama-2-ko-7B-chat-gguf', filename='Llama-2-ko-7B-chat-gguf-q4_0.bin', local_dir='./')
37
 
38
# Load the quantized Llama-2-ko chat model that hf_hub_download fetched above.
# n_ctx=2048 — size of the model's context window (prompt + generated tokens).
llm = Llama(
    model_path='Llama-2-ko-7B-chat-gguf-q4_0.bin',
    n_ctx=2048,
)
# App code
42
def res(x):
    """Return the model's reply to the user message *x*.

    Wraps *x* in a fixed two-speaker role-play prompt, generates up to
    100 tokens, and stops at the next "###" section marker.  Because
    echo=True, the returned text includes the prompt itself.
    """
    prompt = f"๋‹ค์Œ์€ A์™€ B์˜ ์—ญํ• ๊ทน์ด์•ผ. ๋„ˆ๋Š” B์•ผ. A์™€ ๋Œ€ํ™”ํ•˜๊ณ  ์žˆ์–ด. ์นœ๊ตฌ์—๊ฒŒ ์นœ๊ทผํ•˜๊ณ  ๊ฐ„๊ฒฐํ•˜๊ฒŒ ์ž˜ ๋Œ€๋‹ตํ•ด์ค˜.\n\n### A:\n{x}\n\n### B:\n"
    completion = llm(prompt, max_tokens=100, stop=["###"], echo=True)
    return completion['choices'][0]['text']
 
 
 
 
 
 
45
 
 
 
 
 
46
 
47
with gr.Blocks() as demo:
    count = 0
    # BUG FIX: this Interface previously referenced `fn=chat`, but no function
    # named `chat` is defined anywhere in this file — the handler defined above
    # is `res`.  The old name would raise NameError at import time.
    aa = gr.Interface(
        fn=res,
        inputs=["text"],
        outputs="text",
        description="call",
    )

demo.queue(max_size=32).launch(enable_queue=True)