Danil committed on
Commit
eba2192
β€’
1 Parent(s): a352cad
Files changed (2) hide show
  1. README.md +2 -2
  2. app.py +11 -12
README.md CHANGED
@@ -1,8 +1,8 @@
1
  ---
2
  title: Incoder Api
3
  emoji: πŸ’»
4
- colorFrom: red
5
- colorTo: pink
6
  sdk: gradio
7
  sdk_version: 2.9.4
8
  app_file: app.py
 
1
  ---
2
  title: Incoder Api
3
  emoji: πŸ’»
4
+ colorFrom: blue
5
+ colorTo: green
6
  sdk: gradio
7
  sdk_version: 2.9.4
8
  app_file: app.py
app.py CHANGED
@@ -2,18 +2,18 @@ import requests
2
  import os
3
  import gradio as gr
4
  import json
 
5
 
6
- def start_server():
7
- os.system("uvicorn server:app --port 8080 --host 0.0.0.0 --workers 1")
8
- os.environ["SSTART"] = "1"
9
-
10
- if os.environ.get('SSTART') != "1":
11
- start_server()
12
 
13
- def completion(prompt,max_tokens,temperature,top_k,top_p):
14
- req = f"http://0.0.0.0:8080?input_text={prompt}&top_p={top_p}&top_k={top_k}&temperature={temperature}&max_length={prompt}"
15
- g = requests.get(req).json()
16
- return g['text']
 
17
 
18
  demo = gr.Interface(
19
  fn=completion,
@@ -32,5 +32,4 @@ demo = gr.Interface(
32
 
33
  )
34
 
35
- if __name__ == "__main__":
36
- demo.launch()
 
2
  import os
3
  import gradio as gr
4
  import json
5
+ from transformers import AutoModelForCausalLM, AutoTokenizer
6
 
7
# Load the InCoder checkpoint once at import time so the Gradio request
# handler can reuse the same tokenizer/model across calls.
model_name = 'facebook/incoder-1B'
tokenizer = AutoTokenizer.from_pretrained(model_name)
# low_cpu_mem_usage streams weights into place instead of materializing a
# second full copy of the state dict during loading.
model = AutoModelForCausalLM.from_pretrained(model_name, low_cpu_mem_usage=True)
print('load ok')
 
 
11
 
12
def completion(prompt, max_tokens, temperature, top_k, top_p):
    """Generate a continuation of *prompt* with the module-level InCoder model.

    Args:
        prompt: Input text to complete.
        max_tokens: Passed to ``generate`` as ``max_length`` — the cap on the
            TOTAL sequence length, prompt tokens included.
            # NOTE(review): if the UI intends "new tokens only", this should
            # become ``max_new_tokens`` — confirm against the slider label.
        temperature: Softmax temperature for sampling.
        top_k: Top-k sampling cutoff.
        top_p: Nucleus-sampling cumulative-probability cutoff.

    Returns:
        The decoded output sequence (prompt included) as a string.
    """
    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    # Fix: do_sample=True is required — without it generate() performs greedy
    # decoding and silently ignores temperature/top_k/top_p.
    output = model.generate(
        input_ids,
        do_sample=True,
        max_length=max_tokens,
        top_p=top_p,
        top_k=top_k,
        temperature=temperature,
    )
    return tokenizer.decode(output[0])
17
 
18
  demo = gr.Interface(
19
  fn=completion,
 
32
 
33
  )
34
 
35
+ demo.launch()  # start the Gradio server; unguarded since Spaces runs app.py directly (NOTE(review): confirm no other module imports this file)