csabakecskemeti committed
Commit b293ad4
Parent: 54929aa

Update app.py

Files changed (1): app.py (+66 -24)
app.py CHANGED
@@ -5,10 +5,60 @@ import json
 
 sbc_host_url = os.environ['URL']
 
-def get_completion(prompt:str, messages:str = '', n_predict=128):
-    system = "### System: You are a helpful assistant helps to brainstorm ideas.\n"
-    prompt_templated = f'{system} {messages}\n ### HUMAN:\n{prompt} \n ### ASSISTANT:'
+# def get_completion(prompt:str, messages:str = '', n_predict=128):
+#     system = "### System: You are a helpful assistant helps to brainstorm ideas.\n"
+#     prompt_templated = f'{system} {messages}\n ### HUMAN:\n{prompt} \n ### ASSISTANT:'
+
+#     headers = {
+#         "Content-Type": "application/json"
+#     }
+#     data = {
+#         "prompt": prompt_templated,
+#         "n_predict": n_predict,
+#         "stop": ["### HUMAN:", "### ASSISTANT:", "HUMAN"],
+#         "stream": "True"
+#     }
+#     try:
+#         response = requests.post(sbc_host_url, headers=headers, data=json.dumps(data))
+
+#         if response.status_code == 200:
+#             return response.json()['content']
+#         else:
+#             response.raise_for_status()
+#     except:
+#         raise gr.Warning("Apologies for the inconvenience! Our model is currently self-hosted and unavailable at the moment.")
+
 
+# def chatty(prompt, messages):
+#     # print(prompt)
+#     # print(f'messages: {messages}')
+#     past_messages = ''
+#     if len(messages) > 0:
+#         for idx, message in enumerate(messages):
+#             print(f'idx: {idx}, message: {message}')
+#             past_messages += f'\n### HUMAN: {message[0]}'
+#             past_messages += f'\n### ASSISTANT: {message[1]}'
+
+
+#     # past_messages = messages[0][0]
+#     # print(f'past_messages: {past_messages}')
+#     messages = get_completion(prompt, past_messages)
+#     return messages.split('### ASSISTANT:')[-1]
+
+# stream
+def chatty(prompt, messages, n_predict=128):
+    # print(prompt)
+    # print(f'messages: {messages}')
+    past_messages = ''
+    if len(messages) > 0:
+        for idx, message in enumerate(messages):
+            print(f'idx: {idx}, message: {message}')
+            past_messages += f'\n### HUMAN: {message[0]}'
+            past_messages += f'\n### ASSISTANT: {message[1]}'
+
+    system = "### System: You help to brainstorm ideas.\n"
+    prompt_templated = f'{system} {messages}\n ### HUMAN:\n{prompt} \n ### ASSISTANT:'
+
     headers = {
         "Content-Type": "application/json"
     }
@@ -16,36 +66,28 @@ def get_completion(prompt:str, messages:str = '', n_predict=128):
         "prompt": prompt_templated,
         "n_predict": n_predict,
         "stop": ["### HUMAN:", "### ASSISTANT:", "HUMAN"],
-        "stream": "True"
+        "stream": True
     }
+
+    result = ""
     try:
-        response = requests.post(sbc_host_url, headers=headers, data=json.dumps(data))
+        response = requests.post(sbc_host_url, headers=headers, data=json.dumps(data), stream=True)
 
         if response.status_code == 200:
-            return response.json()['content']
+            for line in response.iter_lines():
+                if line:
+                    try:
+                        result += json.loads(line.decode('utf-8').replace('data: ', ''))['content']
+                    except:
+                        # LMStudio response has empty token
+                        pass
+                    yield result
         else:
             response.raise_for_status()
-    except:
+    except requests.exceptions.RequestException as e:
         raise gr.Warning("Apologies for the inconvenience! Our model is currently self-hosted and unavailable at the moment.")
 
 
-def chatty(prompt, messages):
-    # print(prompt)
-    # print(f'messages: {messages}')
-    past_messages = ''
-    if len(messages) > 0:
-        for idx, message in enumerate(messages):
-            print(f'idx: {idx}, message: {message}')
-            past_messages += f'\n### HUMAN: {message[0]}'
-            past_messages += f'\n### ASSISTANT: {message[1]}'
-
-
-    # past_messages = messages[0][0]
-    # print(f'past_messages: {past_messages}')
-    messages = get_completion(prompt, past_messages)
-    return messages.split('### ASSISTANT:')[-1]
-
-
 with gr.Blocks() as demo:
     gr.Image("sbc.jpg")
     gr.ChatInterface(
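
A note on the streaming change above: the old get_completion sent the stream flag as the string "True" and read one JSON body at the end, while the new chatty sends a real boolean, passes stream=True to requests.post, and JSON-decodes each server-sent-event line after stripping its "data: " prefix. Below is a minimal sketch of that parsing loop in isolation, assuming a llama.cpp-style /completion endpoint that emits lines of the form `data: {"content": "..."}`; the endpoint behavior is an assumption, and stream_completion is a hypothetical helper, not part of the commit.

import json
import requests

def stream_completion(url: str, prompt: str, n_predict: int = 128):
    """Yield the accumulated completion text as SSE chunks arrive.

    Assumes a llama.cpp-style server that, with "stream": true, emits
    lines of the form `data: {"content": "...", ...}`.
    """
    data = {"prompt": prompt, "n_predict": n_predict, "stream": True}
    response = requests.post(url, json=data, stream=True)
    response.raise_for_status()

    result = ""
    for line in response.iter_lines():
        if not line:
            continue  # SSE events are separated by blank lines
        try:
            chunk = json.loads(line.decode("utf-8").removeprefix("data: "))
            result += chunk["content"]
        except (json.JSONDecodeError, KeyError):
            pass  # tolerate empty or non-JSON events (seen with LM Studio)
        yield result

Yielding the growing result rather than each delta matches what the commit does: Gradio replaces the pending bot message with each yielded value, so every yield must carry the full text so far.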
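Separately, one line in the new chatty is worth flagging: it builds past_messages from the chat history, but the template f-string interpolates messages (the raw list of [user, bot] pairs that gr.ChatInterface passes in), so the model is shown a Python repr of the history rather than the tagged transcript. Here is a sketch of the prompt builder under the presumed intent, using the same tag scheme; build_prompt is a hypothetical helper.

def build_prompt(prompt: str, history: list,
                 system: str = "### System: You help to brainstorm ideas.\n") -> str:
    """Render gr.ChatInterface history ([user, bot] pairs) into the
    ### HUMAN / ### ASSISTANT template this Space uses.

    Hypothetical helper: the commit inlines this logic but interpolates
    the raw `messages` list; rendering the transcript is the presumed intent.
    """
    past_messages = ""
    for user_msg, bot_msg in history:
        past_messages += f"\n### HUMAN: {user_msg}"
        past_messages += f"\n### ASSISTANT: {bot_msg}"
    return f"{system} {past_messages}\n ### HUMAN:\n{prompt} \n ### ASSISTANT:"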
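On the UI side nothing else needs to change for streaming: gr.ChatInterface recognizes that its function is a generator and re-renders the bot reply on every yield. The gr.ChatInterface( call is truncated in this hunk, so the minimal wiring below is an assumption about the surrounding layout, not the Space's actual arguments.

import gradio as gr

with gr.Blocks() as demo:
    gr.Image("sbc.jpg")
    # A generator fn streams: each yield overwrites the pending bot reply.
    gr.ChatInterface(fn=chatty)

demo.launch()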