ruslanmv committed
Commit 3f4f003
1 Parent(s): b3b4f52
Files changed (3)
  1. client/html/index.html +1 -0
  2. server/backend.py +16 -1
  3. server/config.py +1 -0
client/html/index.html CHANGED
@@ -143,6 +143,7 @@
   <option value='gemma:2b'> gemma:2b</option>
   <option value='gemma:7b'>gemma:7b </option>
   <option value='solar'>solar </option>
+  <option value='terminal'>terminal </option>
   </optgroup>
   <!--
   <optgroup label="GPT">
server/backend.py CHANGED
@@ -21,6 +21,15 @@ def askme(text):
     response = requests.post(url, json=build_body(prompt))
     response_txt = response.json()["response"]
     return response_txt
+
+import subprocess
+def execute(command):
+    try:
+        # Execute the command and capture the output
+        output = subprocess.check_output(command, shell=True).decode("utf-8")
+        return output
+    except subprocess.CalledProcessError as e:
+        return f"Error executing command: {e}"
 class Backend_Api:
     def __init__(self, bp, config: dict) -> None:
         """
@@ -53,6 +62,13 @@
         local_mode_1=False
         local_model_2 =True
         print(model)
+
+        if model=='terminal':
+            prompt = request.json['meta']['content']['parts'][0]['content']
+            print("prompt:",prompt)
+            response=execute(prompt)
+            return response
+
         if local_mode_1:
             content=messages[0]['content']
             llm = Ollama(model=model)
@@ -76,7 +92,6 @@
                 chatId=conversation_id,
                 messages=messages
             )
-
             return Response(stream_with_context(generate_stream(response, jailbreak)), mimetype='text/event-stream')
 
         except Exception as e:
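
The new `execute` helper passes user-supplied text straight to `subprocess.check_output(command, shell=True)`, so whatever is typed into the chat runs as a shell command in the server process. A minimal hardened sketch (not part of this commit; the `subprocess.run` call, timeout value, and error formatting are assumptions) would bound the runtime and capture stderr:

```python
import subprocess

def execute(command: str, timeout: int = 30) -> str:
    """Run a shell command and return its output.

    Hypothetical hardening of the commit's helper: adds a timeout and
    captures stderr. It still executes arbitrary shell input, so exposing
    it to untrusted users remains a remote-code-execution risk.
    """
    try:
        result = subprocess.run(
            command,
            shell=True,            # same behavior as the commit's helper
            capture_output=True,   # collect stdout and stderr
            text=True,             # decode bytes to str
            timeout=timeout,       # assumed limit; not in the commit
        )
    except subprocess.TimeoutExpired:
        return f"Error executing command: timed out after {timeout}s"
    if result.returncode != 0:
        return f"Error executing command: exit {result.returncode}: {result.stderr}"
    return result.stdout

# Usage: print(execute("echo hello"))  # -> "hello\n"
```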
server/config.py CHANGED
@@ -15,6 +15,7 @@ models = {
     'gemma:2b',
     'gemma:7b',
     'solar',
+    'terminal',
 }
 
 special_instructions = {
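
With `'terminal'` added to the `models` set, the dropdown value, the config entry, and the backend branch all line up. A sketch of a request that exercises the new path, assuming a hypothetical route and top-level field names (only the `meta.content.parts[0].content` lookup is shown in this commit's backend.py hunk):

```python
import requests

# Hypothetical endpoint; the blueprint's route is not shown in this diff.
url = "http://localhost:1338/backend-api/v2/conversation"

payload = {
    "model": "terminal",                       # assumed field carrying the dropdown value
    "meta": {
        "content": {
            "parts": [{"content": "ls -la"}],  # the shell command execute() will run
        }
    },
}

# The 'terminal' branch returns execute()'s plain output
# instead of the usual event stream.
print(requests.post(url, json=payload).text)
```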