ruslanmv committed on
Commit
837b5d6
1 Parent(s): 08a82bb

Adding models

Files changed (3)
  1. Dockerfile +1 -1
  2. server/backend.py +2 -1
  3. server/utils.py +53 -0
Dockerfile CHANGED
@@ -34,7 +34,7 @@ ENV PATH="/app/venv/bin:/root/.ollama/bin:$PATH"
 # Expose the server port
 EXPOSE 7860
 EXPOSE 11434
-
+EXPOSE 1338
 # Copy the entry point script
 COPY entrypoint.sh /entrypoint.sh
 RUN chmod +x /entrypoint.sh
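
The newly exposed port 1338 presumably serves the backend API next to the UI (7860) and Ollama (11434); the diff itself does not name the listener. A minimal reachability check, assuming the container is started with -p 1338:1338:

import requests  # already used by server/backend.py

try:
    # Assumes port 1338 is the backend API; the diff does not confirm this
    resp = requests.get("http://localhost:1338/", timeout=5)
    print(f"Port 1338 answered with status {resp.status_code}")
except requests.exceptions.ConnectionError:
    print("Nothing is listening on port 1338")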
server/backend.py CHANGED
@@ -4,6 +4,7 @@ from g4f import ChatCompletion
 from flask import request, Response, stream_with_context
 from requests import get
 from server.config import special_instructions
+from server.utils import check_model
 from langchain_community.llms import Ollama
 import requests
@@ -60,7 +61,7 @@ class Backend_Api:
         api_key = request.json['api_key']
         jailbreak = request.json['jailbreak']
         model = request.json['model']
-
+        check_model(model)
         messages = build_messages(jailbreak)
         local_mode_1=True
         local_model_2 =False
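
With this change, every conversation request runs its model name through check_model before messages are built, so a missing model is pulled on first use instead of failing. A sketch of the three paths the call can take, using only names defined in this commit (assumes Ollama is installed and the repo root is on the import path):

from server.utils import check_model

check_model("llama3")       # already in `ollama list` -> prints "OK"
check_model("gemma:2b")     # absent but in remote_models -> runs `ollama pull gemma:2b`
check_model("no-such-tag")  # absent and unsupported -> prints the not-supported message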
server/utils.py ADDED
@@ -0,0 +1,53 @@
+import subprocess
+def check_model_exists(model_name):
+    try:
+        # List available models
+        output = subprocess.check_output("ollama list", shell=True, stderr=subprocess.STDOUT, universal_newlines=True)
+        available_models = [line.split()[0] for line in output.strip().split('\n')[1:]]
+        return any(model_name in model for model in available_models)
+    except subprocess.CalledProcessError as e:
+        print(f"Error checking models: {e.output}")
+        return False
+    except Exception as e:
+        print(f"An unexpected error occurred: {str(e)}")
+        return False
+
+
+def download_model(model_name):
+    remote_models = ['llama3',
+                     'llama3:70b',
+                     'phi3',
+                     'mistral',
+                     'neural-chat',
+                     'starling-lm',
+                     'codellama',
+                     'llama2-uncensored',
+                     'llava',
+                     'gemma:2b',
+                     'gemma:7b',
+                     'solar']
+    if model_name in remote_models:
+        try:
+            # Download the model
+            print(f"Downloading model '{model_name}'...")
+            subprocess.check_call(f"ollama pull {model_name}", shell=True)
+            print(f"Model '{model_name}' downloaded successfully.")
+        except subprocess.CalledProcessError as e:
+            print(f"Error downloading model: {e.output}")
+            raise e
+        except Exception as e:
+            print(f"An unexpected error occurred: {str(e)}")
+            raise e
+    else:
+        print(f"Model '{model_name}' is not currently supported.")
+
+
+def check_model(model_name):
+    if not check_model_exists(model_name):
+        try:
+            download_model(model_name)
+        except Exception as e:
+            print(f"Failed to download model '{model_name}': {e}")
+            return
+    else:
+        print("OK")