ruslanmv committed
Commit: a871eb3
Parent: 837b5d6

some models

Files changed (3):
  1. .gitignore +3 -1
  2. client/html/index.html +6 -4
  3. entrypoint.sh +11 -1
.gitignore CHANGED
@@ -1,2 +1,4 @@
 develop/*
-translations/*
+translations/*
+/server/__pycache__/*
+*.pyc
client/html/index.html CHANGED
@@ -131,19 +131,21 @@
 <div class="field">
 <select class="dropdown" name="model" id="model">
 <optgroup label="Ollama">
-<option value="llama3">llama3</option>
-<option value='llama3:70b'>llama3:70b </option>
 <option value='phi3'>phi3 </option>
 <option value='mistral'>mistral </option>
+<option value="llama3">llama3</option>
+<option value='gemma:2b'> gemma:2b</option>
+<option value='gemma:7b'>gemma:7b </option>
+<!--
+<option value='llama3:70b'>llama3:70b </option>
 <option value='neural-chat'>neural-chat </option>
 <option value='starling-lm'>starling-lm </option>
 <option value='codellama'>codellama </option>
 <option value='llama2-uncensored'>llama2-uncensored </option>
 <option value='llava'>llava </option>
 <option value='solar'>solar </option>
 <option value='terminal'>terminal </option>
+-->
 </optgroup>
 <!--
 <optgroup label="GPT">
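The option values in this dropdown are Ollama model tags. As an optional, illustrative check (not part of this commit), the following snippet verifies that each tag left enabled in the dropdown is available locally, assuming the Ollama CLI is installed and `ollama serve` is running:

# Illustrative sanity check: confirm every enabled dropdown model is pulled locally.
for m in phi3 mistral llama3 gemma:2b gemma:7b; do
  if ollama list | grep -q "^${m}"; then
    echo "available: ${m}"
  else
    echo "missing:   ${m} (run: ollama pull ${m})"
  fi
done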
entrypoint.sh CHANGED
@@ -8,8 +8,18 @@ echo "Starting Ollama server"
 ollama serve &
 sleep 1
 
+# Try to get the model environment variable
+if [ -n "${MODEL}" ]; then
+  # Split the MODEL variable into an array
+  IFS=',' read -ra MODELS <<< "${MODEL}"
+else
+  # Use the default list of models
+  MODELS=(phi3 mistral llama3 gemma:2b)
+fi
+
+
 # Splitting the models by comma and pulling each
-IFS=',' read -ra MODELS <<< "$model"
+#IFS=',' read -ra MODELS <<< "$model"
 for m in "${MODELS[@]}"; do
   echo "Pulling $m"
   ollama pull "$m"
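For reference, a standalone sketch of the new model-selection logic uses the same technique as the hunk above; the MODEL value here is only an example, since in the container it would come from the environment:

#!/usr/bin/env bash
# Standalone sketch of the model-selection logic added to entrypoint.sh.
MODEL="phi3,mistral"   # example value; normally supplied by the container environment

if [ -n "${MODEL}" ]; then
  # Split the comma-separated MODEL string into a bash array
  IFS=',' read -ra MODELS <<< "${MODEL}"
else
  # Fall back to the defaults baked into entrypoint.sh
  MODELS=(phi3 mistral llama3 gemma:2b)
fi

for m in "${MODELS[@]}"; do
  echo "would pull: $m"   # entrypoint.sh runs: ollama pull "$m"
done

With MODEL unset or empty, the script falls back to pulling phi3, mistral, llama3, and gemma:2b, which matches the models left enabled in the dropdown except for gemma:7b, which is selectable but not pulled by default.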