ruslanmv committed on
Commit 42e16df
1 Parent(s): c772fda
Files changed (4)
  1. Dockerfile +15 -23
  2. entrypoint.sh +5 -5
  3. start.sh +4 -18
  4. startbk2.sh +21 -0
Dockerfile CHANGED
@@ -6,12 +6,6 @@ WORKDIR /app
 # Copy requirements file
 COPY requirements.txt requirements.txt
 
-# Create a virtual environment
-RUN python -m venv venv
-
-# Set the PATH to use the virtual environment
-ENV PATH="/app/venv/bin:$PATH"
-
 # Update package list and install necessary packages in a single step
 RUN apt-get update && apt-get install -y \
     curl \
@@ -19,38 +13,36 @@ RUN apt-get update && apt-get install -y \
     libffi-dev \
     cmake \
     libcurl4-openssl-dev \
-    tini \
-    systemd && \
+    tini && \
     apt-get clean
 
 # Upgrade pip and install dependencies
-RUN python -m pip install --upgrade pip
-RUN pip install --no-cache-dir -r requirements.txt
+RUN python -m venv venv && \
+    . /app/venv/bin/activate && \
+    pip install --upgrade pip && \
+    pip install --no-cache-dir -r requirements.txt
 
-# Install application
+# Install Ollama
 RUN curl https://ollama.ai/install.sh | sh
 
 # Create the directory and give appropriate permissions
 RUN mkdir -p /.ollama && chmod 777 /.ollama
 
-WORKDIR /.ollama
+# Ensure Ollama binary is in the PATH
+ENV PATH="/app/venv/bin:/root/.ollama/bin:$PATH"
+
+# Expose the server port
+EXPOSE 7860
+
 # Copy the entry point script
 COPY entrypoint.sh /entrypoint.sh
 RUN chmod +x /entrypoint.sh
+
 # Set the entry point script as the default command
 ENTRYPOINT ["/entrypoint.sh"]
 
-CMD ["ollama", "serve"]
-
 # Set the model as an environment variable (this can be overridden)
-ENV model=${model}
-
-# Expose the server port
-EXPOSE 7860
-
-# Ensure Ollama binary is in the PATH
-RUN which ollama
-
+ENV model="default_model"
 
 # Copy the entire application
 COPY . .
@@ -63,4 +55,4 @@ COPY start.sh .
 RUN chmod +x start.sh
 
 # Define the command to run the application
-CMD ["python", "./run.py"]
+CMD ["./start.sh"]
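Taken together, the Dockerfile now builds the virtual environment and installs dependencies in a single layer, installs Ollama, and sets entrypoint.sh as the entrypoint with start.sh as the default command. A minimal sketch of building and running the image (the tag ollama-webui and the model value are illustrative, not from the commit):

    # Build the image
    docker build -t ollama-webui .

    # Run it, publishing the exposed port and overriding the model variable
    docker run -p 7860:7860 -e model=llama3 ollama-webui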
entrypoint.sh CHANGED
@@ -1,7 +1,10 @@
 #!/bin/bash
 
+# Source the virtual environment
+source /app/venv/bin/activate
+
 # Starting server
-echo "Starting server"
+echo "Starting Ollama server"
 ollama serve &
 sleep 1
 
@@ -11,10 +14,7 @@ for m in "${MODELS[@]}"; do
     echo "Pulling $m"
     ollama pull "$m"
     sleep 5
-    # echo "Running $m"
-    # ollama run "$m"
-    # No need to sleep here unless you want to give some delay between each pull for some reason
 done
 
 # Keep the script running to prevent the container from exiting
-#wait
+wait
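The pull loop iterates over a MODELS array defined earlier in entrypoint.sh, outside the hunks shown here. A minimal sketch of how such an array might be seeded from the model environment variable the Dockerfile sets (this exact definition is an assumption, not part of the diff):

    # Hypothetical: derive the models to pull from the model env var,
    # falling back to llama3; the unquoted expansion splits on whitespace,
    # so model="llama3 mistral" yields two entries.
    MODELS=(${model:-llama3})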
start.sh CHANGED
@@ -1,21 +1,7 @@
 #!/bin/bash
-# Starting server
-echo "Starting Ollama server..."
-ollama serve &
 
-# Wait for the Ollama server to be ready
-echo "Waiting for Ollama server to be ready..."
-until curl -sSf http://localhost:11434/api/status > /dev/null; do
-    echo "Waiting for Ollama server to start..."
-    sleep 2
-done
+# Source the virtual environment
+source /app/venv/bin/activate
 
-echo "Ollama server is ready."
-
-# Pull the required model
-echo "Pulling llama3 model..."
-ollama pull llama3
-
-# Start the web UI
-echo "Starting web UI..."
-python run.py
+# Run the Python application
+exec python ./run.py
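A note on how these pieces compose: with ENTRYPOINT ["/entrypoint.sh"] and CMD ["./start.sh"] both in exec form, Docker invokes /entrypoint.sh with ./start.sh as its argument, so start.sh only runs if entrypoint.sh chooses to execute its arguments; as committed, entrypoint.sh ends in wait and ignores them. The usual pattern for handing off from an entrypoint to the CMD is a trailing exec (a hypothetical tail for entrypoint.sh, not part of this commit):

    # Hypothetical hand-off: after the model pulls, run whatever CMD supplied
    exec "$@"

The exec in start.sh itself is sound: it replaces the shell with the Python process, so container stop signals reach the app directly instead of being swallowed by bash.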
startbk2.sh ADDED
@@ -0,0 +1,21 @@
+#!/bin/bash
+# Starting server
+echo "Starting Ollama server..."
+ollama serve &
+
+# Wait for the Ollama server to be ready
+echo "Waiting for Ollama server to be ready..."
+until curl -sSf http://localhost:11434/api/status > /dev/null; do
+    echo "Waiting for Ollama server to start..."
+    sleep 2
+done
+
+echo "Ollama server is ready."
+
+# Pull the required model
+echo "Pulling llama3 model..."
+ollama pull llama3
+
+# Start the web UI
+echo "Starting web UI..."
+python run.py
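One caveat on this retired readiness loop: current Ollama servers do not appear to expose an /api/status endpoint, so the until loop may poll forever. A probe against an endpoint the API is known to serve, such as /api/tags, is a safer sketch (not part of the commit):

    # Wait until the Ollama API responds; /api/tags lists locally pulled models
    until curl -sSf http://localhost:11434/api/tags > /dev/null; do
        echo "Waiting for Ollama server to start..."
        sleep 2
    done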