AdamNovotnyCom committed on
Commit
248a405
1 Parent(s): 338822e
Files changed (5) hide show
  1. Dockerfile +2 -0
  2. Dockerfile_dev +24 -0
  3. README.md +1 -0
  4. app.py +9 -5
  5. docker-compose.yml +1 -1
Dockerfile CHANGED
@@ -18,6 +18,8 @@ COPY --chown=user . /home/user/app
18
 
19
  RUN pip install -r requirements.txt
20
 
 
 
21
  EXPOSE 7860
22
 
23
  CMD ["python", "app.py"]
 
18
 
19
  RUN pip install -r requirements.txt
20
 
21
+ RUN --mount=type=secret,id=HF_TOKEN,mode=0444,required=true
22
+
23
  EXPOSE 7860
24
 
25
  CMD ["python", "app.py"]
Dockerfile_dev ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ FROM python:3.10
2
+
3
+ # Set up a new user named "user" with user ID 1000
4
+ RUN useradd -m -u 1000 user
5
+
6
+ # Switch to the "user" user
7
+ USER user
8
+
9
+ # Set home to the user's home directory
10
+ ENV HOME=/home/user \
11
+ PATH=/home/user/.local/bin:$PATH
12
+
13
+ # Set the working directory to the user's home directory
14
+ WORKDIR /home/user/app
15
+
16
+ # Copy the current directory contents into the container at $HOME/app setting the owner to the user
17
+ COPY --chown=user . /home/user/app
18
+
19
+ RUN pip install -r requirements.txt
20
+
21
+ EXPOSE 7860
22
+
23
+ # with reload
24
+ CMD ["gradio", "app.py"]
README.md CHANGED
@@ -30,3 +30,4 @@ Exec command
30
  ## References
31
  - [huggingface.co/llama2](https://huggingface.co/blog/llama2)
32
  - [demo-docker-gradio](https://huggingface.co/spaces/sayakpaul/demo-docker-gradio/tree/main)
 
 
30
  ## References
31
  - [huggingface.co/llama2](https://huggingface.co/blog/llama2)
32
  - [demo-docker-gradio](https://huggingface.co/spaces/sayakpaul/demo-docker-gradio/tree/main)
33
+ - [space config reference](https://huggingface.co/docs/hub/spaces-config-reference)
app.py CHANGED
@@ -7,15 +7,14 @@ from transformers import AutoTokenizer
7
 
8
  print(os.environ["HF_TOKEN"][:5])
9
  logging.info(os.environ["HF_TOKEN"][:5])
 
10
 
11
  pipe_flan = transformers.pipeline("text2text-generation", model="google/flan-t5-small")
12
  def google_flan(input_text):
13
- print("New response")
14
- logging.info("New response")
15
  return pipe_flan(input_text)
16
 
17
- demo = gr.Interface(fn=google_flan, inputs="text", outputs="text")
18
-
19
  # model = "meta-llama/Llama-2-7b-chat-hf"
20
  # tokenizer = AutoTokenizer.from_pretrained(
21
  # model,
@@ -44,6 +43,11 @@ demo = gr.Interface(fn=google_flan, inputs="text", outputs="text")
44
  # output_text += seq["generated_text"] + "\n"
45
  # return output_text
46
 
47
- # demo = gr.Interface(fn=llama2, inputs="text", outputs="text")
 
 
 
 
 
48
 
49
  demo.launch(server_name="0.0.0.0", server_port=7860)
 
7
 
8
  print(os.environ["HF_TOKEN"][:5])
9
  logging.info(os.environ["HF_TOKEN"][:5])
10
+ print("New response 2")
11
 
12
  pipe_flan = transformers.pipeline("text2text-generation", model="google/flan-t5-small")
13
  def google_flan(input_text):
14
+ print("New response 2")
15
+ logging.info("New response 2")
16
  return pipe_flan(input_text)
17
 
 
 
18
  # model = "meta-llama/Llama-2-7b-chat-hf"
19
  # tokenizer = AutoTokenizer.from_pretrained(
20
  # model,
 
43
  # output_text += seq["generated_text"] + "\n"
44
  # return output_text
45
 
46
+ demo = gr.Interface(
47
+ fn=google_flan,
48
+ inputs="text",
49
+ outputs="text",
50
+ theme=gr.themes.Default(primary_hue="blue", secondary_hue="pink")
51
+ )
52
 
53
  demo.launch(server_name="0.0.0.0", server_port=7860)
docker-compose.yml CHANGED
@@ -5,7 +5,7 @@ services:
5
  image: llama2hf-image
6
  build:
7
  context: .
8
- dockerfile: Dockerfile
9
  volumes:
10
  - ./:/home/user/app
11
  working_dir: /home/user/app
 
5
  image: llama2hf-image
6
  build:
7
  context: .
8
+ dockerfile: Dockerfile_dev
9
  volumes:
10
  - ./:/home/user/app
11
  working_dir: /home/user/app