nsarrazin (HF staff) committed
Commit 22d7b97 (1 parent: a89fb7b)

Add a job in release action that updates the .env in prod (#493)


* Add a workflow that updates the .env in prod

* move update env to release workflow

* version bumps on actions

* flipped config

* black
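
For context: the new job does not write .env.local on the Space directly; it pushes the merged config as a Space secret named DOTENV_LOCAL, which the production Space turns into .env.local when it (re)starts. A minimal sketch of that consumption side, assuming a Python entrypoint (the real Space may do this in its startup script instead; nothing below is part of this PR):

```python
# Hypothetical consumption side (not in this PR): read the DOTENV_LOCAL secret
# that the release job sets and materialize it as .env.local before chat-ui starts.
import os

dotenv_local = os.environ.get("DOTENV_LOCAL")
if dotenv_local:
    with open(".env.local", "w") as f:
        f.write(dotenv_local)
```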

.env.template ADDED
@@ -0,0 +1,152 @@
+ # template used in production for HuggingChat.
+
+ MODELS=`[
+   {
+     "name": "meta-llama/Llama-2-70b-chat-hf",
+     "description": "The latest and biggest model from Meta, fine-tuned for chat.",
+     "websiteUrl": "https://ai.meta.com/llama/",
+     "userMessageToken": "",
+     "userMessageEndToken": " [/INST] ",
+     "assistantMessageToken": "",
+     "assistantMessageEndToken": " </s><s>[INST] ",
+     "preprompt": " ",
+     "chatPromptTemplate" : "<s>[INST] <<SYS>>\n{{preprompt}}\n<</SYS>>\n\n{{#each messages}}{{#ifUser}}{{content}} [/INST] {{/ifUser}}{{#ifAssistant}}{{content}} </s><s>[INST] {{/ifAssistant}}{{/each}}",
+     "promptExamples": [
+       {
+         "title": "Write an email from bullet list",
+         "prompt": "As a restaurant owner, write a professional email to the supplier to get these products every week: \n\n- Wine (x10)\n- Eggs (x24)\n- Bread (x12)"
+       }, {
+         "title": "Code a snake game",
+         "prompt": "Code a basic snake game in python, give explanations for each step."
+       }, {
+         "title": "Assist in a task",
+         "prompt": "How do I make a delicious lemon cheesecake?"
+       }
+     ],
+     "parameters": {
+       "temperature": 0.1,
+       "top_p": 0.95,
+       "repetition_penalty": 1.2,
+       "top_k": 50,
+       "truncate": 1000,
+       "max_new_tokens": 1024
+     }
+   },
+   {
+     "name": "codellama/CodeLlama-34b-Instruct-hf",
+     "displayName": "codellama/CodeLlama-34b-Instruct-hf",
+     "description": "Code Llama, a state of the art code model from Meta.",
+     "websiteUrl": "https://about.fb.com/news/2023/08/code-llama-ai-for-coding/",
+     "userMessageToken": "",
+     "userMessageEndToken": " [/INST] ",
+     "assistantMessageToken": "",
+     "assistantMessageEndToken": " </s><s>[INST] ",
+     "preprompt": " ",
+     "chatPromptTemplate" : "<s>[INST] <<SYS>>\n{{preprompt}}\n<</SYS>>\n\n{{#each messages}}{{#ifUser}}{{content}} [/INST] {{/ifUser}}{{#ifAssistant}}{{content}} </s><s>[INST] {{/ifAssistant}}{{/each}}",
+     "promptExamples": [
+       {
+         "title": "Fibonacci in Python",
+         "prompt": "Write a python function to calculate the nth fibonacci number."
+       }, {
+         "title": "JavaScript promises",
+         "prompt": "How can I wait for multiple JavaScript promises to fulfill before doing something with their values?"
+       }, {
+         "title": "Rust filesystem",
+         "prompt": "How can I load a file from disk in Rust?"
+       }
+     ],
+     "parameters": {
+       "temperature": 0.1,
+       "top_p": 0.95,
+       "repetition_penalty": 1.2,
+       "top_k": 50,
+       "truncate": 1000,
+       "max_new_tokens": 2048
+     }
+   },
+   {
+     "name": "tiiuae/falcon-180B-chat",
+     "displayName": "tiiuae/falcon-180B-chat",
+     "description": "Falcon-180B is a 180B parameters causal decoder-only model built by TII and trained on 3,500B tokens.",
+     "websiteUrl": "https://www.tii.ae/news/technology-innovation-institute-introduces-worlds-most-powerful-open-llm-falcon-180b",
+     "preprompt": " ",
+     "chatPromptTemplate": "System: {{preprompt}}\nUser:{{#each messages}}{{#ifUser}}{{content}}\nFalcon:{{/ifUser}}{{#ifAssistant}}{{content}}\nUser:{{/ifAssistant}}{{/each}}",
+     "parameters": {
+       "temperature": 0.1,
+       "top_p": 0.95,
+       "repetition_penalty": 1.2,
+       "top_k": 50,
+       "truncate": 1000,
+       "max_new_tokens": 1024,
+       "stop": ["User:"]
+     },
+     "promptExamples": [
+       {
+         "title": "Write an email from bullet list",
+         "prompt": "As a restaurant owner, write a professional email to the supplier to get these products every week: \n\n- Wine (x10)\n- Eggs (x24)\n- Bread (x12)"
+       }, {
+         "title": "Code a snake game",
+         "prompt": "Code a basic snake game in python, give explanations for each step."
+       }, {
+         "title": "Assist in a task",
+         "prompt": "How do I make a delicious lemon cheesecake?"
+       }
+     ]
+   },
+   {
+     "name": "mistralai/Mistral-7B-Instruct-v0.1",
+     "displayName": "mistralai/Mistral-7B-Instruct-v0.1",
+     "description": "Mistral 7B is a new Apache 2.0 model, released by Mistral AI that outperforms Llama2 13B in benchmarks.",
+     "websiteUrl": "https://mistral.ai/news/announcing-mistral-7b/",
+     "preprompt": "",
+     "chatPromptTemplate" : "<s>{{#each messages}}{{#ifUser}}[INST] {{#if @first}}{{#if @root.preprompt}}{{@root.preprompt}}\n{{/if}}{{/if}} {{content}} [/INST]{{/ifUser}}{{#ifAssistant}}{{content}}</s> {{/ifAssistant}}{{/each}}",
+     "parameters": {
+       "temperature": 0.1,
+       "top_p": 0.95,
+       "repetition_penalty": 1.2,
+       "top_k": 50,
+       "truncate": 1000,
+       "max_new_tokens": 2048,
+       "stop": ["</s>"]
+     },
+     "promptExamples": [
+       {
+         "title": "Write an email from bullet list",
+         "prompt": "As a restaurant owner, write a professional email to the supplier to get these products every week: \n\n- Wine (x10)\n- Eggs (x24)\n- Bread (x12)"
+       }, {
+         "title": "Code a snake game",
+         "prompt": "Code a basic snake game in python, give explanations for each step."
+       }, {
+         "title": "Assist in a task",
+         "prompt": "How do I make a delicious lemon cheesecake?"
+       }
+     ]
+   }
+ ]`
+
+ OLD_MODELS=`[{"name":"bigcode/starcoder"}, {"name":"OpenAssistant/oasst-sft-6-llama-30b-xor"}]`
+ TASK_MODEL='mistralai/Mistral-7B-Instruct-v0.1'
+
+ APP_BASE="/chat"
+ PUBLIC_ORIGIN=https://huggingface.co
+ PUBLIC_SHARE_PREFIX=https://hf.co/chat
+ PUBLIC_ANNOUNCEMENT_BANNERS=`[
+   {
+     "title": "Websearch 2.0, now with RAG & sources",
+     "linkTitle": "Discuss",
+     "linkHref": "https://huggingface.co/spaces/huggingchat/chat-ui/discussions/254"
+   }
+ ]`
+
+ PUBLIC_APP_NAME=HuggingChat
+ PUBLIC_APP_ASSETS=huggingchat
+ PUBLIC_APP_COLOR=yellow
+ PUBLIC_APP_DATA_SHARING=1
+ PUBLIC_APP_DISCLAIMER=1
+
+ RATE_LIMIT=16
+ MESSAGES_BEFORE_LOGIN=1 # how many messages a user can send in a conversation before having to login. set to 0 to force login right away
+
+ # Not part of the .env but set as other variables in the space
+ # ADDRESS_HEADER=X-Forwarded-For
+ # XFF_DEPTH=2
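
Since MODELS, OLD_MODELS and PUBLIC_ANNOUNCEMENT_BANNERS are JSON arrays wrapped in backticks, a typo in the template above would only surface once the Space restarts with the new config. A quick local sanity check, sketched here under the assumption that the values are plain JSON (the variable names come from the template; the script itself is not part of this PR):

```python
# Hypothetical pre-flight check (not in this PR): confirm the backtick-wrapped
# JSON values in .env.template parse before the release job ships them.
import json
import re

with open(".env.template", "r") as f:
    template = f.read()

for key in ("MODELS", "OLD_MODELS", "PUBLIC_ANNOUNCEMENT_BANNERS"):
    # Grab everything between KEY=` and the next backtick (values span many lines).
    match = re.search(rf"^{key}=`(.*?)`", template, flags=re.M | re.S)
    if match is None:
        raise SystemExit(f"{key} not found in .env.template")
    parsed = json.loads(match.group(1))  # raises ValueError on malformed JSON
    print(f"{key}: OK ({len(parsed)} entries)")
```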
.github/workflows/deploy-release.yml CHANGED
@@ -7,6 +7,27 @@ on:
  workflow_dispatch:

jobs:
+   update-env:
+     name: "Update production environment config"
+     runs-on: ubuntu-latest
+
+     steps:
+       - name: checkout repo content
+         uses: actions/checkout@v3 # checkout the repository content to github runner
+       - name: setup python
+         uses: actions/setup-python@v4
+         with:
+           python-version: 3.11 # install the python needed
+       - name: "Install dependencies"
+         run: |
+           python -m pip install --upgrade pip
+           pip install huggingface_hub
+       - name: Update .env.local # run file
+         env:
+           HF_TOKEN: ${{ secrets.HF_TOKEN }}
+           SECRET_CONFIG: ${{ secrets.SECRET_CONFIG }}
+         run: |
+           python update_env.py
  sync-to-hub:
    runs-on: ubuntu-latest
    steps:
.gitignore CHANGED
@@ -6,5 +6,6 @@ node_modules
  .env
  .env.*
  !.env.example
+ !.env.template
  vite.config.js.timestamp-*
  vite.config.ts.timestamp-*
update_env.py ADDED
@@ -0,0 +1,17 @@
+ import os
+ from huggingface_hub import HfApi
+
+
+ SECRET_CONFIG = os.environ["SECRET_CONFIG"]
+ HF_TOKEN = os.environ["HF_TOKEN"]
+
+ # Read the content of the file .env.template
+ with open(".env.template", "r") as f:
+     PUBLIC_CONFIG = f.read()
+
+ # Append the content of the env variable SECRET_CONFIG after the public config
+ full_config = f"{PUBLIC_CONFIG}\n{SECRET_CONFIG}"
+
+ api = HfApi()
+
+ api.add_space_secret("huggingchat/chat-ui", "DOTENV_LOCAL", full_config, token=HF_TOKEN)
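
To try the new step locally before relying on it in the release workflow, the same merge can be run as a dry run that skips the add_space_secret call; a minimal sketch, where the SECRET_CONFIG fallback is a made-up placeholder:

```python
# Hypothetical dry run (not part of this PR): build the combined config exactly
# like update_env.py does, but only print a summary instead of updating the Space.
import os

SECRET_CONFIG = os.environ.get("SECRET_CONFIG", "MONGODB_URL=<placeholder>")  # made-up fallback
with open(".env.template", "r") as f:
    PUBLIC_CONFIG = f.read()

full_config = f"{PUBLIC_CONFIG}\n{SECRET_CONFIG}"
print(
    f"combined config: {len(full_config.splitlines())} lines "
    f"({len(SECRET_CONFIG.splitlines())} from SECRET_CONFIG)"
)
```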