ZequnZ committed
Commit
8bdafc1
1 Parent(s): a1c6e63

remove token

Files changed (1):
  1. app.py +5 -2
app.py CHANGED
@@ -2,13 +2,14 @@ from typing import Iterator
 import gradio as gr
 import random
 import time
+import os
 
 from text_generation import Client
 
 model_id = "mistralai/Mistral-7B-Instruct-v0.1"
 
 API_URL = "https://api-inference.huggingface.co/models/" + model_id
-HF_TOKEN = "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"
+HF_TOKEN = os.environ.get("Mistral-7B-Read", False)
 SYSTEM_PROMPT = "I want you to act as a great assistant. You will provide trustful information and can inspire me to think more using supportive languages."
 
 client = Client(
@@ -49,7 +50,9 @@ def generate_prompts(
 theme = "WeixuanYuan/Soft_dark"
 
 with gr.Blocks(theme=theme) as demo:
-    gr.Markdown("# Chat with Mistral-7B\n[Github](https://github.com/ZequnZ/Chat-with-Mistral-7B)")
+    gr.Markdown(
+        "# Chat with Mistral-7B\n[Github](https://github.com/ZequnZ/Chat-with-Mistral-7B)"
+    )
     with gr.Row():
         chatbot = gr.Chatbot(scale=6)
 