SHIROI-07 committed on
Commit 67dc5e4 · verified · 1 Parent(s): 4b681f4

Update app.py

Files changed (1): app.py +32 -30
app.py CHANGED
@@ -1,8 +1,20 @@
 import gradio as gr
-from huggingface_hub import InferenceClient
+from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 
-# 🔁 CHANGE THIS to your own model name
-client = InferenceClient("SHIROI-07/skilllink-coach")
+# Use the base (untrained) model from Hugging Face Hub
+model_id = "mistralai/Mistral-7B-Instruct-v0.3"
+# Your Hugging Face token
+
+tokenizer = AutoTokenizer.from_pretrained(model_id)
+model = AutoModelForCausalLM.from_pretrained(model_id)
+
+pipe = pipeline(
+    "text-generation",
+    model=model,
+    tokenizer=tokenizer,
+    max_new_tokens=512,
+    do_sample=True,
+)
 
 def respond(
     message,
@@ -12,29 +24,24 @@ def respond(
     temperature,
     top_p,
 ):
-    messages = [{"role": "system", "content": system_message}]
-
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
-
-    messages.append({"role": "user", "content": message})
-
-    response = ""
-
-    # 🧠 This uses the model like ChatGPT (if it's a chat-compatible model)
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
+    # Combine history and system message into a prompt
+    prompt = system_message.strip() + "\n"
+    for user, assistant in history:
+        if user:
+            prompt += f"User: {user}\n"
+        if assistant:
+            prompt += f"Assistant: {assistant}\n"
+    prompt += f"User: {message}\nAssistant:"
+
+    outputs = pipe(
+        prompt,
+        max_new_tokens=max_tokens,
         temperature=temperature,
         top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
-        response += token
-        yield response
+        pad_token_id=tokenizer.eos_token_id,
+    )
+    response = outputs[0]["generated_text"][len(prompt):]
+    yield response.strip()
 
 demo = gr.ChatInterface(
     respond,
@@ -42,9 +49,4 @@ demo = gr.ChatInterface(
         gr.Textbox(value="You are a professional AI coach helping people build skills.", label="System message"),
         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
-    ],
-)
-
-if __name__ == "__main__":
-    demo.launch()
+        gr.Slider(minimum=0.1, maximum=1.0,
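
Note that the final added line is cut off in this capture, and the +32/-30 totals are fully accounted for by the hunks above, so the commit as shown never re-adds the list and call closers or the launch guard that it deletes; the new app.py would end mid-call and the Space would crash on import. The tail of the file presumably needs to match the deleted lines, roughly as follows (the Top-p slider completion is inferred from its deleted counterpart, not visible in the capture):

        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
)

if __name__ == "__main__":
    demo.launch()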
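The new version also leaves the "# Your Hugging Face token" comment with no actual authentication. The mistralai repositories are gated on the Hub, so both from_pretrained() calls will generally fail in a Space that is not logged in, and a 7B model in the default fp32 needs roughly 28 GB of memory. A minimal sketch of the loading block, assuming the token is stored in a Space secret named HF_TOKEN (the secret name, dtype, and device placement are assumptions, not part of the commit):

import os

import torch
from huggingface_hub import login
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumption: the token lives in a Space secret named HF_TOKEN.
login(token=os.environ["HF_TOKEN"])

model_id = "mistralai/Mistral-7B-Instruct-v0.3"
tokenizer = AutoTokenizer.from_pretrained(model_id)
# bfloat16 halves memory versus the default fp32; device_map="auto"
# (which requires the accelerate package) places weights on the GPU.
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)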
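Finally, a behavioral difference worth flagging: the old InferenceClient code streamed partial responses (stream=True, yielding after each token), while the new respond() yields exactly once after the whole generation finishes. If incremental output matters for the chat UI, transformers' TextIteratorStreamer can restore it. A sketch of a drop-in respond(), reusing the module-level tokenizer and model from the commit and its flat User:/Assistant: prompt format:

from threading import Thread

from transformers import TextIteratorStreamer


def respond(message, history, system_message, max_tokens, temperature, top_p):
    # Same flat prompt format as the committed version.
    prompt = system_message.strip() + "\n"
    for user, assistant in history:
        if user:
            prompt += f"User: {user}\n"
        if assistant:
            prompt += f"Assistant: {assistant}\n"
    prompt += f"User: {message}\nAssistant:"

    # The streamer hands out decoded text pieces as generate() produces them.
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    Thread(
        target=model.generate,
        kwargs=dict(
            **inputs,
            streamer=streamer,
            max_new_tokens=max_tokens,
            do_sample=True,
            temperature=temperature,
            top_p=top_p,
            pad_token_id=tokenizer.eos_token_id,
        ),
    ).start()

    response = ""
    for piece in streamer:
        response += piece
        yield response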