Shahbazakbar committed on
Commit
e915d27
·
verified ·
1 Parent(s): 28c5e1a

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +50 -0
app.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Step 1: Import libraries.
# Standard library first, then third-party packages.
import os

import torch
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

# Step 2: Load environment variables.
# The Hugging Face access token is read from the HF_TOKEN environment
# variable (e.g. a Space secret); os.getenv returns None when it is unset.
HUGGING_FACE_TOKEN = os.getenv("HF_TOKEN")
# Step 3: Load the model with authentication.
# FIX: the original passed the deprecated `use_auth_token=` keyword;
# transformers (>= 4.33) expects `token=` instead (`use_auth_token` is
# removed in v5).
MODEL_NAME = "mistralai/Mistral-7B-v0.1"

mistral_tokenizer = AutoTokenizer.from_pretrained(
    MODEL_NAME,
    token=HUGGING_FACE_TOKEN,
)
mistral_model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    token=HUGGING_FACE_TOKEN,
    torch_dtype=torch.float16,  # half precision: less memory, faster inference
    device_map="auto",          # place weights on GPU automatically when available
)
# Step 4: Define the chatbot logic.
def generate_response(prompt):
    """Generate a completion for *prompt* with the Mistral model.

    Args:
        prompt: The user's input text.

    Returns:
        The model's generated continuation as a string (the prompt itself
        is stripped from the output).
    """
    # Tokenize and move the tensors to the same device as the model weights.
    inputs = mistral_tokenizer(prompt, return_tensors="pt").to(mistral_model.device)

    # FIX: the original used max_length=200, which counts the prompt tokens
    # too — a long prompt would leave no room for the answer (or raise).
    # max_new_tokens bounds only the generated continuation.
    outputs = mistral_model.generate(
        **inputs,
        max_new_tokens=200,
        pad_token_id=mistral_tokenizer.eos_token_id,  # silence missing-pad warning
    )

    # Decode only the newly generated tokens so the reply does not echo
    # the prompt back to the user.
    new_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
    return mistral_tokenizer.decode(new_tokens, skip_special_tokens=True)
# Step 5: Create the Gradio interface.
def chatbot(prompt):
    """Gradio handler: forward *prompt* to the model and return its reply."""
    return generate_response(prompt)
# Build the text-in / text-out UI around the chatbot handler.
interface = gr.Interface(
    fn=chatbot,
    inputs="text",
    outputs="text",
    title="Mistral 7B Chatbot",
    description="Ask questions and get answers from Mistral 7B!"
)

# Step 6: Launch the app.
# FIX: the original launched unconditionally at import time; guarding the
# launch lets the module be imported (e.g. by tests or another runner)
# without starting the server as a side effect.
if __name__ == "__main__":
    interface.launch()