BarTriesHisBest committed on
Commit 4276da5
1 Parent(s): 3642a91

Update app.py

Files changed (1)
  1. app.py +23 -21
app.py CHANGED
@@ -1,25 +1,14 @@
 import gradio as gr
-from transformers import AutoTokenizer, AutoModelForCausalLM
-import torch
 import os
+from groq import Groq
 
-# Get the Hugging Face token from environment variable
-hf_token = os.environ.get('MISTRAL_ACCESS_TOKEN')
-
-# Load the Mistral 7B Instruct model and tokenizer from Hugging Face
-model_name = "mistralai/Mistral-7B-Instruct-v0.3"
-tokenizer = AutoTokenizer.from_pretrained(model_name, token=hf_token)
-model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="auto", token=hf_token)
-
-# Function to generate text using the Mistral model
-def generate_text(prompt, max_length=500):
-    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
-    outputs = model.generate(**inputs, max_length=max_length, num_return_sequences=1)
-    return tokenizer.decode(outputs[0], skip_special_tokens=True)
+# Initialize the Groq client
+client = Groq(api_key=os.environ["GROQ_API_KEY"])
 
 # Function to generate personalized learning plan
 def generate_learning_plan(name, age, subject, learning_style, strengths, weaknesses):
-    prompt = f"""<s>[INST] Create a personalized learning plan for a student with the following details:
+    prompt = f"""
+    Create a personalized learning plan for a student with the following details:
     Name: {name}
     Age: {age}
     Subject: {subject}
@@ -32,10 +21,24 @@ def generate_learning_plan(name, age, subject, learning_style, strengths, weaknesses):
     2. Suggested resources
     3. Personalized learning goals
     4. Strategies to address weaknesses
-    5. Ways to leverage strengths [/INST]"""
+    5. Ways to leverage strengths
+    """
+
+    # Call the Groq API to generate text
+    chat_completion = client.chat.completions.create(
+        messages=[
+            {
+                "role": "user",
+                "content": prompt,
+            }
+        ],
+        model="mixtral-8x7b-32768",  # You can change this to the model you prefer
+        temperature=0.7,
+        max_tokens=2000,
+    )
 
-    response = generate_text(prompt)
-    return response
+    # Extract and return the generated text
+    return chat_completion.choices[0].message.content
 
 # Gradio interface
 iface = gr.Interface(
@@ -54,5 +57,4 @@ iface = gr.Interface(
 )
 
 # Launch the Gradio app
-if __name__ == "__main__":
-    iface.launch()
+iface.launch()
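
For anyone who wants to sanity-check the new Groq-backed path without launching the Gradio UI, a minimal sketch is below. It assumes the groq package is installed and a valid GROQ_API_KEY is exported in the environment; the sample prompt text is a placeholder and is not part of this commit, while the model name and request parameters are taken from the diff above.

import os
from groq import Groq

# Assumes GROQ_API_KEY is already set in the environment (placeholder value, not from this commit)
client = Groq(api_key=os.environ["GROQ_API_KEY"])

# Same request shape as the updated generate_learning_plan(), with a placeholder prompt
chat_completion = client.chat.completions.create(
    messages=[
        {"role": "user", "content": "Create a short personalized learning plan for a 12-year-old studying math."}
    ],
    model="mixtral-8x7b-32768",  # model name from the diff; any model available to your Groq account works
    temperature=0.7,
    max_tokens=2000,
)

# Print the generated plan, mirroring what the Gradio app would display
print(chat_completion.choices[0].message.content)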