BarTriesHisBest committed on
Commit 3642a91
1 Parent(s): eed70e5

Update app.py

Files changed (1)
  1. app.py +3 -5
app.py CHANGED
@@ -6,7 +6,7 @@ import os
 # Get the Hugging Face token from environment variable
 hf_token = os.environ.get('MISTRAL_ACCESS_TOKEN')
 
-# Load the Mistral 7B model and tokenizer from Hugging Face
+# Load the Mistral 7B Instruct model and tokenizer from Hugging Face
 model_name = "mistralai/Mistral-7B-Instruct-v0.3"
 tokenizer = AutoTokenizer.from_pretrained(model_name, token=hf_token)
 model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="auto", token=hf_token)
@@ -19,8 +19,7 @@ def generate_text(prompt, max_length=500):
 
 # Function to generate personalized learning plan
 def generate_learning_plan(name, age, subject, learning_style, strengths, weaknesses):
-    prompt = f"""
-    Create a personalized learning plan for a student with the following details:
+    prompt = f"""<s>[INST] Create a personalized learning plan for a student with the following details:
     Name: {name}
     Age: {age}
     Subject: {subject}
@@ -33,8 +32,7 @@ def generate_learning_plan(name, age, subject, learning_style, strengths, weaknesses):
     2. Suggested resources
     3. Personalized learning goals
     4. Strategies to address weaknesses
-    5. Ways to leverage strengths
-    """
+    5. Ways to leverage strengths [/INST]"""
 
     response = generate_text(prompt)
     return response
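
Editor's note: this commit wraps the prompt in Mistral's [INST] ... [/INST] instruction tags by hand. A minimal sketch of an alternative approach is below, using the tokenizer's bundled chat template rather than hard-coded tags; it assumes the same tokenizer and generate_text() defined in app.py, and it abbreviates the prompt body. One thing worth checking with either approach: if generate_text tokenizes the string with default settings, the literal "<s>" (or the BOS emitted by the template) can end up duplicated by the BOS the tokenizer adds at encode time.

# Sketch only, not part of the commit. Builds the "[INST] ... [/INST]" prompt
# from the chat template shipped with mistralai/Mistral-7B-Instruct-v0.3.
def generate_learning_plan(name, age, subject, learning_style, strengths, weaknesses):
    instruction = (
        "Create a personalized learning plan for a student with the following details:\n"
        f"Name: {name}\nAge: {age}\nSubject: {subject}\n"
        # ... remaining student details and the numbered plan requirements from app.py ...
    )
    messages = [{"role": "user", "content": instruction}]
    # tokenize=False returns the formatted prompt string, so it can be passed
    # to the existing generate_text() unchanged.
    prompt = tokenizer.apply_chat_template(messages, tokenize=False)
    return generate_text(prompt)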