LuckRafly committed
Commit 3fb4082
1 Parent(s): 06fd637

Update function.py

Files changed (1)
  1. function.py +16 -8
function.py CHANGED
@@ -2,26 +2,34 @@ from langchain.prompts import PromptTemplate
 from langchain.llms import CTransformers
 from langchain.chains import LLMChain
 from langchain.chains import SequentialChain
+from langchain.llms import HuggingFaceHub
+from dotenv import load_dotenv
 
-config = {'max_new_tokens': 256, 'temperature': 0.1}
+load_dotenv()
+config = {'max_new_tokens': 512, 'temperature': 0.6}
 
 # Create function for app
 def GetLLMResponse(selected_topic_level,
                    selected_topic,
                    num_quizzes):
-
+
     # Calling llama model
     # llm = CTransformers(model="D:\Code Workspace\DL Model\llama-2-7b-chat.ggmlv3.q8_0.bin",
     #                     model_type = 'llama',
     #                     config = config)
 
-    llm = CTransformers(model='TheBloke/Llama-2-7B-Chat-GGML',
-                        model_file = 'llama-2-7b-chat.ggmlv3.q8_0.bin',
-                        model_type = 'llama',
-                        config = config)
+    # llm = CTransformers(model='TheBloke/Llama-2-7B-Chat-GGML',
+    #                     model_file = 'llama-2-7b-chat.ggmlv3.q8_0.bin',
+    #                     model_type = 'llama',
+    #                     config = config)
+
+    llm = HuggingFaceHub(
+        repo_id = "mistralai/Mixtral-8x7B-Instruct-v0.1",
+        model_kwargs = config
+    )
 
     ## Create LLM Chaining
-    questions_template = "Generate a {selected_topic_level} math quiz on the topic of {selected_topic}. Include {num_quizzes} questions without providing answers."
+    questions_template = "I want you to generate only questions, with this specification: Generate a {selected_topic_level} math quiz on the topic of {selected_topic}. Generate only {num_quizzes} questions, not more, and without providing answers. The questions should not be in image format or links."
     questions_prompt = PromptTemplate(input_variables=["selected_topic_level", "selected_topic", "num_quizzes"],
                                       template=questions_template)
     questions_chain = LLMChain(llm= llm,
@@ -29,7 +37,7 @@ def GetLLMResponse(selected_topic_level,
                                output_key = "questions")
 
 
-    answer_template = "From this Question:\n {questions}\n\n gave me answer to each one of them"
+    answer_template = "I want you to become a teacher and answer these specific questions:\n {questions}\n\n You should give me a straightforward and concise explanation and answer to each one of them."
     answer_prompt = PromptTemplate(input_variables = ["questions"],
                                    template = answer_template)
     answer_chain = LLMChain(llm = llm,
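
Note on the switch to HuggingFaceHub: LangChain's HuggingFaceHub wrapper calls the hosted Inference API instead of a local GGML file, and it authenticates through the HUGGINGFACEHUB_API_TOKEN environment variable, which is why the commit adds load_dotenv(). A minimal sketch of the expected setup, assuming the token is kept in a local .env file (the .env itself is not part of this commit):

import os
from dotenv import load_dotenv

load_dotenv()  # reads key=value pairs from ./.env into the environment
# The HuggingFaceHub wrapper picks the token up from the environment; fail early if absent.
assert os.getenv("HUGGINGFACEHUB_API_TOKEN"), "add HUGGINGFACEHUB_API_TOKEN to .env"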
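
The hunk cuts off mid-call at answer_chain = LLMChain(llm = llm, so the tail of GetLLMResponse is not visible here. Since SequentialChain is imported at the top of function.py, a plausible sketch of the remainder, with the output key, chain wiring, and return value assumed rather than taken from the diff:

    answer_chain = LLMChain(llm = llm,
                            prompt = answer_prompt,
                            output_key = "answer")  # assumed output key

    # Feed the generated questions straight into the answering chain (assumed wiring).
    seq_chain = SequentialChain(chains = [questions_chain, answer_chain],
                                input_variables = ["selected_topic_level", "selected_topic", "num_quizzes"],
                                output_variables = ["questions", "answer"])

    response = seq_chain({"selected_topic_level": selected_topic_level,
                          "selected_topic": selected_topic,
                          "num_quizzes": num_quizzes})
    return response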