# Standard library
import os
import re
import json
import random
import textwrap
import warnings

warnings.filterwarnings("ignore")

# Model / tokenizer stack
import torch
import transformers
from transformers import AutoTokenizer, AutoModelForCausalLM
from transformers import LlamaTokenizer, LlamaForCausalLM, pipeline

# LangChain
import langchain
from langchain import HuggingFaceHub
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage
from langchain.prompts import PromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate
from langchain.output_parsers import StructuredOutputParser, ResponseSchema
from langchain.chains import LLMChain, RetrievalQA, VectorDBQA
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import HuggingFacePipeline, CTransformers
from langchain.vectorstores import Chroma, FAISS
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.document_loaders import PyPDFLoader, DirectoryLoader
from langchain.embeddings import HuggingFaceInstructEmbeddings
from InstructorEmbedding import INSTRUCTOR

from IPython.display import Markdown, display

# llm, the vector store, and the embedding model are built in model.py
from model import llm, vectordb as index, embeddings

# Alternative local model via CTransformers (uncomment to use instead of model.py's llm):
# mistral = CTransformers(
#     model="mistral-7b-instruct-v0.2.Q4_K_S.gguf",
#     model_type="mistral",
#     max_new_tokens=4096,
#     temperature=0,
#     repetition_penalty=1.1,
#     device="cuda" if torch.cuda.is_available() else "cpu")

# Alternative hosted model via the Hugging Face Hub (uncomment to use):
# llm = HuggingFaceHub(
#     repo_id="mistralai/Mixtral-8x7B-Instruct-v0.1",
#     model_kwargs={"temperature": 0.05, "max_length": 1024, "top_p": 0.95,
#                   "repetition_penalty": 1.15, "torch_dtype": "torch.float16",
#                   "device_map": "auto"})

# Optional: draw a random subject from a fixed topic list (uncomment to use):
# topic = ["Artificial intelligence", "Algorithm analysis", "Computer graphics and image processing",
#          "Computer organization and architecture", "Compiler Design", "Computer networks",
#          "Data Structure", "Database management system", "Distributed computing",
#          "internet of things", "mobile computing", "management of software system",
#          "Java", "Operating system", "Python programming", "Soft Computing", "Web programming"]
# select = [i for i in range(len(topic))]  # note: range(len(topic)-1) would skip the last topic

response_schemas = [
    ResponseSchema(name="question", description="Question generated from the provided input text."),
    ResponseSchema(name="choices", description="Comma-separated answer options for the multiple-choice question."),
    ResponseSchema(name="answer", description="The correct answer to the generated question.")
]
output_parser = StructuredOutputParser.from_response_schemas(response_schemas)
format_instructions = output_parser.get_format_instructions()
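
# For reference, format_instructions tells the model to reply with a fenced
# ```json block whose keys mirror the schemas above, roughly (illustrative
# shape only; the exact wording is generated by StructuredOutputParser):
# {
#     "question": "...",
#     "choices": "A ..., B ..., C ..., D ...",
#     "answer": "..."
# }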
prompt = ChatPromptTemplate(
    messages=[
        HumanMessagePromptTemplate.from_template(
            "Please generate {num_questions} multiple-choice questions "
            "about {user_prompt}.\n{format_instructions}")
    ],
    input_variables=["user_prompt", "num_questions"],  # both template variables must be declared
    partial_variables={"format_instructions": format_instructions}
)
# Render the prompt once to inspect what the model will actually receive
final_query = prompt.format_prompt(user_prompt="computer networks", num_questions=5)


chain = LLMChain(llm=llm, prompt=prompt)

# sub = topic[random.choice(select)]  # pairs with the optional topic list above

quiz_response = chain.run(user_prompt="computer networks", num_questions=5)
print(quiz_response)
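
# A minimal sketch of recovering structured data from the raw completion,
# assuming the model followed format_instructions. output_parser.parse raises
# on malformed output (likely when several questions come back in one reply),
# so guard it:
try:
    quiz = output_parser.parse(quiz_response)  # dict with "question", "choices", "answer"
    print(quiz["question"])
    print(quiz["choices"])
    print(quiz["answer"])
except Exception as err:
    print(f"Could not parse model output into the expected schema: {err}")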