# ai-chatbot/quiz_gen.py
# Generate multiple-choice quiz questions with LangChain and a structured
# output parser.
import re
import json
import random
import warnings

import torch
from langchain.chains import LLMChain
from langchain.llms import CTransformers, HuggingFaceHub
from langchain.output_parsers import ResponseSchema, StructuredOutputParser
from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate

warnings.filterwarnings("ignore")
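# Shared components created once in the project's model.py; only llm is used
# directly in this module.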
from model import llm, vectordb as index, embeddings
# Alternative local model via CTransformers (quantised GGUF Mistral):
# mistral = CTransformers(
#     model="mistral-7b-instruct-v0.2.Q4_K_S.gguf",
#     model_type="mistral",
#     max_new_tokens=4096,
#     temperature=0,
#     repetition_penalty=1.1,
#     device="cuda" if torch.cuda.is_available() else "cpu",
# )
# Alternative hosted model via the Hugging Face Hub:
# llm = HuggingFaceHub(
#     repo_id="mistralai/Mixtral-8x7B-Instruct-v0.1",
#     model_kwargs={"temperature": 0.05, "max_length": 1024, "top_p": 0.95,
#                   "repetition_penalty": 1.15, "torch_dtype": "torch.float16",
#                   "device_map": "auto"},
# )
# topic=["Artificial intelligence", "Algorithm analysis","Computer graphics and image processing", "Computer organization and architecture","Compiler Design",
# "Computer networks", "Data Structure", "Database management system", "Distributed computing", "internet of things", "mobile computing", "management of software system",
# "Java", "Operating system", "Python programming", "Soft Computing", "Web programming"]
# select=[i for i in range(len(topic)-1)]
# Schema for each generated question; the parser converts the model's JSON
# output back into Python dicts with these keys.
response_schemas = [
    ResponseSchema(name="question",
                   description="Question generated from the provided input text."),
    ResponseSchema(name="choices",
                   description="Options for the multiple-choice question, as a comma-separated list."),
    ResponseSchema(name="answer",
                   description="Correct answer to the question."),
]
output_parser = StructuredOutputParser.from_response_schemas(response_schemas)
format_instructions = output_parser.get_format_instructions()
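# get_format_instructions() returns the parser's standard boilerplate asking
# the model to reply with a fenced ```json block matching the schema above;
# it is injected into the prompt below as a partial variable.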
prompt = ChatPromptTemplate(
    messages=[
        HumanMessagePromptTemplate.from_template(
            "Please generate {num_questions} multiple-choice questions "
            "about {user_prompt}.\n{format_instructions}"
        )
    ],
    # num_questions must be declared here, or template validation fails
    input_variables=["user_prompt", "num_questions"],
    partial_variables={"format_instructions": format_instructions},
)
# Render the prompt once to inspect the final wording (not consumed by the chain).
final_query = prompt.format_prompt(user_prompt="computer networks", num_questions=5)
chain = LLMChain(llm=llm, prompt=prompt)
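# Sketch (an assumption, not part of the original flow): ground the quiz in
# retrieved course material instead of a bare topic string, using the shared
# vector store's standard similarity_search API.
# docs = index.similarity_search("computer networks", k=4)
# context = "\n".join(d.page_content for d in docs)
# quiz_response = chain.run(user_prompt=context, num_questions=5)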
# To quiz on a random topic from the pool above instead of a fixed one:
# sub = random.choice(topic)
# quiz_response = chain.run(user_prompt=sub, num_questions=5)
# Generate five questions on a fixed topic; the raw response should contain
# JSON following the format instructions above.
quiz_response = chain.run(user_prompt="computer networks", num_questions=5)
print(quiz_response)
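# A minimal parsing sketch (an assumption, not part of the original flow):
# output_parser.parse() handles a single ```json block, but with
# num_questions > 1 the model may emit several, so this helper pulls each
# block out with a regex and decodes it individually, skipping malformed ones.
def parse_quiz(raw: str) -> list[dict]:
    questions = []
    for block in re.findall(r"```json\s*(\{.*?\})\s*```", raw, flags=re.DOTALL):
        try:
            questions.append(json.loads(block))
        except json.JSONDecodeError:
            continue  # skip blocks the model formatted badly
    return questions

# Example usage:
# for q in parse_quiz(quiz_response):
#     print(q["question"], q["choices"], q["answer"])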