import gradio
# from transformers import pipeline
from transformers import AutoTokenizer, AutoModelForCausalLM
import os

# Read the Hugging Face access token from the environment (needed for gated models such as Gemma)
hf_token = os.getenv("HF_TOKEN")
# Initialize the Hugging Face tokenizer and model
# model = pipeline(model='google/flan-t5-base')
tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b", token=hf_token)
model = AutoModelForCausalLM.from_pretrained("google/gemma-7b", token=hf_token)
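# Optional (assumption: a GPU with enough memory and the `accelerate` package installed):
# loading in bfloat16 roughly halves the full-precision memory footprint of the model.
# import torch
# model = AutoModelForCausalLM.from_pretrained(
#     "google/gemma-7b", token=hf_token, torch_dtype=torch.bfloat16, device_map="auto"
# )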
# Define the chatbot function
def chatbot(input_text):
    prompt = (
        "Answer the given input in the context of the Bhagavad Gita. "
        "Give the user suggestions based on the meanings of the shlokas "
        f"in the Bhagavad Gita. Input = {input_text}"
    )
    # Generate a response from the Hugging Face model
    # response = model(prompt, max_length=250, do_sample=True)[0]['generated_text'].strip()
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(**inputs, max_new_tokens=250)
    # Decode the generated token IDs into text and return the bot response
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
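# Quick sanity check before launching the UI (assumption: run once locally;
# generation on CPU can be slow for a model of this size):
# print(chatbot("What does the Gita say about performing one's duty?"))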
# Define the Gradio interface
gradio_interface = gradio.Interface(
    fn=chatbot,
    inputs='text',
    outputs='text',
    title='Chatbot',
    description='A chatbot that answers in the context of the Bhagavad Gita.',
    examples=[
        ['Hi, how are you?']
    ]
)

# Launch the Gradio interface
gradio_interface.launch()
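# To expose a temporary public URL instead of localhost only, Gradio also supports:
# gradio_interface.launch(share=True)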
# Alternative: LangChain + HuggingFaceHub (kept for reference)
# from dotenv import load_dotenv
# from langchain import HuggingFaceHub, LLMChain
# from langchain import PromptTemplate
# import gradio
# load_dotenv()
# hf_api = os.getenv('HF_API')
# hub_llm = HuggingFaceHub(repo_id='facebook/blenderbot-400M-distill')
# prompt = PromptTemplate(
#     input_variables=["question"],
#     template="Answer is: {question}"
# )
# hub_chain = LLMChain(prompt=prompt, llm=hub_llm, verbose=True)
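# Example invocation of the chain above (assumption: the legacy `langchain` API
# matching the imports in the commented block):
# answer = hub_chain.run(question="What is dharma?")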
# Sample code for AI language model interaction
# from transformers import GPT2Tokenizer, GPT2LMHeadModel
# import gradio
# def simptok(data):
#     # Load pre-trained model and tokenizer (using the transformers library)
#     model_name = "gpt2"
#     tokenizer = GPT2Tokenizer.from_pretrained(model_name)
#     model = GPT2LMHeadModel.from_pretrained(model_name)
#     # User input
#     user_input = data
#     # Tokenize input
#     input_ids = tokenizer.encode(user_input, return_tensors="pt")
#     # Generate response
#     output = model.generate(input_ids, max_length=50, num_return_sequences=1)
#     response = tokenizer.decode(output[0], skip_special_tokens=True)
#     return response
# def responsenew(data):
#     return simptok(data)
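# Quick local test of the GPT-2 helper above (assumption: uncomment the block first):
# print(responsenew("Hello"))
# `responsenew` is wired to the commented Gradio interface at the bottom of this file.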
# from hugchat import hugchat
# import gradio as gr
# import time
# # Create a chatbot connection (cookies.json from a logged-in Hugging Face session)
# chatbot = hugchat.ChatBot(cookie_path="cookies.json")
# # Start a new conversation and switch to it (ignore errors)
# conversation_id = chatbot.new_conversation()
# chatbot.change_conversation(conversation_id)
# def get_answer(data):
#     return chatbot.chat(data)
# gradio_interface = gr.Interface(
#     fn=get_answer,
#     inputs="text",
#     outputs="text"
# )
# gradio_interface.launch()
# Gradio interface for the GPT-2 helper (responsenew) defined above
# gradio_interface = gradio.Interface(
#     fn=responsenew,
#     inputs="text",
#     outputs="text"
# )
# gradio_interface.launch()
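# Dependencies implied by the imports above (assumption: install via pip):
# gradio, transformers, torch; plus python-dotenv, langchain, and hugchat
# if the commented alternatives are enabled.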