cookgpt / app.py
import gradio as gr
from urllib.parse import quote
import re
import torch
from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
print("initializing")
pipe = pipeline("text-generation", model="VishalMysore/cookgptlama", torch_dtype=torch.bfloat16)
nllbtokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-1.3B")
nllbmodel = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-1.3B")
print("initializing done")
def read_html_file(file_path):
    """Read an HTML file and return it as a single ASCII line with inter-tag whitespace removed."""
    try:
        with open(file_path, 'r', encoding='utf-8') as file:
            html_content = file.read()
        # Drop non-ASCII characters, newlines and whitespace between tags
        html_content = html_content.encode('ascii', 'ignore').decode('ascii')
        html_content = html_content.replace("\n", "")
        html_content = re.sub(r">\s+<", "><", html_content)
        return html_content
    except FileNotFoundError:
        print(f"File not found: {file_path}")
        return None
    except Exception as e:
        print(f"An error occurred: {str(e)}")
        return None
def remove_text_after_marker(input_text, marker):
    # Find the index of the marker
    index = input_text.find(marker)
    if index != -1:
        # Keep only the text before the marker
        output_text = input_text[:index]
    else:
        # If the marker was not found, return the original input
        output_text = input_text
    return output_text
def remove_text_after(input_text, substring):
    # Find the index of the substring
    index = input_text.find(substring)
    if index != -1:
        # Keep only the text after the substring
        output_text = input_text[index + len(substring):]
    else:
        # If the substring was not found, return the original input
        output_text = input_text
    return output_text
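# Illustrative behaviour of the two helpers above (a sketch, not executed at import):
#     remove_text_after("prompt<|assistant|>reply", "<|assistant|>")  ->  "reply"
#     remove_text_after_marker("recipe text ### extra notes", "###")  ->  "recipe text "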
def remove_text(input_text):
    # Specify the text portion to be removed
    text_to_remove = "\nYou are my personal chef experienced in Indian spicy food</s>\n\nprovide me recipe of paneer bhurji with cook time </s>\n\nIngredients:"
    # Find the starting index of the text portion to be removed
    start_index = input_text.find(text_to_remove)
    if start_index != -1:
        # Remove the text portion from the input
        output_text = input_text[:start_index] + input_text[start_index + len(text_to_remove):]
    else:
        # If the text portion was not found, return the original input
        output_text = input_text
    return output_text
def translate(tgt_lang_id, text):
    print('translating')
    # Look up the NLLB token id for the target language code (e.g. "hin_Deva")
    tgt_lang_id = nllbtokenizer.lang_code_to_id[tgt_lang_id]
    # Tokenize the input
    model_inputs = nllbtokenizer(text, return_tensors='pt', padding='longest')
    # Generate the translation, forcing the decoder to start with the target language token
    gen_tokens = nllbmodel.generate(**model_inputs, forced_bos_token_id=tgt_lang_id)
    # Decode the generated tokens back into text; batch_decode returns a list of strings
    translated_text = nllbtokenizer.batch_decode(gen_tokens, skip_special_tokens=True)
    print('translating complete')
    print(translated_text)
    # A single input string yields a single-element list; return the translated string
    return translated_text[0]
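# Illustrative usage (a sketch, assuming the NLLB model above loaded successfully):
#     hindi_text = translate("hin_Deva", "Heat butter in a pan and add the paneer cubes.")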
def askllmpipe(text):
    print(text)
    messages = [
        {"role": "system", "content": "You are my personal chef experienced in Indian spicy food"},
        {"role": "user", "content": text},
    ]
    prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=False)
    outputs = pipe(prompt, max_new_tokens=500, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
    print('got the result from llm')
    return_text = outputs[0]["generated_text"]
    print('trimming')
    # Keep only the assistant's reply, then drop anything after the "###" marker
    return_text = remove_text_after(return_text, "<|assistant|>")
    print('trimming part 2')
    return_text = remove_text_after_marker(return_text, "###")
    print('trimming part 2 done')
    return return_text
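# Illustrative usage (a sketch): askllmpipe("Provide me a recipe for Paneer Butter Masala with cook time")
# returns only the assistant's portion of the generated chat, trimmed by the helpers above.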
def answer(text):
    return "I want to eat raw salad with lots of onion"
def format(text):
    messages = [
        {"role": "system", "content": "You will read the provided text and format it neatly into these sections: Ingredients, Cooking details, Calories, Cook time"},
        {"role": "user", "content": text},
    ]
    prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=False)
    outputs = pipe(prompt, max_new_tokens=500, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
    print('got the result from llm')
    return_text = outputs[0]["generated_text"]
    print('trimming')
    return_text = remove_text_after(return_text, "<|assistant|>")
    print('trimming part 2')
    return_text = remove_text_after_marker(return_text, "###")
    print('trimming part 2 done')
    return return_text
def callchain(recipe, dropdown_input):
    # Map the dropdown label to its NLLB-200 language code
    language_mapping = {"English": "eng_Latn", "Hindi": "hin_Deva", "Tamil": "tam_Taml", "Gujarati": "guj_Gujr", "Telugu": "tel_Telu"}
    selected_language = language_mapping.get(dropdown_input, "")
    output_text = askllmpipe(recipe)
    # If the selected language is not English, translate the answer
    if selected_language != "eng_Latn":
        output_text = translate(selected_language, output_text)
    return output_text
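# Illustrative usage (a sketch, mirroring the Gradio examples below):
#     callchain("Provide me a Recipe for Aloo Kofta.", "Hindi")
# asks the LLM for the recipe and then translates the answer to Hindi ("hin_Deva").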
html_content = read_html_file("cookgpt.html")
descriptionFindRecipe="## Welcome to CookGPT\n CookGPT is an innovative AI-based chef that combines the charm of traditional cooking with the efficiency of modern technology. Whether you're a culinary enthusiast, a busy professional, or someone looking for culinary inspiration, CookGPT is designed to make your cooking experience delightful, personalized, and effortless."
descriptionAddRecipe="## What's your favorite? \n Users can contribute their own recipes by adding them manually or uploading video content. These recipes are then upvoted by other users, and those receiving positive feedback are scheduled for fine-tuning our Language Model (LLM). This collaborative approach lets the community actively participate in refining and enhancing the model based on the most appreciated and valued recipes."
descriptionFineTune="## Missing something? \n Fine-tuning parameter customization: we will keep rebuilding the base model until all the recipes are covered."
with gr.Blocks() as demo:
    with gr.Tab("Find Recipe"):
        gr.Markdown(descriptionFindRecipe)
        findRecipeText = gr.Textbox(label="Recipe Query", info="What do you want to eat today!")
        languageDD = gr.Dropdown(
            ["English", "Hindi", "Tamil", "Telugu", "Gujarati"], label="Language", value="Hindi", info="Select your desired language!"
        )
        findbtn = gr.Button(value="Find Recipe")
        findRecipeTextOutPut = gr.Textbox(label="Recipe Details")
        findbtn.click(callchain, inputs=[findRecipeText, languageDD], outputs=[findRecipeTextOutPut])
        examples = gr.Examples(examples=[["Provide me Recipe for Paneer Butter Masala.", "Tamil"], ["Provide me a Recipe for Aloo Kofta.", "Hindi"]],
                               inputs=[findRecipeText, languageDD])
        gr.HTML(html_content)
    with gr.Tab("Add Recipe"):
        gr.Markdown(descriptionAddRecipe)
        addRecipe = gr.Textbox(label="Add Your Recipe", info="Favorite dish made by your Grandma!")
        btn = gr.Button(value="Format Recipe")
        formatedRecipe = gr.Textbox(label="Formatted Recipe", info="Format the recipe!")
        btn.click(format, inputs=[addRecipe], outputs=[formatedRecipe])
        gr.HTML(html_content)
    with gr.Tab("Fine Tune the Model"):
        gr.Markdown(descriptionFineTune)
        btn = gr.Button(value="View Finetuning Dataset")
        gr.HTML(html_content)
demo.launch()