from fastai.vision.all import PILImage
from transformers import AutoTokenizer, GPT2LMHeadModel
import streamlit as st

# Side length (pixels) expected by the downstream image model.
_IMAGE_SIZE = 288
# The joined image-to-text pipeline output starts with a fixed-length
# metadata prefix that the original code stripped with string_result[19:].
_RESULT_PREFIX_LEN = 19
# Keywords indicating the image-to-text result describes written text.
_TEXT_KEYWORDS = ('handwriting', 'writing', 'book')


def preprocess_image(file):
    """Load *file* as a PILImage resized to the model's expected size.

    Bug fix: PIL's ``resize`` returns a NEW image rather than mutating in
    place; the original code called it as a statement and discarded the
    resized result, returning the image at its original size.
    """
    img = PILImage.create(file)
    return img.resize((_IMAGE_SIZE, _IMAGE_SIZE))


def get_context(model, img):
    """Run the image-to-text *model* on *img* and return the caption text.

    The pipeline output is stringified and joined, then a fixed-length
    leading prefix is stripped.
    """
    text_from_image = model(img)
    string_result = ''.join(map(str, text_from_image))
    # NOTE(review): slicing a fixed-length prefix assumes a specific repr
    # of the pipeline's output dict — verify against the actual model output.
    return string_result[_RESULT_PREFIX_LEN:]


def precheck(img_to_text_result):
    """Return True if the image-to-text result mentions written text.

    Bug fixes: the original docstring claimed this detected dogs/puppies
    (it checks for handwriting/writing/book), and the final
    ``result.find('book')`` clause was missing ``!= -1`` — ``str.find``
    returns -1 (truthy) on a miss, so the function was truthy for EVERY
    input. Now returns a proper bool.
    """
    result = img_to_text_result.lower()
    return any(keyword in result for keyword in _TEXT_KEYWORDS)


def emotion(model, input):
    """Classify the emotion of *input* text with the given classifier model.

    ``input`` shadows the builtin; the name is kept so keyword-argument
    callers are not broken.
    """
    return model(input)


def handle(model, prompt):
    """Generate text from *prompt* with the given text-generation model."""
    return model(prompt)