# jonathanjordan21's picture
# Update app.py
# b110be1
# raw
# history blame
# 1.18 kB
import os

import gradio as gr
import torch
from langchain.llms import HuggingFacePipeline
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline, AutoModelForSeq2SeqLM

from components import caption_chain, tag_chain
from components import pexels, utils
# Shared seq2seq model backing both the caption and tag chains.
_MODEL_ID = "declare-lab/flan-alpaca-gpt4-xl"

model = AutoModelForSeq2SeqLM.from_pretrained(_MODEL_ID)
tokenizer = AutoTokenizer.from_pretrained(_MODEL_ID)

# Wrap the HF pipeline so LangChain can drive it as an LLM.
pipe = pipeline(
    "text2text-generation",
    model=model,
    tokenizer=tokenizer,
    max_length=120,
)
local_llm = HuggingFacePipeline(pipeline=pipe)

# Two chains over the same local LLM: one writes captions, one extracts tags.
llm_chain = caption_chain.chain(llm=local_llm)
sum_llm_chain = tag_chain.chain(llm=local_llm)

# Pexels API key is injected via environment (e.g. a Spaces secret).
pexels_api_key = os.getenv("pexels_api_key")
def pred(product="Bluetooth Earphone"):
    """Fetch stock clips for *product* from Pexels, stitch them into one video.

    Args:
        product: Product name used as the search query. Defaults to the
            previously hard-coded "Bluetooth Earphone" so existing no-arg
            callers keep working; the Gradio click handler passes the
            textbox value here.

    Returns:
        Tuple ``(captions_text, video_folder)`` — newline-joined captions and
        the folder containing the combined video — in the order expected by
        the ``outputs=[captions, video]`` wiring.
    """
    # Fix: was `pexel_api_key` (NameError) — the module-level variable is
    # `pexels_api_key`.
    folder_name, sentences = pexels.generate_videos(
        product, pexels_api_key, 1920, 1080
    )
    utils.combine_videos(folder_name)
    # Fix: was `sentences.join("\n")` — join is a str method, so the
    # separator calls it on the list of sentences, not the other way round.
    return "\n".join(sentences), folder_name
# Gradio UI: product name in -> generated captions and the stitched video out.
with gr.Blocks() as demo:
    # Fix: Textbox's first positional argument is the initial *value*;
    # "Product Name" was meant as the field label.
    textbox = gr.Textbox(label="Product Name")
    captions = gr.Textbox(label="Captions")
    video = gr.Video()
    btn = gr.Button("Submit")
    # NOTE(review): outputs order is (captions, video) — confirm pred's
    # return matches this ordering.
    btn.click(pred, inputs=textbox, outputs=[captions, video])

demo.launch()