anasrz committed
Commit
85fbc24
1 Parent(s): 3488695

Create model.py

Files changed (1)
  1. model.py +39 -0
model.py ADDED
@@ -0,0 +1,39 @@
+
+ import torch
+ from torch.utils.data import Dataset
+ from transformers import pipeline
+ import streamlit as st
+ import requests
+
+ def get_story(image_path):
+     model_name = st.selectbox('Select the Model', ['alpaca-lora', 'flan-t5-base'])
+     image_to_text = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
+     caption = image_to_text(image_path)
+     caption = caption[0]['generated_text']
+     st.write(f"Generated Caption: {caption}")
+     input_string = f"""Question: Generate 100 words story on this text
+     '{caption}' Answer:"""
+     if model_name == 'flan-t5-base':
+         from transformers import T5ForConditionalGeneration, AutoTokenizer
+         model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-base", device_map="auto", load_in_8bit=True)
+         tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-base")
+         inputs = tokenizer(input_string, return_tensors="pt").input_ids.to("cpu")
+         outputs = model.generate(inputs, max_length=1000)
+         outputs = tokenizer.decode(outputs[0])
+     else:
+
+         response = requests.post("https://tloen-alpaca-lora.hf.space/run/predict", json={
+             "data": [
+                 "Write a story about this image caption",
+                 caption,
+                 0.1,
+                 0.75,
+                 40,
+                 4,
+                 128,
+             ]
+         }).json()
+
+         data = response["data"]
+         outputs = data[0]
+     return outputs
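
For context, a minimal sketch of how get_story could be called from a Streamlit entry point. The app.py filename, the file-uploader flow, and the temporary-file handling below are assumptions for illustration and are not part of this commit:

# app.py -- hypothetical Streamlit entry point, not part of this commit
import tempfile

import streamlit as st

from model import get_story

st.title("Image to Story")

uploaded = st.file_uploader("Upload an image", type=["png", "jpg", "jpeg"])
if uploaded is not None:
    # get_story passes its argument to the captioning pipeline as a file path,
    # so persist the uploaded bytes to disk first.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as tmp:
        tmp.write(uploaded.getvalue())
        image_path = tmp.name
    st.image(image_path)
    if st.button("Generate story"):
        st.write(get_story(image_path))

On the alpaca-lora branch, the positional values in the request payload (0.1, 0.75, 40, 4, 128) appear to map to the generation settings exposed by the tloen-alpaca-lora Gradio demo, likely temperature, top-p, top-k, beam count, and max new tokens; that mapping is not documented in this repo, so treat it as an assumption.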