richardpalestri commited on
Commit
cb16d7e
1 Parent(s): 0a748b6

Comp-anion init in huggingfacespace

Browse files

This is the initial commit to the main branch of a mostly working web application hosted on Streamlit. The application takes in a photo or text to help you out when you need some advice, or just want a second opinion.

This is a process-heavy app: three models are loaded — one for prechecking with image captioning, one for NLP text classification, and one for generating text. I would like to thank SamLowe for the text-classification model, bigscience for the text-generation model, and nlpconnect for the ViT-GPT2 image-captioning model (i.e., what I used for the precheck).

Here are their links:
https://huggingface.co/SamLowe/roberta-base-go_emotions
https://huggingface.co/nlpconnect/vit-gpt2-image-captioning
https://huggingface.co/bigscience/bloom

Files changed (4) hide show
  1. __pycache__/funs.cpython-311.pyc +0 -0
  2. funs.py +37 -0
  3. requirements.txt +4 -0
  4. streamlit.py +36 -0
__pycache__/funs.cpython-311.pyc ADDED
Binary file (2.06 kB). View file
 
funs.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastai.vision.all import PILImage
2
+ from transformers import AutoTokenizer, GPT2LMHeadModel
3
+ import streamlit as st
4
+
5
def preprocess_image(file):
    """
    Load an uploaded file into a PILImage scaled to 288x288.

    Args:
        file: a file-like object (e.g. a Streamlit UploadedFile) accepted
            by ``PILImage.create``.

    Returns:
        The resized PILImage.
    """
    img = PILImage.create(file)
    # BUG FIX: resize() returns a new image rather than mutating in place;
    # the original discarded the result, so no resizing ever happened.
    img = img.resize((288, 288))
    return img
12
+
13
def get_context(model, img):
    """
    Caption the given image with the given image-to-text model.

    Args:
        model: a Hugging Face image-to-text pipeline; it returns a list of
            dicts, each with a 'generated_text' key.
        img: the (preprocessed) image to caption.

    Returns:
        str: the generated caption, or "" when the model returns nothing.
    """
    # Run image-to-text.
    text_from_image = model(img)
    # BUG FIX: the original str()-ified the result list and sliced off a
    # fixed 19-character prefix ("{'generated_text': "), which left a stray
    # "'}" on the end of every caption. Read the dict field directly.
    if not text_from_image:
        return ""
    return text_from_image[0].get("generated_text", "")
23
+
24
def precheck(img_to_text_result):
    """
    Return True if the caption looks like journal content.

    Checks whether the image-to-text result mentions handwriting, writing,
    or a book (the original docstring incorrectly said "dogs or puppies").

    Args:
        img_to_text_result: caption string produced by ``get_context``.

    Returns:
        bool: True when any keyword is present in the caption.
    """
    result = img_to_text_result.lower()
    # BUG FIX: str.find returns -1 when the substring is absent, and -1 is
    # truthy, so the original `... or result.find('book')` made this
    # function truthy for nearly every input. Use membership tests instead.
    return any(keyword in result for keyword in ("handwriting", "writing", "book"))
30
+
31
def emotion(model, text):
    """
    Classify the emotion of the given text with an NLP classification model.

    Args:
        model: a Hugging Face text-classification pipeline.
        text: the input string to classify.

    Returns:
        The raw model output (a list of label/score dicts for a HF pipeline).
    """
    # Parameter renamed from `input`, which shadowed the builtin; all
    # in-repo callers pass it positionally.
    return model(text)
34
+
35
def handle(model, prompt):
    """
    Run the text-generation model on the given prompt.

    Args:
        model: a Hugging Face text-generation pipeline.
        prompt: the prompt string to generate from.

    Returns:
        The raw pipeline output for the prompt.
    """
    return model(prompt)
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ fastai==2.7.13
2
+ Pillow==10.2.0
3
+ torch==2.1.1
4
+ transformers==4.34.1
streamlit.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from funs import *
3
+ from transformers import pipeline
4
+ import torch
5
+ from PIL import Image
6
+
7
# Load the two text models once at startup; the image-to-text model is
# loaded lazily below, only when a file is actually uploaded.
feel = pipeline("text-classification", model="SamLowe/roberta-base-go_emotions")  # text classifier, it feels
knower = pipeline("text-generation", model="bigscience/bloom")  # text generation, it handles

st.title("Comp-anion")
st.subheader("Comp-anion is a computer companion! Upload either text or a photo from your journal to get some insight and compassion from your comp-anion!")

u_file = st.file_uploader("Choose a file")

if u_file is not None:  # when a file gets uploaded
    seer = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")  # image to text, it sees
    # BUG FIX: the original called undefined `preprocess`; funs.py defines
    # `preprocess_image`.
    u_file = preprocess_image(u_file)
    # Caption once and reuse the result. The original captioned the image a
    # second time through a typo'd `get_content`, which would raise NameError.
    the_context = get_context(seer, u_file)
    if precheck(the_context):  # precheck the preprocessed image's caption
        st.write("Submission:")
        st.write(the_context)  # write out the image-to-text result
        emotion_found = emotion(feel, the_context)
        st.write(emotion_found)
        # BUG FIX: `handle` takes (model, prompt); the original passed three
        # arguments (a TypeError) and discarded the result. Build the same
        # advice prompt used in the text branch and show the generation.
        prompt = "Give advice based on these inputs emotion={} , text given={} ".format(emotion_found, the_context)
        st.write(handle(knower, prompt))

st.subheader("Pictures aren't your style? Paste your text below and hit analyze!")
text_box = st.text_input("Paste Your Text Here :)", value="I've had a really nice day today")

if st.button("Analyze"):
    prompt = "Give advice based on these inputs emotion={} , text given={} "
    emotion_found = emotion(feel, text_box)
    # BUG FIX: the original formatted the prompt with itself
    # (`prompt.format(emotion_found, prompt)`) instead of the user's text.
    prompt = prompt.format(emotion_found, text_box)
    st.write(prompt)
    st.write(emotion_found)
    generation = handle(knower, prompt)
    # The text-generation pipeline returns a list of dicts; show the
    # generated text instead of slicing the list by the input's length.
    st.write(generation[0]["generated_text"])