Claudz163 committed on
Commit
321df2e
1 Parent(s): 5429a99
Files changed (1)
  1. app.py +8 -26
app.py CHANGED
@@ -1,21 +1,16 @@
 import streamlit as st
 from transformers import pipeline
-from huggingface_hub import login, InferenceClient
+from huggingface_hub import InferenceClient
 from PIL import Image
 import os
 
 
-
-
-
-
-
-login(token=os.getenv("HUGGINGFACE_TOKEN"))
-client = InferenceClient(api_key="HUGGINGFACE_TOKEN")
+api_key = os.getenv("HUGGINGFACE_TOKEN")
+client = InferenceClient(api_key=api_key)
 
 st.header("Character Captions (IN PROGRESS!)")
 st.write("Have a character caption any image you upload!")
-character = st.selectbox("Choose a character", ["rapper", "monkey", "shrek", "unintelligible"])
+character = st.selectbox("Choose a character", ["rapper", "shrek", "unintelligible"])
 
 uploaded_img = st.file_uploader("Upload an image")
 
@@ -28,21 +23,14 @@ if uploaded_img is not None:
 
     response = image_captioner(image)
     caption = response[0]['generated_text']
-    st.write("Caption:", caption)
 
     character_prompts = {
         "rapper": f"Describe this scene like you're a rapper: {caption}.",
-        "monkey": f"Describe this scene like you're a monkey going bananas: {caption}.",
         "shrek": f"Describe this scene like you're Shrek: {caption}.",
         "unintelligible": f"Describe this scene in a way that makes no sense: {caption}."
     }
 
     prompt = character_prompts[character]
-    st.write(prompt)
-
-    personality = "rapper"
-    prompt = character_prompts[personality]
-
 
     messages = [
         { "role": "user", "content": prompt }
@@ -55,17 +43,11 @@ if uploaded_img is not None:
         stream=True
    )
 
+    response = ''
     for chunk in stream:
-        st.write(chunk.choices[0].delta.content)
-
-
-
+        response += chunk.choices[0].delta.content
+
+    st.write(response)
 
-    # text_generator = pipeline("text-generation", model="meta-llama/Llama-2-7b-hf", framework="pt")
 
-    # prompt = character_prompts[character]
-    # st.write("Styled Prompt:", prompt)
 
-    # generated_text = text_generator(prompt, max_length=50, do_sample=True)
-    # styled_caption = generated_text[0]['generated_text']
-    # st.write("Character-Styled Caption:", styled_caption)
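After this change, the app buffers the streamed chat completion into one string and renders it with a single st.write(response) call instead of writing each chunk separately. Below is a minimal standalone sketch of that stream-and-accumulate pattern; the actual client call sits between the hunks and is not shown in this diff, so the chat_completion method, the model id, and the sample caption are assumptions for illustration, not taken from the commit.

# Sketch of the stream-and-accumulate pattern introduced in this commit.
# Assumptions (not shown in the diff): the chat call is client.chat_completion(...),
# and the model id and caption below are placeholders.
import os
from huggingface_hub import InferenceClient

client = InferenceClient(api_key=os.getenv("HUGGINGFACE_TOKEN"))

caption = "a dog running on a beach"  # stand-in for the generated image caption
messages = [{"role": "user", "content": f"Describe this scene like you're Shrek: {caption}."}]

stream = client.chat_completion(
    messages=messages,
    model="meta-llama/Llama-3.2-1B-Instruct",  # placeholder model id
    max_tokens=256,
    stream=True,
)

# Each streamed chunk carries a text fragment in chunk.choices[0].delta.content;
# concatenate the fragments so the full reply can be shown in one go.
response = ""
for chunk in stream:
    response += chunk.choices[0].delta.content or ""

print(response)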