Yiqun Chen committed · Commit d1a5e77 · 1 Parent(s): 0b60c4e

Add .gitattributes
Files changed:
- .DS_Store +0 -0
- README.md +4 -3
- __pycache__/details.cpython-310.pyc +0 -0
- __pycache__/home.cpython-310.pyc +0 -0
- __pycache__/image2image.cpython-310.pyc +0 -0
- __pycache__/text2image.cpython-310.pyc +0 -0
- __pycache__/visualization.cpython-310.pyc +0 -0
- app.py +24 -0
- data/.DS_Store +0 -0
- data/.gitignore +1 -0
- data/example_images/.DS_Store +0 -0
- details.py +73 -0
- home.py +84 -0
- image2image.py +239 -0
- requirements.txt +10 -0
- resources/.DS_Store +0 -0
- resources/4x/.DS_Store +0 -0
- resources/4x/change_time.pdf +0 -0
- resources/4x/change_time.png +3 -0
- resources/4x/cluster_change.png +3 -0
- resources/4x/nsfw_fig.png +3 -0
- resources/4x/test_figure_1_web.png +3 -0
- resources/csv_files/.DS_Store +0 -0
- resources/csv_files/twigma_release_sampled.csv +3 -0
- text2image.py +226 -0
- visualization.py +99 -0
- viz_scripts/calc_img.py +75 -0
.DS_Store ADDED
Binary file (6.15 kB)
README.md CHANGED
@@ -1,12 +1,13 @@
 ---
 title: TWIGMA
 emoji: π
-colorFrom:
-colorTo:
+colorFrom: green
+colorTo: gray
 sdk: streamlit
-sdk_version: 1.
+sdk_version: 1.17.0
 app_file: app.py
 pinned: false
+license: mit
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
__pycache__/details.cpython-310.pyc ADDED
Binary file (4.75 kB)

__pycache__/home.cpython-310.pyc ADDED
Binary file (6.84 kB)

__pycache__/image2image.cpython-310.pyc ADDED
Binary file (7.77 kB)

__pycache__/text2image.cpython-310.pyc ADDED
Binary file (8.29 kB)

__pycache__/visualization.cpython-310.pyc ADDED
Binary file (4.45 kB)
app.py ADDED
@@ -0,0 +1,24 @@
+import home
+import text2image
+import image2image
+import visualization
+import streamlit as st
+import details
+
+st.set_page_config(layout="wide")
+
+st.sidebar.title("TWIGMA")
+st.sidebar.markdown("## Menu")
+
+PAGES = {
+    "Introduction": home,
+    "Investigate themes of TWIGMA images": visualization,
+}
+
+page = st.sidebar.radio("", list(PAGES.keys()))
+st.sidebar.markdown("## Links")
+
+st.sidebar.markdown("[TWIGMA Data](https://yiqunchen.github.io/TWIGMA/)")
+st.sidebar.markdown("[TWIGMA Preprint (to-be-added)]")
+st.sidebar.markdown("[TWIGMA Analysis Code](https://github.com/yiqunchen/TWIGMA_code)")
+PAGES[page].app()
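Note: app.py dispatches through the PAGES dict, whose values are imported modules; the sidebar radio selects a key and PAGES[page].app() renders that module. Any new page therefore only has to satisfy a one-function contract. A minimal sketch for a hypothetical module (the name extra_page.py is illustrative, not part of this commit):

    # extra_page.py -- sketch of the page contract assumed by app.py
    import streamlit as st

    def app():
        # app.py calls this once the page is selected in the sidebar.
        st.markdown("## Extra page")

    # Registering it would mean adding an entry to PAGES in app.py:
    # PAGES = {..., "Extra page": extra_page}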
data/.DS_Store ADDED
Binary file (6.15 kB)
data/.gitignore ADDED
@@ -0,0 +1 @@
+/data_ignore
data/example_images/.DS_Store ADDED
Binary file (6.15 kB)
details.py ADDED
@@ -0,0 +1,73 @@
+from pathlib import Path
+import streamlit as st
+import streamlit.components.v1 as components
+from PIL import Image
+import base64
+import pandas as pd
+
+def read_markdown_file(markdown_file):
+    return Path(markdown_file).read_text()
+
+def render_svg(svg_filename):
+    """Render the given SVG file inline as a base64-encoded <img> tag."""
+    with open(svg_filename, "r") as f:
+        svg = f.read()
+    b64 = base64.b64encode(svg.encode('utf-8')).decode("utf-8")
+    html = r'<img src="data:image/svg+xml;base64,%s"/>' % b64
+    st.write(html, unsafe_allow_html=True)
+
+
+def app():
+    # intro_markdown = read_markdown_file("introduction.md")
+    # st.markdown(intro_markdown, unsafe_allow_html=True)
+
+    st.markdown("## TWIGMA (TWItter Generative-ai images with MetadatA)")
+    st.markdown("### A dataset to understand content, variation, and longitudinal theme shifts of AI-generated images on Twitter.")
+
+    render_svg("resources/SVG/Asset 49.svg")
+
+    st.markdown("This is a website for TWIGMA (TWItter Generative-ai images with MetadatA), a comprehensive dataset encompassing 800,000 gen-AI images collected from Jan 2021 to March 2023 on Twitter, with associated metadata (e.g., tweet text, creation date, number of likes). Through a comparative analysis of TWIGMA with natural images and human artwork, we find that gen-AI images possess distinctive characteristics and exhibit, on average, lower variability than their non-gen-AI counterparts. Additionally, we find that the similarity between a gen-AI image and natural images (i) is inversely correlated with the number of likes; and (ii) can be used to identify the human images that served as inspiration for the gen-AI creations. Finally, we observe a longitudinal shift in the themes of AI-generated images on Twitter: users increasingly share artistically sophisticated content such as intricate human portraits, whereas their interest in simple subjects such as natural scenes and animals has decreased. Our analyses and findings underscore the significance of TWIGMA as a unique data resource for studying AI-generated images.", unsafe_allow_html=True)
+
+    st.markdown('#### Watch our successful image-to-image retrieval via PLIP:')
+    col1, col2, col3, _, _ = st.columns([1, 1, 1, 1, 1])
+    with col1:
+        st.markdown("[Similar cells](https://twitter.com/ZhiHuangPhD/status/1641906064823312384)")
+        example1 = Image.open('resources/example/1.png')
+        st.image(example1, caption='Example 1', output_format='png')
+    with col2:
+        st.markdown("[Salient object](https://twitter.com/ZhiHuangPhD/status/1641899092195565569)")
+        example2 = Image.open('resources/example/2.png')
+        st.image(example2, caption='Example 2', output_format='png')
+    with col3:
+        st.markdown("[Similar region](https://twitter.com/ZhiHuangPhD/status/1641911235288645632)")
+        example3 = Image.open('resources/example/3.png')
+        st.image(example3, caption='Example 3', output_format='png')
+
+
+    st.markdown("#### PLIP is trained on the largest public vision–language pathology dataset: OpenPath")
+
+    col1, col2 = st.columns([1, 1])
+    with col1:
+        st.markdown("Following the usage policy and guidelines from Twitter and other entities, we established the largest public vision–language pathology dataset to date. To ensure data quality, OpenPath followed rigorous protocols for cohort inclusion and exclusion, including the removal of retweets, sensitive tweets, and non-pathology images, as well as text cleaning.", unsafe_allow_html=True)
+        st.markdown("The final OpenPath dataset consists of:", unsafe_allow_html=True)
+        st.markdown("- Tweets: 116,504 image–text pairs from Twitter posts (tweets) during Mar. 21, 2006 – Nov. 15, 2022 across 32 pathology subspecialty-specific hashtags;", unsafe_allow_html=True)
+    with col2:
+        render_svg("resources/SVG/Asset 50.svg")
+        render_svg("resources/SVG/Asset 51.svg")
+
+
+    st.markdown("#### PLIP is trained by connecting images and text via contrastive learning")
+
+    col1, col2 = st.columns([3, 1])
+    with col1:
+        st.markdown("The proposed PLIP model generates two embedding vectors from the text and image encoders. Contrastive learning pushes these vectors to be similar for paired image–text inputs and dissimilar for non-paired ones.", unsafe_allow_html=True)
+        fig1e = Image.open('resources/4x/Fig1e.png')
+        st.image(fig1e, caption='PLIP training', output_format='png')
+
+    with col2:
+        render_svg("resources/SVG/Asset 53.svg")
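Note: render_svg is duplicated verbatim in details.py, home.py, and text2image.py. A hedged sketch of factoring it into a shared module (the file name utils.py is an assumption; the commit as-is keeps three copies):

    # utils.py -- hypothetical shared helper, not part of this commit
    import base64
    import streamlit as st

    def render_svg(svg_filename):
        # Embed the SVG markup as a base64 data URI so Streamlit can
        # display it through a plain <img> tag with unsafe_allow_html.
        with open(svg_filename, "r") as f:
            svg = f.read()
        b64 = base64.b64encode(svg.encode("utf-8")).decode("utf-8")
        st.write(f'<img src="data:image/svg+xml;base64,{b64}"/>', unsafe_allow_html=True)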
home.py ADDED
@@ -0,0 +1,84 @@
+from pathlib import Path
+import streamlit as st
+import streamlit.components.v1 as components
+from PIL import Image
+import base64
+import pandas as pd
+from matplotlib import pyplot as plt
+
+def read_markdown_file(markdown_file):
+    return Path(markdown_file).read_text()
+
+def render_svg(svg_filename):
+    """Render the given SVG file inline as a base64-encoded <img> tag."""
+    with open(svg_filename, "r") as f:
+        svg = f.read()
+    b64 = base64.b64encode(svg.encode('utf-8')).decode("utf-8")
+    html = r'<img src="data:image/svg+xml;base64,%s"/>' % b64
+    st.write(html, unsafe_allow_html=True)
+
+def app():
+
+    st.markdown("## TWIGMA (TWItter Generative-ai images with MetadatA)")
+    st.markdown("### A dataset to understand content, variation, and longitudinal theme shifts of AI-generated images on Twitter.")
+
+    st.markdown("Recent progress in generative artificial intelligence (gen-AI) has enabled the generation of photo-realistic and artistically inspiring photos at a single click, catering to millions of users online. To explore how people use gen-AI models such as DALL-E and Stable Diffusion, it is critical to understand the themes, contents, and variations present in AI-generated photos. In this work, we introduce TWIGMA (TWItter Generative-ai images with MetadatA), a comprehensive dataset encompassing 800,000 gen-AI images collected from Jan 2021 to March 2023 on Twitter, with associated metadata (e.g., tweet text, creation date, number of likes).", unsafe_allow_html=True)
+
+    fig1e = Image.open('./resources/4x/test_figure_1_web.png')
+    st.image(fig1e, caption='Creation process and content overview of TWIGMA.', output_format='png')
+
+    # render_svg("./resources/SVG/Asset 49.svg")
+
+    st.markdown("### Preview of TWIGMA: ")
+    st.markdown("We display 10\% of the TWIGMA data below; the full data can be downloaded at [insert url]. Note that, in accordance with the privacy and control policy of Twitter, *no raw content* from Twitter is included in this dataset; users can (and must) retrieve the original Twitter content used for analysis via the Twitter id.")
+    twigma_df = pd.read_csv("./resources/csv_files/twigma_release_sampled.csv")  # path to the data file
+    st.write(twigma_df)  # display the table of data
+
+    st.markdown("""
+    TWIGMA contains the following fields:
+    - id: The Twitter id uniquely identifying each tweet used in this dataset and our analysis;
+    - image_name: The media id (see details at the Twitter [page](https://developer.twitter.com/en/docs/twitter-ads-api/creatives/guides/identifying-media)) used to uniquely identify each photo. This field is necessary because a tweet can contain multiple images;
+    - created_at: The time of creation corresponding to the Twitter id;
+    - like_count: The number of likes collected from the official Twitter API (snapshot: the week of May 29th). Some like counts are unavailable because the corresponding tweets have been deleted since we first downloaded the photos;
+    - quote_count: Same as like_count, but for quotes;
+    - reply_count: Same as like_count, but for replies;
+    - all_captions: The BLIP-generated (Li et al. 2022) caption for the corresponding image;
+    - label_10_clusters: The assigned k-means cluster (k=10, so this number ranges from 1 to 10);
+    - possibly_sensitive: Binary variable indicating whether the media content has been marked as sensitive/NSFW by Twitter;
+    - nsfw_score: The predicted NSFW score from a pre-trained CLIP-based NSFW detector (ranges from 0 to 1; closer to 1 means more likely to be NSFW);
+    - UMAP_dim_1: The first dimension of a two-dimensional UMAP projection of the CLIP-ViT-L-14 embeddings of the images in TWIGMA;
+    - UMAP_dim_2: The second dimension of a two-dimensional UMAP projection of the CLIP-ViT-L-14 embeddings of the images in TWIGMA.
+    """)
+
+    # Group the DataFrame by 'possibly_sensitive' and calculate the mean 'nsfw_score' for each category
+
+    st.markdown("### Safety note: ")
+    st.markdown("""
+    It is important to note that *a substantial number* of images have been classified as NSFW (not-safe-for-work, e.g., violent, pornographic, nude content) by both Twitter and a CLIP-based NSFW model.
+    Therefore, we included two fields in the final TWIGMA dataset (`possibly_sensitive` and `nsfw_score`) to help filter out these images.
+    Additionally, we note that NSFW content varies considerably across cluster labels, and two of the clusters consist primarily of NSFW content.
+    """)
+
+    st.markdown("""
+    ### References:
+    - CLIP-based NSFW Detector. 2022. https://github.com/LAION-AI/CLIP-based-NSFW-Detector. Accessed: 2023-06-06.
+    - Li, J.; Li, D.; Xiong, C.; and Hoi, S. 2022. BLIP: Bootstrapping language-image pre-training for unified vision-language understanding and generation. In International Conference on Machine Learning, 12888–12900. PMLR.
+    - Chen, Y. and Zou, J. 2023+. TWIGMA: A dataset of AI-Generated Images with Metadata From Twitter.
+    """)
+
+    st.markdown("""---""")
+    st.markdown('Acknowledgement:')
+    st.caption('Huge thanks to Zhi Huang for sharing their code to query Twitter data, and to Federico for sharing the webplip web app, which greatly inspired this website.')
+    st.markdown('Disclaimer:')
+    st.caption('Please be advised that this function has been developed in compliance with the Twitter policy of data usage and sharing. The use of this function is solely at your own risk and should be consistent with applicable laws, regulations, and ethical considerations. If you wish to review the original Twitter post, you should access the source page directly on Twitter.')
+
+    st.markdown('Privacy statement:')
+    st.caption('In accordance with the privacy and control policy of Twitter, we hereby declare that the data redistributed by us comprise only Tweet IDs and non-identifying metadata derived from the contents (e.g., clustering assignments based on images, image media ids, predicted NSFW scores) but not the contents themselves (no text, hashtags, or images are redistributed). A Tweet ID can be used to retrieve the original Twitter post, as long as the original post is still accessible; the hyperlink ceases to function if the user deletes the original post. It is important to note that a substantial number of tweets in our dataset have been classified as sensitive/NSFW by Twitter. Any distribution carried out must adhere to the regulations of Twitter, as well as the laws and regulations applicable in your jurisdiction, including export control laws and embargoes.')
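Note: the safety note above points to possibly_sensitive and nsfw_score as the filtering handles. A minimal sketch of how a consumer of the released CSV might apply them (the 0.5 cutoff is an illustrative assumption, not a recommendation from the dataset authors):

    import pandas as pd

    # Load the 10% public sample shipped with this Space.
    twigma_df = pd.read_csv("resources/csv_files/twigma_release_sampled.csv")

    # Drop rows Twitter flagged as sensitive, then apply an illustrative
    # 0.5 cutoff on the CLIP-based NSFW detector score.
    safe_df = twigma_df[(~twigma_df["possibly_sensitive"].astype(bool))
                        & (twigma_df["nsfw_score"] < 0.5)]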
image2image.py ADDED
@@ -0,0 +1,239 @@
+import streamlit as st
+import pandas as pd
+import numpy as np
+from PIL import Image
+import requests
+import tokenizers
+import os
+from io import BytesIO
+import pickle
+import base64
+import datetime
+
+import torch
+from transformers import (
+    VisionTextDualEncoderModel,
+    AutoFeatureExtractor,
+    AutoTokenizer,
+    CLIPModel,
+    AutoProcessor
+)
+import streamlit.components.v1 as components
+from st_clickable_images import clickable_images  # pip install st-clickable-images
+
+
+@st.cache(
+    hash_funcs={
+        torch.nn.parameter.Parameter: lambda _: None,
+        tokenizers.Tokenizer: lambda _: None,
+        tokenizers.AddedToken: lambda _: None
+    }
+)
+def load_path_clip():
+    model = CLIPModel.from_pretrained("vinid/plip")
+    processor = AutoProcessor.from_pretrained("vinid/plip")
+    return model, processor
+
+@st.cache
+def init():
+    with open('data/twitter.asset', 'rb') as f:
+        data = pickle.load(f)
+    meta = data['meta'].reset_index(drop=True)
+    image_embedding = data['image_embedding']
+    text_embedding = data['text_embedding']
+    print(meta.shape, image_embedding.shape)
+    validation_subset_index = meta['source'].values == 'Val_Tweets'
+    return meta, image_embedding, text_embedding, validation_subset_index
+
+def embed_images(model, images, processor):
+    inputs = processor(images=images)
+    pixel_values = torch.tensor(np.array(inputs["pixel_values"]))
+
+    with torch.no_grad():
+        embeddings = model.get_image_features(pixel_values=pixel_values)
+    return embeddings
+
+def embed_texts(model, texts, processor):
+    inputs = processor(text=texts, padding="longest")
+    input_ids = torch.tensor(inputs["input_ids"])
+    attention_mask = torch.tensor(inputs["attention_mask"])
+
+    with torch.no_grad():
+        embeddings = model.get_text_features(
+            input_ids=input_ids, attention_mask=attention_mask
+        )
+    return embeddings
+
+def app():
+    st.title('Image to Image Retrieval')
+    st.markdown('#### A pathology image search engine that correlates images with images.')
+    st.markdown("Image-to-image retrieval can be used to retrieve pathology images whose content is similar to the input image, comprehending the key components of the input image.")
+
+    st.markdown('#### Demo')
+
+    meta, image_embedding, text_embedding, validation_subset_index = init()
+    model, processor = load_path_clip()
+
+    col1, col2 = st.columns(2)
+    with col1:
+        data_options = ["All Twitter data (03/21/2006 – 01/15/2023)",
+                        "Twitter validation data (11/16/2022 – 01/15/2023)"]
+        st.radio(
+            "Choose dataset for image retrieval",
+            key="datapool",
+            options=data_options,
+        )
+    with col2:
+        retrieval_options = ["Image only",
+                             "Text and image (beta)",
+                             ]
+        st.radio(
+            "Similarity calculation",
+            key="calculation_option",
+            options=retrieval_options,
+        )
+
+    st.markdown('Try out the following examples:')
+    example_path = 'data/example_images'
+    list_of_examples = [os.path.join(example_path, v) for v in os.listdir(example_path)]
+    example_imgs = []
+    for file in list_of_examples:
+        with open(file, "rb") as image:
+            encoded = base64.b64encode(image.read()).decode()
+        example_imgs.append(f"data:image/jpeg;base64,{encoded}")
+    clicked = clickable_images(
+        example_imgs,
+        titles=[f"Image #{str(i)}" for i in range(len(example_imgs))],
+        div_style={"display": "flex", "justify-content": "center", "flex-wrap": "wrap"},
+        img_style={"margin": "5px", "height": "70px"},
+    )
+    isExampleClicked = False
+    if clicked > -1:
+        image = Image.open(list_of_examples[clicked])
+        isExampleClicked = True
+
+    col1, col2, _ = st.columns(3)
+    with col1:
+        query = st.file_uploader("Choose a file to upload")
+
+    proceed = False
+    if query:
+        image = Image.open(query)
+        proceed = True
+    elif isExampleClicked:
+        proceed = True
+
+    if proceed:
+        with col2:
+            st.image(image, caption='Your upload')
+
+        input_image = embed_images(model, [image], processor)[0].detach().cpu().numpy()
+        input_image = input_image/np.linalg.norm(input_image)
+
+        # Sort IDs by cosine similarity from high to low
+        if st.session_state.calculation_option == retrieval_options[0]:  # Image only
+            similarity_scores = input_image.dot(image_embedding.T)
+        else:  # Text and image
+            similarity_scores_i = input_image.dot(image_embedding.T)
+            similarity_scores_t = input_image.dot(text_embedding.T)
+            similarity_scores_i = similarity_scores_i/np.max(similarity_scores_i)
+            similarity_scores_t = similarity_scores_t/np.max(similarity_scores_t)
+            similarity_scores = (similarity_scores_i + similarity_scores_t)/2
+
+        ############################################################
+        # Get top results
+        ############################################################
+        topn = 5
+        df = pd.DataFrame(np.c_[np.arange(len(meta)), similarity_scores, meta['weblink'].values], columns=['idx', 'score', 'twitterlink'])
+        if st.session_state.datapool == data_options[1]:  # Use validation Twitter data
+            df = df.loc[validation_subset_index, :]
+        df = df.sort_values('score', ascending=False)
+        df = df.drop_duplicates(subset=['twitterlink'])
+        best_id_topk = df['idx'].values[:topn]
+        target_scores = df['score'].values[:topn]
+        target_weblinks = df['twitterlink'].values[:topn]
+
+        ############################################################
+        # Display results
+        ############################################################
+        st.markdown('#### Top 5 results:')
+        topk_options = ['1st', '2nd', '3rd', '4th', '5th']
+        tab = {}
+        tab[0], tab[1], tab[2] = st.columns(3)
+        for i in [0, 1, 2]:
+            with tab[i]:
+                topn_value = i
+                topn_txt = topk_options[i]
+                st.caption(f'The {topn_txt} relevant image (similarity = {target_scores[topn_value]:.4f})')
+                components.html('''
+                <blockquote class="twitter-tweet">
+                <a href="%s"></a>
+                </blockquote>
+                <script async src="https://platform.twitter.com/widgets.js" charset="utf-8">
+                </script>
+                ''' % target_weblinks[topn_value],
+                height=600)
+
+        tab[3], tab[4], tab[5] = st.columns(3)
+        for i in [3, 4]:
+            with tab[i]:
+                topn_value = i
+                topn_txt = topk_options[i]
+                st.caption(f'The {topn_txt} relevant image (similarity = {target_scores[topn_value]:.4f})')
+                components.html('''
+                <blockquote class="twitter-tweet">
+                <a href="%s"></a>
+                </blockquote>
+                <script async src="https://platform.twitter.com/widgets.js" charset="utf-8">
+                </script>
+                ''' % target_weblinks[topn_value],
+                height=800)
+
+    st.markdown("""---""")
+    st.markdown('Disclaimer')
+    st.caption('Please be advised that this function has been developed in compliance with the Twitter policy of data usage and sharing. It is important to note that the results obtained from this function are not intended to constitute medical advice or replace consultation with a qualified medical professional. The use of this function is solely at your own risk and should be consistent with applicable laws, regulations, and ethical considerations. We do not warrant or guarantee the accuracy, completeness, suitability, or usefulness of this function for any particular purpose, and we hereby disclaim any liability arising from any reliance placed on this function or any results obtained from its use. If you wish to review the original Twitter post, you should access the source page directly on Twitter.')
+
+    st.markdown('Privacy statement')
+    st.caption('In accordance with the privacy and control policy of Twitter, we hereby declare that the data redistributed by us comprise only Tweet IDs. The Tweet IDs are used to establish a linkage with the original Twitter post, as long as the original post is still accessible; the hyperlink ceases to function if the user deletes the original post. It is important to note that all tweets displayed on our service have already been classified as non-sensitive by Twitter. It is strictly prohibited to redistribute any content apart from the Tweet IDs. Any distribution carried out must adhere to the laws and regulations applicable in your jurisdiction, including export control laws and embargoes.')
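Note: the ranking step above is plain cosine similarity: the query embedding is unit-normalized and dotted against the stored embedding matrix (assuming its rows are likewise unit-normalized, as CLIP-style pipelines typically arrange). A self-contained sketch of that core computation with stand-in data:

    import numpy as np

    rng = np.random.default_rng(0)
    image_embedding = rng.normal(size=(1000, 512))   # stand-in for data['image_embedding']
    image_embedding /= np.linalg.norm(image_embedding, axis=1, keepdims=True)

    query = rng.normal(size=512)                     # stand-in for an embed_images() output
    query /= np.linalg.norm(query)                   # unit-normalize the query

    similarity_scores = image_embedding.dot(query)   # cosine similarity per candidate
    top5 = np.argsort(similarity_scores)[::-1][:5]   # indices of the five best matches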
requirements.txt ADDED
@@ -0,0 +1,10 @@
+git+https://github.com/openai/CLIP.git
+torch
+transformers
+pandas==1.4.1
+numpy
+Pillow
+streamlit==1.19.0
+st_clickable_images
+plotly
+datetime
resources/.DS_Store ADDED
Binary file (10.2 kB)

resources/4x/.DS_Store ADDED
Binary file (6.15 kB)

resources/4x/change_time.pdf ADDED
Binary file (117 kB)

resources/4x/change_time.png ADDED
Git LFS Details

resources/4x/cluster_change.png ADDED
Git LFS Details

resources/4x/nsfw_fig.png ADDED
Git LFS Details

resources/4x/test_figure_1_web.png ADDED
Git LFS Details

resources/csv_files/.DS_Store ADDED
Binary file (6.15 kB)
resources/csv_files/twigma_release_sampled.csv ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:acc126898ee8a1aaaa19796b8c94538aa50f4e9a8cb7f191d51ad7298ab9892e
+size 13659204
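Note: what the repository stores here is a Git LFS pointer (spec version, sha256 oid, and a byte size of 13659204, about 13.7 MB), not the CSV itself. A fresh clone therefore needs the LFS objects fetched (e.g., `git lfs pull` with git-lfs installed) before pd.read_csv on this path can succeed; otherwise pandas would try to parse the three-line pointer file.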
text2image.py ADDED
@@ -0,0 +1,226 @@
+import streamlit as st
+import pandas as pd
+import numpy as np
+from PIL import Image
+import pickle
+import tokenizers
+import torch
+
+from transformers import (
+    CLIPModel,
+    AutoProcessor
+)
+import streamlit.components.v1 as components
+import base64
+
+def render_svg(svg_filename):
+    """Render the given SVG file inline as a base64-encoded <img> tag."""
+    with open(svg_filename, "r") as f:
+        svg = f.read()
+    b64 = base64.b64encode(svg.encode('utf-8')).decode("utf-8")
+    html = r'<img src="data:image/svg+xml;base64,%s"/>' % b64
+    st.write(html, unsafe_allow_html=True)
+
+@st.cache(
+    hash_funcs={
+        torch.nn.parameter.Parameter: lambda _: None,
+        tokenizers.Tokenizer: lambda _: None,
+        tokenizers.AddedToken: lambda _: None
+    }
+)
+def load_path_clip():
+    model = CLIPModel.from_pretrained("vinid/plip")
+    processor = AutoProcessor.from_pretrained("vinid/plip")
+    return model, processor
+
+@st.cache
+def init():
+    with open('data/twitter.asset', 'rb') as f:
+        data = pickle.load(f)
+    meta = data['meta'].reset_index(drop=True)
+    image_embedding = data['image_embedding']
+    text_embedding = data['text_embedding']
+    print(meta.shape, image_embedding.shape)
+    validation_subset_index = meta['source'].values == 'Val_Tweets'
+    return meta, image_embedding, text_embedding, validation_subset_index
+
+def embed_images(model, images, processor):
+    inputs = processor(images=images)
+    pixel_values = torch.tensor(np.array(inputs["pixel_values"]))
+
+    with torch.no_grad():
+        embeddings = model.get_image_features(pixel_values=pixel_values)
+    return embeddings
+
+def embed_texts(model, texts, processor):
+    inputs = processor(text=texts, padding="longest")
+    input_ids = torch.tensor(inputs["input_ids"])
+    attention_mask = torch.tensor(inputs["attention_mask"])
+
+    with torch.no_grad():
+        embeddings = model.get_text_features(
+            input_ids=input_ids, attention_mask=attention_mask
+        )
+    return embeddings
+
+
+def app():
+
+    st.title('Text to Image Retrieval')
+    st.markdown('#### A pathology image search engine that goes from text to images.')
+
+    col1, col2 = st.columns([1, 1])
+    with col1:
+        st.markdown("The text-to-image retrieval system can serve as an image search engine, enabling users to match images from multiple queries and retrieve the most relevant image based on a sentence description. This generic system can comprehend semantic and interrelated knowledge, such as “Breast tumor surrounded by fat”.")
+        st.markdown("Unlike searching keywords and sentences on Google and indirectly matching images from the target text, our proposed pathology image retrieval allows direct comparison between input sentences and images.")
+    with col2:
+        render_svg("resources/SVG/Asset 54.svg")
+
+    meta, image_embedding, text_embedding, validation_subset_index = init()
+    model, processor = load_path_clip()
+
+    st.markdown('### Search')
+    st.markdown('How to use this: first, select a dataset on which to do retrieval.\n'
+                'Then, either select a predefined search query or input one yourself.')
+
+    col1, col2 = st.columns(2)
+    with col1:
+        data_options = ["All Twitter data (03/21/2006 – 01/15/2023)",
+                        "Validation Twitter data (11/16/2022 – 01/15/2023)"]
+        st.selectbox(
+            "Dataset",
+            key="datapool",
+            options=data_options,
+        )
+
+    with col2:
+        retrieval_options = ["Image only",
+                             "Text and image (beta)",
+                             ]
+        st.radio(
+            "Similarity calculation",
+            key="calculation_option",
+            options=retrieval_options,
+        )
+
+    col1, col2 = st.columns(2)
+
+    with col1:
+        # Create selectbox
+        examples = ['Breast tumor surrounded by fat',
+                    'HER2+ breast tumor',
+                    'Colorectal cancer tumor on epithelium',
+                    'An image of endometrium epithelium',
+                    'Breast cancer DCIS',
+                    'Papillary carcinoma in breast tissue',
+                    ]
+        query_1 = st.selectbox("Select an example", options=examples)
+
+    col1_submit = True
+
+    with col2:
+        form = st.form(key='my_form')
+        query_2 = form.text_input(label='Or input your custom query:')
+        submit_button = form.form_submit_button(label='Submit')
+
+    if submit_button:
+        col1_submit = False
+
+    if col1_submit:
+        query = query_1
+    else:
+        query = query_2
+
+    input_text = embed_texts(model, [query], processor)[0].detach().cpu().numpy()
+    input_text = input_text/np.linalg.norm(input_text)
+
+    # Sort IDs by cosine similarity from high to low
+    if st.session_state.calculation_option == retrieval_options[0]:  # Image only
+        similarity_scores = input_text.dot(image_embedding.T)
+    else:  # Text and image
+        similarity_scores_i = input_text.dot(image_embedding.T)
+        similarity_scores_t = input_text.dot(text_embedding.T)
+        similarity_scores_i = similarity_scores_i / np.max(similarity_scores_i)
+        similarity_scores_t = similarity_scores_t / np.max(similarity_scores_t)
+        similarity_scores = (similarity_scores_i + similarity_scores_t) / 2
+
+    ############################################################
+    # Get top results
+    ############################################################
+    topn = 5
+    df = pd.DataFrame(np.c_[np.arange(len(meta)), similarity_scores, meta['weblink'].values], columns=['idx', 'score', 'twitterlink'])
+    if st.session_state.datapool == data_options[1]:  # Use validation Twitter data
+        df = df.loc[validation_subset_index, :]
+    df = df.sort_values('score', ascending=False)
+    df = df.drop_duplicates(subset=['twitterlink'])
+    best_id_topk = df['idx'].values[:topn]
+    target_scores = df['score'].values[:topn]
+    target_weblinks = df['twitterlink'].values[:topn]
+
+    ############################################################
+    # Display results
+    ############################################################
+    text = '<font size="4">Your input query: <span style="background-color: rgb(230,230,230);"><b>%s</b></span>' % query + \
+           ' (Try searching it directly on [Twitter](https://twitter.com/search?q=%s&src=typed_query) or [Google](https://www.google.com/search?q=%s))</font>' % (query.replace(' ', '%20'), query.replace(' ', '+'))
+    st.markdown(text, unsafe_allow_html=True)
+
+    st.markdown('#### Top 5 results:')
+    topk_options = ['1st', '2nd', '3rd', '4th', '5th']
+    tab = {}
+    tab[0], tab[1], tab[2] = st.columns(3)
+    for i in [0, 1, 2]:
+        with tab[i]:
+            topn_value = i
+            topn_txt = topk_options[i]
+            st.caption(f'The {topn_txt} relevant image (similarity = {target_scores[topn_value]:.4f})')
+            components.html('''
+            <blockquote class="twitter-tweet">
+            <a href="%s"></a>
+            </blockquote>
+            <script async src="https://platform.twitter.com/widgets.js" charset="utf-8">
+            </script>
+            ''' % target_weblinks[topn_value],
+            height=600)
+
+    tab[3], tab[4], tab[5] = st.columns(3)
+    for i in [3, 4]:
+        with tab[i]:
+            topn_value = i
+            topn_txt = topk_options[i]
+            st.caption(f'The {topn_txt} relevant image (similarity = {target_scores[topn_value]:.4f})')
+            components.html('''
+            <blockquote class="twitter-tweet">
+            <a href="%s"></a>
+            </blockquote>
+            <script async src="https://platform.twitter.com/widgets.js" charset="utf-8">
+            </script>
+            ''' % target_weblinks[topn_value],
+            height=800)
+
+    st.markdown("""---""")
+    st.markdown('Disclaimer')
+    st.caption('Please be advised that this function has been developed in compliance with the Twitter policy of data usage and sharing. It is important to note that the results obtained from this function are not intended to constitute medical advice or replace consultation with a qualified medical professional. The use of this function is solely at your own risk and should be consistent with applicable laws, regulations, and ethical considerations. We do not warrant or guarantee the accuracy, completeness, suitability, or usefulness of this function for any particular purpose, and we hereby disclaim any liability arising from any reliance placed on this function or any results obtained from its use. If you wish to review the original Twitter post, you should access the source page directly on Twitter.')
+
+    st.markdown('Privacy statement')
+    st.caption('In accordance with the privacy and control policy of Twitter, we hereby declare that the data redistributed by us comprise only Tweet IDs. The Tweet IDs are used to establish a linkage with the original Twitter post, as long as the original post is still accessible; the hyperlink ceases to function if the user deletes the original post. It is important to note that all tweets displayed on our service have already been classified as non-sensitive by Twitter. It is strictly prohibited to redistribute any content apart from the Tweet IDs. Any distribution carried out must adhere to the laws and regulations applicable in your jurisdiction, including export control laws and embargoes.')
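Note: the text side of retrieval reduces to embedding the query sentence with the same checkpoint and unit-normalizing it before the dot-product ranking. A hedged, self-contained sketch of just that path, mirroring embed_texts above (the embedding dimensionality depends on the vinid/plip checkpoint; treat the snippet as illustrative):

    import numpy as np
    import torch
    from transformers import CLIPModel, AutoProcessor

    model = CLIPModel.from_pretrained("vinid/plip")
    processor = AutoProcessor.from_pretrained("vinid/plip")

    inputs = processor(text=["Breast tumor surrounded by fat"], padding="longest")
    with torch.no_grad():
        emb = model.get_text_features(
            input_ids=torch.tensor(inputs["input_ids"]),
            attention_mask=torch.tensor(inputs["attention_mask"]),
        )

    query = emb[0].cpu().numpy()
    query /= np.linalg.norm(query)  # unit-normalize before ranking by dot product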
visualization.py ADDED
@@ -0,0 +1,99 @@
+from pathlib import Path
+import streamlit as st
+import streamlit.components.v1 as components
+import plotly.figure_factory as ff
+import numpy as np
+import pandas as pd
+import requests
+import plotly.express as px
+
+from PIL import Image
+
+# from streamlit_plotly_events import plotly_events
+
+
+def app():
+
+    st.markdown('### Themes of AI-generated images on Twitter')
+    st.markdown("""
+    To investigate the themes, and the corresponding longitudinal changes, of images in the TWIGMA dataset, we applied k-means clustering to the TWIGMA
+    data with k = 10 and plotted the change of cluster membership over time (see Figure 2).
+    We note substantial changes in cluster membership: clusters 1, 3, and 4 have experienced a
+    steady decline over time, while clusters 8 and 9 showed a consistent increase. Because we lack direct access to the prompts used to
+    generate every image in TWIGMA, and because of the gap between an input prompt and the resulting output,
+    we used BLIP (Li et al. 2022) to caption the images in TWIGMA. Prominent themes we observed include painting, woman, man, and hair, aligning with the known interest of users in generating detailed human portraits in
+    various styles using text-to-image models.
+    """)
+
+    img_2d_embed = pd.read_csv("./resources/csv_files/twigma_release_sampled.csv")
+    img_2d_embed = img_2d_embed.sample(frac=0.5, random_state=2023)
+
+    # txt_2d_embed = pd.read_csv('data/txt_2d_embedding.csv', index_col=0)
+    # txt_2d_embed = txt_2d_embed.sample(frac=0.1, random_state=0)
+
+    col1, col2 = st.columns([1, 1])
+    with col1:
+        fig1 = ff.create_2d_density(
+            x=img_2d_embed['UMAP_dim_1'],
+            y=img_2d_embed['UMAP_dim_2'],
+            # colors=img_2d_embed['tag'],
+            colorscale='Greens',  # set the color map
+            height=600,  # set height of the figure
+            width=600,  # set width of the figure
+            title='Figure 1: Image embedding visualized using 2D UMAP'
+        )
+        # selected_points = plotly_events(fig1, click_event=True, hover_event=True)
+        st.plotly_chart(fig1, use_container_width=True)
+    with col2:
+        fig4 = Image.open('./resources/4x/change_time.png')
+        st.image(fig4, output_format='png',
+                 caption='Figure 2: Composition of image clusters (estimated using k-means clustering with k = 10) \
+                 over time. We observe notable changes in cluster membership and underlying themes of \
+                 TWIGMA images from 2021 to 2023.')
+
+    st.markdown('#### Visualizing the concept of each cluster')
+    st.markdown("""
+    We further visualize each cluster, along with the most frequent topics derived from the BLIP captions, in Figure 3,
+    with emojis representing the relative trends over time. Our findings indicate a shift in Twitter users' preferences for image topics.
+    There is a growing interest in sharing artistically sophisticated or distinct content, such as intricate human portraits, while interest in simpler themes such as natural scenes has declined. In addition, clusters 5 and 8 notably contain a substantial number of images with increasing popularity but also significant amounts of NSFW, pornographic, and nude content.
+    This observation aligns with recent studies that highlight the rapid growth of relatively less-regulated online communities focused on generating images with these models.
+    """)
+
+    col1, col2, col3 = st.columns([1, 6, 1])
+    with col2:
+        fig2 = Image.open('./resources/4x/cluster_change.png')
+        st.image(fig2, caption='Figure 3: Randomly sampled images from the 10 clusters \
+                 identified using k-means clustering. Clusters 5 and 8 predominantly contain NSFW \
+                 photos and have been pixelated accordingly. Cluster annotations are \
+                 provided using the most frequent words in the associated BLIP-inferred captions, \
+                 with up-arrow, down-arrow, and right-arrow emojis indicating increasing, decreasing, or unchanged trends over time, respectively.', output_format='png')
+    img_2d_embed_small = img_2d_embed.sample(frac=0.3, random_state=2023)
+    # Define the desired order of the cluster labels
+    cluster_order = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10']
+
+    img_2d_embed_small = img_2d_embed_small.sort_values(['label_10_clusters'])
+
+    col1, col2, col3 = st.columns([1, 6, 1])
+    with col2:
+        fig3 = px.density_contour(
+            img_2d_embed_small,
+            x='UMAP_dim_1',
+            y='UMAP_dim_2',
+            facet_col_wrap=5,
+            color='label_10_clusters',
+            facet_col='label_10_clusters',
+            color_discrete_sequence=px.colors.qualitative.Safe,
+            title='Figure 4: Image embedding visualized in 2D UMAP by estimated clusters')
+        st.plotly_chart(fig3, use_container_width=True)
+
+    # Rename the x and y axes
+    # fig3.update_xaxes(title='UMAP 1')
+    # fig3.update_yaxes(title='UMAP 2')
+
+    # st.plotly_chart(fig3)
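Note: Figure 2 plots cluster composition over time, and the released sample carries enough columns to recompute such a breakdown (created_at, plus the cluster label, spelled label_10_clusters where the code reads it). A minimal sketch under those assumptions; the quarterly bins are an illustrative choice:

    import pandas as pd

    df = pd.read_csv("resources/csv_files/twigma_release_sampled.csv")
    df["created_at"] = pd.to_datetime(df["created_at"])

    # Count images per (quarter, cluster), then normalize each quarter to shares.
    counts = (df.groupby([pd.Grouper(key="created_at", freq="Q"), "label_10_clusters"])
                .size()
                .unstack(fill_value=0))
    composition = counts.div(counts.sum(axis=1), axis=0)  # rows sum to 1 per quarter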
viz_scripts/calc_img.py ADDED
@@ -0,0 +1,75 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Fri Mar 10 21:13:04 2023
+
+@author: zhihuang
+"""
+
+import pickle
+import os
+import pandas as pd
+import numpy as np
+import umap
+import seaborn as sns
+import matplotlib.pyplot as plt
+opj = os.path.join
+
+if __name__ == '__main__':
+    dd = '/home/zhihuang/Desktop/webplip/data'
+    with open(opj(dd, 'twitter.asset'), 'rb') as f:
+        data = pickle.load(f)
+
+    n_neighbors = 15
+    random_state = 0
+
+    reducer = umap.UMAP(n_components=2,
+                        n_neighbors=n_neighbors,
+                        min_dist=0.1,
+                        metric='euclidean',
+                        random_state=random_state)
+    # fit_transform both fits the reducer and projects the embeddings
+    # (the original fit-then-transform pair did the same work twice).
+    img_2d = reducer.fit_transform(data['image_embedding'])
+    df_img = pd.DataFrame(np.c_[img_2d, data['meta'].values], columns=['UMAP_1', 'UMAP_2'] + list(data['meta'].columns))
+    df_img.to_csv(opj(dd, 'img_2d_embedding.csv'))
+
+    # reducer = umap.UMAP(n_components=2,
+    #                     n_neighbors=n_neighbors,
+    #                     min_dist=0.1,
+    #                     metric='euclidean',
+    #                     random_state=random_state)
+    txt_2d = reducer.fit_transform(data['text_embedding'])  # refits the reducer on the text embeddings
+    df_txt = pd.DataFrame(np.c_[txt_2d, data['meta'].values], columns=['UMAP_1', 'UMAP_2'] + list(data['meta'].columns))
+    df_txt.to_csv(opj(dd, 'txt_2d_embedding.csv'))
+
+    fig, ax = plt.subplots(1, 2, figsize=(20, 10))
+    sns.scatterplot(data=df_img,
+                    x='UMAP_1',
+                    y='UMAP_2',
+                    alpha=0.2,
+                    ax=ax[0],
+                    hue='tag'
+                    )
+
+    sns.scatterplot(data=df_txt,
+                    x='UMAP_1',
+                    y='UMAP_2',
+                    alpha=0.2,
+                    ax=ax[1],
+                    hue='tag'
+                    )