# Streamlit app: reverse-search an uploaded image via the TinEye API and
# report where (and when) it has appeared on the web.
import streamlit as st
import requests
import pandas as pd
from PIL import Image
import io
from io import BytesIO
def check_image_accessibility(url):
    """Best-effort probe of *url* for a displayable image.

    Returns a ``(status, image)`` pair: status is "Accessible" or
    "Not Accessible", and image is a ``PIL.Image.Image`` when accessible,
    otherwise ``None``.
    """
    try:
        resp = requests.get(url, timeout=5)
        if resp.status_code != 200:
            return "Not Accessible", None
        img = Image.open(BytesIO(resp.content))
        # Only modes the UI can render directly, and a non-empty canvas.
        displayable = img.mode in ('RGB', 'RGBA', 'L')
        non_empty = img.size[0] > 0 and img.size[1] > 0
        if displayable and non_empty:
            return "Accessible", img
        return "Not Accessible", None
    except Exception:
        # Network failures, timeouts, and undecodable payloads all count
        # as inaccessible — this probe is deliberately non-fatal.
        return "Not Accessible", None
def analyze_image(image_file):
    """Reverse-search *image_file* with the TinEye API.

    Parameters
    ----------
    image_file : an uploaded-file object exposing ``getvalue()``
        (e.g. a Streamlit ``UploadedFile``).

    Returns
    -------
    (df, first_crawl_date, images)
        ``df`` has one row per backlink (columns: URL, Backlink,
        Crawl Date, Image URL, Image Status), sorted by crawl date but
        keeping its original integer index; ``images[i]`` is the PIL
        image (or None) for the df row whose original index is ``i``.

    Raises
    ------
    requests.HTTPError
        If the TinEye endpoint returns an error status.
    """
    # TinEye API endpoint, pre-sorted by match score.
    URL = "https://tineye.com/api/v1/result_json/?sort=score&order=desc"
    image_data = image_file.getvalue()
    files = {'image': ("test.png", image_data)}
    # timeout prevents the request from hanging the UI indefinitely;
    # raise_for_status surfaces HTTP errors before we try to parse JSON.
    response = requests.post(URL, files=files, timeout=30)
    response.raise_for_status()
    output = response.json()

    urls = []
    backlinks = []
    crawl_dates = []
    image_urls = []
    image_statuses = []
    images = []
    for match in output['matches']:
        match_backlinks = match['backlinks']
        if not match_backlinks:
            # No backlinks -> no rows; skip the (costly) image probe too.
            continue
        # Probe the matched image ONCE per match instead of once per
        # backlink — each probe is a full HTTP download of the image.
        status, image = check_image_accessibility(match['image_url'])
        for backlink in match_backlinks:
            urls.append(backlink['url'])
            backlinks.append(backlink['backlink'])
            crawl_dates.append(backlink['crawl_date'])
            image_urls.append(match['image_url'])
            image_statuses.append(status)
            images.append(image)

    df = pd.DataFrame({
        'URL': urls,
        'Backlink': backlinks,
        'Crawl Date': crawl_dates,
        'Image URL': image_urls,
        'Image Status': image_statuses
    })
    # Oldest sightings first; original index is preserved so it still
    # lines up with the `images` list.
    df = df.sort_values(by='Crawl Date', ascending=True)
    return df, output['first_crawl_date'], images
def main():
    """Streamlit entry point: upload an image, run a TinEye reverse
    search, and render per-match details, a results table, and a CSV
    download button."""
    st.title("Image Analysis Tool")
    st.header("Image Analysis")
    uploaded_file = st.file_uploader("Choose an image file", type=['png', 'jpg', 'jpeg'])
    if uploaded_file is not None:
        # Preview the uploaded image before analysis.
        image = Image.open(uploaded_file)
        st.image(image, caption='Uploaded Image', use_column_width=True)
        if st.button("Analyze Image"):
            with st.spinner("Analyzing image..."):
                try:
                    df, first_crawl_date, images = analyze_image(uploaded_file)
                    # Display first crawl date
                    st.subheader("First Found Date")
                    st.write(f"This image was first found on: {first_crawl_date}")
                    # Display matches with images and details
                    st.subheader("Matched Images")
                    # `df` is sorted by crawl date but keeps its original
                    # index (`idx`), which is the key into `images`.
                    # `position` gives the 1-based on-screen numbering —
                    # the previous `idx + 1` labels were out of order
                    # after the sort.
                    for position, (idx, row) in enumerate(df.iterrows(), start=1):
                        # One container per match for visual grouping.
                        with st.container():
                            if row['Image Status'] == "Accessible" and images[idx] is not None:
                                try:
                                    st.image(images[idx], use_column_width=True)
                                except Exception:
                                    # Rendering failed despite the earlier
                                    # probe — downgrade the status.
                                    df.at[idx, 'Image Status'] = "Not Accessible"
                                    st.warning("Image not accessible")
                            else:
                                st.warning("Image not accessible")
                            # Display details in a clean format
                            st.markdown("---")
                            st.markdown(f"**Match {position} Details:**")
                            st.markdown(f"**URL:** {row['URL']}")
                            st.markdown(f"**Backlink:** {row['Backlink']}")
                            st.markdown(f"**Crawl Date:** {row['Crawl Date']}")
                            st.markdown(f"**Image Status:** {row['Image Status']}")
                            st.markdown("---")
                    # Add a separator before the DataFrame
                    st.markdown("---")
                    st.markdown("### Complete Results Table")
                    # Display the DataFrame with clickable links
                    st.dataframe(
                        df,
                        use_container_width=True,
                        hide_index=True,
                        column_config={
                            "URL": st.column_config.LinkColumn("URL"),
                            "Backlink": st.column_config.LinkColumn("Backlink"),
                            "Crawl Date": st.column_config.DateColumn("Crawl Date"),
                            "Image URL": st.column_config.LinkColumn("Image URL"),
                            "Image Status": st.column_config.TextColumn("Image Status")
                        }
                    )
                    # Download button for results
                    csv = df.to_csv(index=False)
                    st.download_button(
                        label="Download Results as CSV",
                        data=csv,
                        file_name="image_analysis_results.csv",
                        mime="text/csv"
                    )
                except Exception as e:
                    # Surface API / parsing failures in the UI rather
                    # than crashing the app.
                    st.error(f"An error occurred: {str(e)}")


if __name__ == "__main__":
    main()