import io
import os
import re
import subprocess
from zipfile import ZipFile

import pandas as pd
import streamlit as st
from PIL import Image, ImageFilter, UnidentifiedImageError

# Prefix that the scraper prepends to downloaded gallery folders (a sanitised source URL).
sth = "https___nhentai_net_g_"
# Parquet index of per-image NSFW classification results (one label per file).
parquet_file = "nsfw_classification_results.parquet"
# Log file cleared before scraping and shown/downloaded in the Logs view.
log_file = "app_log.txt"
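# Expected on-disk layout, inferred from the code below (not verified here):
#   scraped_images/
#       <sth><gallery_id>/              # e.g. "https___nhentai_net_g_12345" (hypothetical id)
#           page_1.jpg, page_2.png, ...
# scrape_images_worker.py is assumed to create these folders and indexer.py to fill
# nsfw_classification_results.parquet with 'file_path' and 'label' columns.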
def sort_files_by_page_number(file_list):
    """Sort image file names numerically by the page number in 'page_<n>.<ext>'."""
    def extract_page_number(filename):
        match = re.search(r'page_(\d+)\.(jpg|png)', filename)
        if match:
            return int(match.group(1))
        # File names that do not match the pattern sort first.
        return 0
    return sorted(file_list, key=extract_page_number)
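# Illustrative example (not executed), assuming the scraper's page_<n> naming:
#   sort_files_by_page_number(["page_10.jpg", "page_2.png", "page_1.jpg"])
#   -> ["page_1.jpg", "page_2.png", "page_10.jpg"]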
def get_image_folders(base_folder='scraped_images'):
    """Return folder names under base_folder, newest first, with the URL prefix stripped."""
    if not os.path.exists(base_folder):
        os.makedirs(base_folder)

    folder_paths = [
        os.path.join(base_folder, f)
        for f in os.listdir(base_folder)
        if os.path.isdir(os.path.join(base_folder, f))
    ]

    folder_info = []
    for folder_path in folder_paths:
        mtime = os.path.getmtime(folder_path)
        folder_name = os.path.basename(folder_path)
        if folder_name.startswith(sth):
            folder_name = folder_name.replace(sth, "")
        folder_info.append((folder_name, mtime, folder_path))

    # Newest folders first (sorted by modification time).
    folder_info.sort(key=lambda x: x[1], reverse=True)
    return [info[0] for info in folder_info]
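# The return value is a list of display names, newest folder first; hypothetical
# example with two scraped galleries (prefix already stripped):
#   get_image_folders()  ->  ["67890", "12345"]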
def create_zip_of_folder(folder_path, zip_name):
    with ZipFile(zip_name, 'w') as zipf:
        for root, dirs, files in os.walk(folder_path):
            for file in files:
                file_path = os.path.join(root, file)
                zipf.write(file_path, os.path.relpath(file_path, folder_path))
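# Typical use (see the download button in the "Selected Folder" view below). Paths
# inside the archive are relative to the folder, e.g. "page_1.jpg" rather than the
# full "scraped_images/..." path; "<folder>" is a placeholder:
#   create_zip_of_folder(os.path.join("scraped_images", "<folder>"), "<folder>.zip")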
def run_subprocess(command):
    process = subprocess.Popen(
        command,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        text=True
    )
    # communicate() drains stdout/stderr; wait() alone can deadlock once the
    # pipe buffers fill up.
    process.communicate()
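# Called below when scraping starts, e.g.:
#   run_subprocess(["python3", "scrape_images_worker.py", url])
#   run_subprocess(["python3", "indexer.py"])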
def load_parquet_data(parquet_file):
    if os.path.exists(parquet_file):
        return pd.read_parquet(parquet_file)
    st.error(f"{parquet_file} が見つかりません。スクレイピング後にインデックスが作成される必要があります。")
    return None
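# Expected schema of the parquet index, inferred from the lookups further down:
# one row per image with at least
#   file_path : str, e.g. "scraped_images/<folder>/page_1.jpg"
#   label     : str, where "unsafe" (case-insensitive) triggers blurring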
def apply_gaussian_blur_if_unsafe(image, label, show_unsafe):
    # Blur images labelled "unsafe" unless the viewer explicitly opted in.
    if label.lower() == "unsafe" and not show_unsafe:
        image = image.filter(ImageFilter.GaussianBlur(18))
    img_byte_arr = io.BytesIO()
    image.save(img_byte_arr, format='PNG')
    return img_byte_arr.getvalue()
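# Minimal sketch of the intended use (the PNG bytes are what st.image receives
# below); the path is hypothetical:
#   img = Image.open("scraped_images/<folder>/page_1.jpg")
#   png_bytes = apply_gaussian_blur_if_unsafe(img, "unsafe", show_unsafe=False)
#   st.image(png_bytes)  # rendered blurred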
def open_folder(folder):
    # on_click callback for the gallery buttons: remember the folder and switch views.
    st.session_state['selected_folder'] = folder
    st.session_state['current_view'] = 'Selected Folder'
st.title('画像ギャラリーとダウンロード')

# Persisted UI state: whether unsafe images are shown without blurring.
if 'show_unsafe' not in st.session_state:
    st.session_state['show_unsafe'] = False
st.session_state['show_unsafe'] = st.checkbox('Unsafe画像をブラーなしで表示', value=st.session_state['show_unsafe'])

df = load_parquet_data(parquet_file)

url = st.text_input('スクレイピングするURLを入力してください', '')

views = ["Gallery", "Logs", "Selected Folder"]
if 'current_view' not in st.session_state:
    st.session_state['current_view'] = 'Gallery'

selected_view = st.radio("ビューを選択", views, index=views.index(st.session_state['current_view']))
if selected_view != st.session_state['current_view']:
    st.session_state['current_view'] = selected_view
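# Note: 'current_view' can change either via the radio widget above or
# programmatically in open_folder(); passing index=views.index(...) keeps the
# radio in sync after a programmatic change.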
if st.session_state['current_view'] == "Gallery":
    st.header("ギャラリー")

    if st.button('スクレイピングを開始'):
        if url:
            # Truncate the previous log before launching the workers.
            open(log_file, 'w').close()
            run_subprocess(["python3", "scrape_images_worker.py", url])
            run_subprocess(["python3", "indexer.py"])
            st.success("スクレイピングとインデックス作成が完了しました。")
        else:
            st.warning("URLが入力されていません。")

    folders = get_image_folders()

    if folders:
        col1, col2 = st.columns(2)
        if 'selected_folder' not in st.session_state:
            st.session_state['selected_folder'] = None

        # Show the first page of each folder as a thumbnail, alternating between the two columns.
        for i, folder in enumerate(folders):
            if "http" in folder:
                folder_path = os.path.join('scraped_images', folder)
            else:
                folder_path = os.path.join('scraped_images', sth + folder)
            image_files = [f for f in os.listdir(folder_path) if f.endswith(('jpg', 'png'))]
            image_files = sort_files_by_page_number(image_files)

            if image_files:
                target_col = col1 if i % 2 == 0 else col2
                with target_col:
                    st.image(os.path.join(folder_path, image_files[0]), caption=f"{folder} - 1ページ目", use_column_width=True)
                    st.button(f'{folder} を開く', key=f"open_{folder}_{i}", on_click=open_folder, args=(folder,))
    else:
        st.write('画像フォルダが見つかりません。')
elif st.session_state['current_view'] == "Logs":
    st.header("ログ")

    if os.path.exists(log_file):
        with open(log_file, 'rb') as log_file_binary:
            log_bytes = log_file_binary.read()
            st.download_button(
                label="ログをダウンロード",
                data=log_bytes,
                file_name="app_log.txt",
                mime="text/plain"
            )
        # Read the log as text and render it in a fenced code block; use a name other
        # than 'log_file' so the module-level path is not shadowed.
        with open(log_file, 'r') as log_text:
            st.markdown(f"```text\n{log_text.read()}\n```")
    else:
        st.write("ログがありません。スクレイピングを開始してください。")
elif st.session_state['current_view'] == "Selected Folder":
    st.header("選択されたフォルダ")

    if 'selected_folder' in st.session_state and st.session_state['selected_folder']:
        selected_folder = st.session_state['selected_folder']

        # Folder names with the prefix stripped need it restored to rebuild the path.
        if "http" in selected_folder:
            folder_path = os.path.join('scraped_images', selected_folder)
        else:
            folder_path = os.path.join('scraped_images', sth + selected_folder)

        st.subheader(f"フォルダ: {selected_folder} の画像一覧")

        if df is not None:
            image_files = [f for f in os.listdir(folder_path) if f.endswith(('jpg', 'png'))]
            image_files = sort_files_by_page_number(image_files)

            if image_files:
                for image_file in image_files:
                    image_path = os.path.join(folder_path, image_file)
                    label_row = df[df['file_path'] == image_path]
                    label = label_row['label'].values[0] if not label_row.empty else "Unknown"

                    try:
                        image = Image.open(image_path)
                        img_byte_arr = apply_gaussian_blur_if_unsafe(image, label, st.session_state['show_unsafe'])
                        st.image(img_byte_arr, caption=f"{image_file} - {label}", use_column_width=True)
                    except UnidentifiedImageError:
                        st.error(f"🚫 画像ファイルを識別できません: {image_file}")
                        continue
            else:
                st.warning("選択されたフォルダに画像が存在しません。")

        zip_name = f'{selected_folder}.zip'
        if st.button('画像をダウンロード'):
            create_zip_of_folder(folder_path, zip_name)
            with open(zip_name, 'rb') as f:
                st.download_button('ダウンロード', f, file_name=zip_name)
    else:
        st.write('画像フォルダが選択されていません。Galleryビューでフォルダを選択してください。')