# Detect problematic images with an NSFW image-classification model and delete them so the content passes review
import os
from typing import Optional

from PIL import Image
from dotenv import load_dotenv
from transformers import pipeline


def get_nsfw_score(image_path: str, model) -> Optional[float]:
    # Run the classifier on one image and return its 'nsfw' score (None if the label is absent)
    img = Image.open(image_path)
    result = model(images=img)
    nsfw_score = next((item['score'] for item in result if item['label'] == 'nsfw'), None)
    return nsfw_score


if __name__ == '__main__':
    load_dotenv()
    # Load the classification model
    model = pipeline("image-classification", model="Falconsai/nsfw_image_detection")

    # Path of the target subdirectory under the current working directory
    img_path = 'manga'
    subdir_path = os.path.join(os.getcwd(), img_path)

    # Collect image files, including those in nested subdirectories
    image_files = []
    for root, dirs, files in os.walk(subdir_path):
        for file in files:
            if file.endswith(".jpg") or file.endswith(".png"):
                image_files.append(os.path.relpath(os.path.join(root, file)))

    for image_path in image_files:
        score = get_nsfw_score(image_path, model)  # float score, or None if no 'nsfw' label was returned
        if score is not None and score > 0.5:
            print("Problematic image found, deleting it to pass review:", image_path)
            os.remove(image_path)
        else:
            print(image_path, "image is fine")
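
# A minimal sketch of batched classification, assuming the transformers
# image-classification pipeline is called with a list of file paths (it accepts
# paths or PIL images, and lists of either, returning one result list per input).
# This is an optional speed-up, not part of the script above; the helper name
# get_nsfw_scores_batch and the batch_size value are illustrative.
def get_nsfw_scores_batch(image_paths, model, batch_size=8):
    # One batched forward pass instead of one pipeline call per image
    results = model(image_paths, batch_size=batch_size)
    return [
        next((item['score'] for item in res if item['label'] == 'nsfw'), None)
        for res in results
    ]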