import requests
import os
import gradio as gr
from huggingface_hub import HfApi, update_repo_visibility
from slugify import slugify
import re
import uuid
from typing import Optional
import json
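
# Helpers for fetching a CivitAI model's metadata, downloading its weights and
# preview images, and building a Hugging Face model card (README.md) from them.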

def get_json_data(url):
    # The model ID is the path segment right after /models/ in the CivitAI page URL
    api_url = f"https://civitai.com/api/v1/models/{url.split('/')[4]}"
    try:
        response = requests.get(api_url)
        response.raise_for_status()
        return response.json()
    except requests.exceptions.RequestException as e:
        print(f"Error fetching JSON data: {e}")
        return None

def check_nsfw(json_data):
    # Reject the model if it is flagged NSFW, or if any preview image is rated above "None"
    if json_data["nsfw"]:
        return False
    for model_version in json_data["modelVersions"]:
        for image in model_version["images"]:
            if image["nsfw"] != "None":
                return False
    return True

def extract_info(json_data):
    # Only SDXL LoRAs are supported; use the first model version with a matching base model
    if json_data["type"] == "LORA":
        for model_version in json_data["modelVersions"]:
            if model_version["baseModel"] in ["SDXL 1.0", "SDXL 0.9"]:
                for file in model_version["files"]:
                    if file["primary"]:
                        # Start by adding the primary file to the list
                        urls_to_download = [{"url": file["downloadUrl"], "filename": file["name"], "type": "weightName"}]
                        # Then append all image URLs to the list
                        for image in model_version["images"]:
                            urls_to_download.append({
                                "url": image["url"],
                                "filename": os.path.basename(image["url"]),
                                "type": "imageName",
                                # "meta" can be missing or null for some images, so fall back to an empty prompt
                                "prompt": (image.get("meta") or {}).get("prompt", "")
                            })
                        info = {
                            "urls_to_download": urls_to_download,
                            "id": model_version["id"],
                            "modelId": model_version["modelId"],
                            "name": json_data["name"],
                            "description": json_data["description"],
                            "trainedWords": model_version["trainedWords"],
                            "creator": json_data["creator"]["username"],
                            "tags": json_data["tags"]
                        }
                        return info
    return None
def download_files(info, folder="."):
downloaded_files = {
"imageName": [],
"imagePrompt": [],
"weightName": []
}
for item in info["urls_to_download"]:
download_file(item["url"], item["filename"], folder)
downloaded_files[item["type"]].append(item["filename"])
if(item["type"] == "imageName"):
prompt_clean = re.sub(r'<.*?>', '', item["prompt"])
downloaded_files["imagePrompt"].append(prompt_clean)
return downloaded_files
def download_file(url, filename, folder="."):
try:
response = requests.get(url)
response.raise_for_status()
with open(f"{folder}/{filename}", 'wb') as f:
f.write(response.content)
except requests.exceptions.RequestException as e:
raise gr.Error(f"Error downloading file: {e}")
def process_url(url, do_download=True, folder="."):
json_data = get_json_data(url)
if json_data:
if check_nsfw(json_data):
info = extract_info(json_data)
if info:
if(do_download):
downloaded_files = download_files(info, folder)
else:
downloaded_files = []
return info, downloaded_files
else:
raise gr.Error("Only SDXL LoRAs are supported for now")
else:
raise gr.Error("This model has content tagged as unsafe by CivitAI")
else:
raise gr.Error("Something went wrong in fetching CivitAI API")

def create_readme(info, downloaded_files, is_author=True, folder="."):
    readme_content = ""
    original_url = f"https://civitai.com/models/{info['modelId']}"
    non_author_disclaimer = f'This model was originally uploaded on [CivitAI]({original_url}), by [{info["creator"]}](https://civitai.com/user/{info["creator"]}/models). The information below was provided by the author on CivitAI:'
    default_tags = ["text-to-image", "stable-diffusion", "lora", "diffusers"]
    civit_tags = [t for t in info["tags"] if t not in default_tags]
    widget_prompts = "\n- text: ".join(downloaded_files["imagePrompt"])
    tags = default_tags + civit_tags
    unpacked_tags = "\n- ".join(tags)
    content = f"""---
license: other
tags:
- {unpacked_tags}
base_model: stabilityai/stable-diffusion-xl-base-1.0
instance_prompt: {info['trainedWords'][0] if 'trainedWords' in info and len(info['trainedWords']) > 0 else ''}
widget:
- text: {widget_prompts}
---

# {info["name"]}

{non_author_disclaimer if not is_author else ''}

![Image 0]({downloaded_files["imageName"][0]})

{info["description"]}
"""
    for index, (image, prompt) in enumerate(zip(downloaded_files["imageName"], downloaded_files["imagePrompt"])):
        # The first image is already shown above, so the examples section starts at index 1
        if index == 1:
            content += f"## Image examples for the model:\n![Image {index}]({image})\n> Prompt: {prompt}"
        elif index > 1:
            content += f"\n![Image {index}]({image})\n> Prompt: {prompt}"
    readme_content += content + "\n"
    print(readme_content)
    with open(f"{folder}/README.md", "w") as file:
        file.write(readme_content)

def get_creator(username):
    # Query CivitAI's tRPC endpoint for a creator profile; this requires a logged-in
    # session cookie supplied through the COOKIE_INFO environment variable
    url = f"https://civitai.com/api/trpc/user.getCreator?input=%7B%22json%22%3A%7B%22username%22%3A%22{username}%22%2C%22authed%22%3Atrue%7D%7D"
    headers = {
        "authority": "civitai.com",
        "accept": "*/*",
        "accept-language": "en-BR,en;q=0.9,pt-BR;q=0.8,pt;q=0.7,es-ES;q=0.6,es;q=0.5,de-LI;q=0.4,de;q=0.3,en-GB;q=0.2,en-US;q=0.1,sk;q=0.1",
        "content-type": "application/json",
        "cookie": f'{os.environ["COOKIE_INFO"]}',
        "if-modified-since": "Tue, 22 Aug 2023 07:18:52 GMT",
        "referer": f"https://civitai.com/user/{username}/models",
        "sec-ch-ua": "\"Not.A/Brand\";v=\"8\", \"Chromium\";v=\"114\", \"Google Chrome\";v=\"114\"",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "macOS",
        "sec-fetch-dest": "empty",
        "sec-fetch-mode": "cors",
        "sec-fetch-site": "same-origin",
        "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
    }
    response = requests.get(url, headers=headers)
    return response.json()

def extract_huggingface_username(username):
    # Look through the creator's CivitAI profile links for a Hugging Face profile URL
    data = get_creator(username)
    links = data.get('result', {}).get('data', {}).get('json', {}).get('links', [])
    for link in links:
        url = link.get('url', '')
        if url.startswith('https://huggingface.co/') or url.startswith('https://www.huggingface.co/'):
            username = url.split('/')[-1]
            return username
    return None

def check_civit_link(profile: Optional[gr.OAuthProfile], url):
    info, _ = process_url(url, do_download=False)
    hf_username = extract_huggingface_username(info['creator'])
    attributes_methods = dir(profile)
    #if(not hf_username):
    #    no_username_text = f'If you are {info["creator"]} on CivitAI, hi! Your CivitAI profile seems to not have information about your Hugging Face account. Please visit https://civitai.com/user/account and include it there (if you are not {info["creator"]}, you cannot submit their model at this time)'
    #    return no_username_text, gr.update(interactive=False), gr.update(visible=True), gr.update(visible=False)
    #if(profile.preferred_username != hf_username):
    #    unmatched_username_text = '