import gradio as gr
from huggingface_hub import HfApi, list_files_info, get_full_repo_name
import requests
import json
import os
from PIL import Image
import urllib.request
import uuid
# One-off identifier for this app session (its consumer is not visible in this chunk).
uid=uuid.uuid4()
# NOTE(review): intentionally empty — the HfApi client below is therefore
# unauthenticated. The real credential is loaded into token_self instead;
# confirm this split (anonymous listing vs. authenticated writes) is deliberate.
token = ""
# Hub dataset that indexes ai-tube videos; also fetched via raw HTTP in load_vid.
source_dataset = "jbilcke-hf/ai-tube-index"
# Hub client built with the empty token above, i.e. anonymous access.
api = HfApi(token=token)
# Credential from the environment (e.g. Space secret); raises KeyError if HF_TOKEN is unset.
token_self = os.environ['HF_TOKEN']
def find_dataset(author=""):
    """Search the Hub for "ai-tube" datasets and return them as a channel picker.

    Parameters:
        author: Hub username/org to restrict the search to; "" searches all authors.

    Returns:
        gr.Dropdown: interactive "Channels" dropdown whose choices are the
        matching dataset repo ids (e.g. "user/ai-tube-my-channel").
    """
    results = api.list_datasets(author=author, search="ai-tube")
    channels = []
    for ds in results:
        try:
            channels.append(ds.id)
        except Exception:
            # Best-effort: skip any result entry that lacks an `id` attribute.
            pass
    return gr.Dropdown(label="Channels", choices=channels, interactive=True)
def load_vid(channel):
out_html=""
label_html=""
vid_list=[]
sync_list=[]
try:
file_box = [info.path for info in list_files_info(f'{channel}', repo_type="dataset")]
ai_tube_box = [info.path for info in list_files_info(f'{source_dataset}', repo_type="dataset")]
for file in file_box:
if file.startswith("prompt_"):
prompt_slug = file.split("prompt_",1)[1].split(".",1)[0]
out_html=f'{out_html}
• {file}
'
#print(prompt_slug)
for vid_file in ai_tube_box:
try:
vid_slug=vid_file.split("videos/",1)[1].split(".",1)[0]
#print(vid_slug)
try:
if vid_slug == prompt_slug:
out_html=f'{out_html}
+ {vid_file}
'
if vid_file.endswith(".json"):
r = requests.get(f'https://huggingface.co/datasets/jbilcke-hf/ai-tube-index/raw/main/videos/{vid_slug}.json')
#r = requests.get(f'https://huggingface.co/datasets/jbilcke-hf/ai-tube-index/blob/main/videos/00b4bcda-7b4a-40f8-9833-e490425a7b91.mp4')
json_out=r.json()
label_html=f'{label_html}{json_out["label"]} {json_out["description"]} '
#print (r.json())
if vid_file.endswith(".mp4"):
vid_list.append(json_out["label"])
sync_list.append(vid_file)
#