kwabs22 committed f6ac57c (parent: 0599bec)
Offline changes added (some links to spaces, etc.)
app.py CHANGED
@@ -27,7 +27,7 @@ from langdetect import detect
 import datetime
 import cv2
 import math
-
+from langchain.document_loaders import YoutubeLoader #need youtube_transcpt_api and pytube installed
 from youtube_transcript_api import YouTubeTranscriptApi
 from spacy_syllables import SpacySyllables #https://spacy.io/universe/project/spacy_syllables/
 import torch
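The only change in this hunk is the new langchain YoutubeLoader import (the author notes it also needs youtube_transcript_api and pytube). Below is a minimal sketch, not taken from app.py, of how that loader is typically used to pull a transcript; the URL is a placeholder.

from langchain.document_loaders import YoutubeLoader  # also requires youtube_transcript_api and pytube

def youtube_transcript_text(url: str) -> str:
    # Build a loader for the video and fetch its transcript as Document objects.
    loader = YoutubeLoader.from_youtube_url(url, add_video_info=False)
    docs = loader.load()
    # Each Document carries the transcript text in page_content.
    return " ".join(doc.page_content for doc in docs)

# Example call with a placeholder video id:
# print(youtube_transcript_text("https://www.youtube.com/watch?v=VIDEO_ID"))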
@@ -1778,7 +1778,7 @@ def lingualinkassist(text, language):
 
 def w4wsidebysidereadergen(text, langdest):
 #FrontRevSentChunk as reference
-FinalOutput = "
+FinalOutput = ""
 Translated = "FWNWO: \n"
 words = text.split()
 w4wsidebysidtranslator = Translator()
@@ -1795,8 +1795,11 @@ def w4wsidebysidereadergen(text, langdest):
 # print(f"Pronunciation: {obj.pronunciation}\n")
 FinalOutput += obj.origin + f" ({obj.text}) "
 Translated += obj.text + " "
-speech = gTTS(text=
+speech = gTTS(text=FinalOutput, lang=langdest[:2], slow="False")
 speech.save("CurrentSidebySideTTSFile.mp3")
+
+FinalOutput = "Side by Side Version: " + FinalOutput
+
 analysisPrompt = f"{ Translated } and \n\nFWFWO: \n{ translatedFWO.text } \n\nForeign Words Native Word Order and Foreign Word Order \nIf you had to make the notes on the word by word considerations to transform FWNWO to FWFWO what would that be? (A simple game idea where your response will be the rubrik to mark the players response against)"
 return FinalOutput, Translated, "FWFWO: \n" + translatedFWO.text, "CurrentSidebySideTTSFile.mp3", analysisPrompt
 
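These two hunks rework w4wsidebysidereadergen: FinalOutput now starts empty, the word-for-word gloss is fed to gTTS (language taken from the first two characters of langdest), and the "Side by Side Version: " label is prepended only after the MP3 is saved so the label itself is not spoken. Note that slow="False" passes a truthy string to gTTS; slow=False is presumably what is intended. A self-contained sketch of the same pattern follows, assuming Translator comes from googletrans as elsewhere in app.py; it is an illustration, not the commit's code.

from gtts import gTTS
from googletrans import Translator  # assumed source of Translator() in app.py

def side_by_side_tts(text: str, langdest: str = "de") -> str:
    translator = Translator()
    # Word-for-word gloss: each original word followed by its translation in brackets.
    gloss = " ".join(
        f"{word} ({translator.translate(word, dest=langdest).text})" for word in text.split()
    )
    # Synthesise the gloss first, then add the display label so it is not spoken.
    speech = gTTS(text=gloss, lang=langdest[:2], slow=False)
    speech.save("CurrentSidebySideTTSFile.mp3")
    return "Side by Side Version: " + gloss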
@@ -2014,38 +2017,51 @@ SplitVideoOutput = gr.FileExplorer(root_dir='./splitvideo')
 
 with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', secondary_hue='red', neutral_hue='blue', )
 gr.HTML('<div style="display: flex; justify-content: center; align-items: center; height: 100%;"> Reading comprehension speed through picture based compression (collage), Live Image Subtitles and Listening Comprehension Test - <a href="https://chat.openai.com/g/g-bYMSVlb8y-lingua-link"> -- Lingua Link (Simple GPT for assistinng image creation) -- </a> | </div><div style="display: flex; justify-content: center; align-items: center; height: 100%;"> ---- Under Construction: Very Slowly figuring out what AI intergrated interface means (Chat vs Forms vs Function calling vs Sensor + Trigger vs Agent) | How to end copy paste once and for all? ---- </div> <div style="display: flex; justify-content: center; align-items: center; height: 100%;"> All the apis from the below space need to be treated like RAG as notes for the LLM to read before providing its answer </div>')
-with gr.Accordion("
-with gr.
-
-
-
-
-
-with gr.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+with gr.Accordion("Some Useful Spaces", open=False):
+with gr.Accordion("Translation or STT HF Spaces/Sites (Click Here to Open) - Use to get rough translations", open=False):
+with gr.Row():
+linktotranslate = gr.Dropdown(choices=["https://facebook-seamless-m4t-v2-large.hf.space", "https://hf-audio-whisper-large-v3.hf.space", "https://pyf98-owsm-v3-demo.hf.space", "https://kadirnar-multilingual-translation.hf.space", "https://geonmo-nllb-translation-demo.hf.space"], label="Choose/Cancel type any .hf.space link here (can also type a link)'", allow_custom_value=True)
+translatespacebtn = gr.Button("Use the chosen URL to load interface with a translate model")
+translatespace = gr.HTML("Translate Space Chosen will load here")
+translatespacebtn.click(display_website, inputs=linktotranslate, outputs=translatespace)
+with gr.Accordion("Audio Gen HF Spaces/Sites (Click Here to Open)", open=False):
+with gr.Row():
+linktoaudiogen = gr.Dropdown(choices=["https://coqui-xtts.hf.space", "https://suno-bark.hf.space", "https://mrfakename-metavoice-1b-v0-1.hf.space"], label="Choose/Cancel type any .hf.space link here (can also type a link)'", allow_custom_value=True)
+audiogenspacebtn = gr.Button("Use the chosen URL to load interface with a chat model")
+audiogenspace = gr.HTML("Chat Space Chosen will load here")
+audiogenspacebtn.click(display_website, inputs=linktoaudiogen, outputs=audiogenspace)
+with gr.Accordion("Image Gen or Animation HF Spaces/Sites (Click Here to Open) - Use with the image placeholder in Workflows tab", open=False):
+with gr.Row():
+linktoimagegen = gr.Dropdown(choices=["https://wangfuyun-animatelcm.hf.space", "https://artgan-diffusion-api.hf.space", "https://multimodalart-stable-cascade.hf.space", "https://radames-real-time-text-to-image-sdxl-lightning.hf.space", "https://ap123-sdxl-lightning.hf.space", "https://google-sdxl.hf.space", "https://guoyww-animatediff.hf.space", "https://segmind-segmind-stable-diffusion.hf.space", "https://simianluo-latent-consistency-model.hf.space", "https://artificialguybr-studio-ghibli-lora-sdxl.hf.space", "https://artificialguybr-pixel-art-generator.hf.space", "https://fffiloni-sdxl-control-loras.hf.space"], label="Choose/Cancel type any .hf.space link here (can also type a link)'", allow_custom_value=True)
+imagegenspacebtn = gr.Button("Use the chosen URL to load interface with a chat model")
+imagegenspace = gr.HTML("Chat Space Chosen will load here")
+imagegenspacebtn.click(display_website, inputs=linktoimagegen, outputs=imagegenspace)
+with gr.Accordion("Vision HF Spaces/Sites (Click Here to Open)", open=False):
+with gr.Row():
+linktovisionund = gr.Dropdown(choices=["", "https://languagebind-moe-llava.hf.space"], label="Choose/Cancel type any .hf.space link here (can also type a link)'", allow_custom_value=True)
+visionundspacebtn = gr.Button("Use the chosen URL to load interface with a chat model")
+visionundspace = gr.HTML("Chat Space Chosen will load here")
+visionundspacebtn.click(display_website, inputs=linktovisionund, outputs=visionundspace)
+with gr.Accordion("LLM HF Spaces/Sites (Click Here to Open) - Use 'Acronym Map Creation Space' Tab with this - Ask for Translation of image tags made below, sentence to emojis, Wordlists, Test Conversations, Get Grammar Explanations etc., Can use GPT-4 or new SOTA to review the conversation", open=False):
+with gr.Row():
+linktochat = gr.Dropdown(choices=["https://sdk.vercel.ai/docs", "https://labs.perplexity.ai/", "https://chat.lmsys.org", "https://stabilityai-stablelm-2-1-6b-zephyr.hf.space", "https://deepseek-ai-deepseek-coder-7b-instruct.hf.space", "https://01-ai-yi-34b-chat.hf.space", "https://ysharma-zephyr-playground.hf.space", "https://huggingfaceh4-zephyr-chat.hf.space", "https://osanseviero-mistral-super-fast.hf.space", "https://artificialguybr-qwen-14b-chat-demo.hf.space", "https://huggingface-projects-llama-2-7b-chat.hf.space", "https://ysharma-explore-llamav2-with-tgi.hf.space", "https://mosaicml-mpt-30b-chat.hf.space", "https://huggingfaceh4-falcon-chat.hf.space", "https://uwnlp-guanaco-playground-tgi.hf.space", "https://stabilityai-stablelm-tuned-alpha-chat.hf.space", "https://mosaicml-mpt-7b-storywriter.hf.space", "https://huggingfaceh4-starchat-playground.hf.space", "https://bigcode-bigcode-playground.hf.space", "https://mosaicml-mpt-7b-chat.hf.space", "https://huggingchat-chat-ui.hf.space", "https://togethercomputer-openchatkit.hf.space"], label="Choose/Cancel type any .hf.space link here (can also type a link)'", allow_custom_value=True)
+chatspacebtn = gr.Button("Use the chosen URL to load interface with a chat model. For sdk.vercel click the chat button on the top left. For lymsys / chat arena copy the link and use a new tab")
+with gr.Accordion("Some prompt ideas", open=False):
+with gr.Accordion("Prompts in text (Manual copy paste)", open=False):
+gr.HTML(LLPromptIdeas)
+with gr.Group():
+promptidea0 = gr.Code(label="Prompt Idea 1", value=LLPromptIdeasasbtns[0])
+promptidea1 = gr.Code(label="Prompt Idea 2", value=LLPromptIdeasasbtns[1])
+promptidea2 = gr.Code(label="Prompt Idea 3", value=LLPromptIdeasasbtns[2])
+promptidea3 = gr.Code(label="Prompt Idea 4", value=LLPromptIdeasasbtns[3])
+promptidea4 = gr.Code(label="Prompt Idea 5", value=LLPromptIdeasasbtns[4])
+chatspace = gr.HTML("Chat Space Chosen will load here")
+chatspacebtn.click(display_website, inputs=linktochat, outputs=chatspace)
 #-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 #-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 with gr.Row():
 with gr.Column(scale=1):
-
gr.HTML(""" <div style="height: 350px; width: 100%; border: 1px solid black; overflow: auto;"> Some useful links <br> <a href='https://github.com/eugeneyan/open-llms'> -- Opensource List -- </a> | <a href='https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard'> -- Open LLM Leaderboard -- </a> | <a href='https://openxlab.org.cn/apps'> -- Openxlabs - Huggingface Alternative -- </a> | <a href='https://huggingface.co/spaces/sanchit-gandhi/whisper-jax'> -- Whisper JAX -- </a> | <a href="https://translate.google.com/?hl=en&tab=TT"> -- Google Translate -- </a> | <a href='https://huggingface.co/spaces/damo-vilab/modelscope-text-to-video-synthesis'> -- Modelscope Text to Video -- </a> | <a href='https://huggingface.co/spaces/stabilityai/stable-diffusion'> -- stable-diffusion 2 -- </a> | <a href='https://huggingface.co/spaces/stabilityai/stable-diffusion-1'> -- stable-diffusion 1 -- </a> | <a href='https://huggingface.co/spaces/kakaobrain/karlo'> -- karlo 1 -- </a> | <a href='https://huggingface.co/spaces/suno/bark'> -- Bark (TTS) -- </a> | <a href='https://chat.lmsys.org/'> -- Offline Text Model Demos -- </a> | <a href='https://huggingface.co/spaces/curt-park/segment-anything-with-clip'> -- SAM with Clip -- </a> | <a href='https://beta.elevenlabs.io/'> -- Eleven Labs -- </a> | <a href='https://www.d-id.com/'> -- Animate an Image -- </a> | <a href='https://voice.ai/'> -- Clone a voice -- </a> | <a href='https://openai.com/pricing'> -- OpenAI pricing -- </a> | <a href='https://huggingface.co/spaces/sohojoe/soho-clip-embeddings-explorer'> -- Image Training Data Search -- </a> | <a href='https://huggingface.co/spaces/huggingchat/chat-ui'> -- Huggingface Chat -- </a> | <a href='https://huggingface.co/spaces/bguisard/stable-diffusion-nano'> -- 128x128 Stable Diffusion (Fast) -- </a> | <a href='https://huggingface.co/spaces/colonelwatch/abstracts-index'> -- Search 95 million research abstracts -- </a> | <a href='https://huggingface.co/datasets/roneneldan/TinyStories'> -- Tiny Stories Dataset -- </a> | <a href='https://huggingface.co/spaces/lykeven/visualglm-6b'> -- Visualglm6b - Discuss images -- </a> | <a href='https://huggingface.co/spaces/xinyu1205/Recognize_Anything-Tag2Text'> -- RAM and Tag2Text -- </a> | <a href='https://huggingface.co/camenduru/potat1'> -- Potat1 Text2vid -- </a> | <a href='https://twitter.com/willdepue/status/1661781355452325889'> -- Alexandria Prohect (Will Deque) - Free Embeddings -- </a> | <a href='https://artsandculture.google.com/'> -- Google Arts and Culture Portal -- </a> | <a href='https://huggingface.co/spaces/Matthijs/whisper_word_timestamps'> -- Word Level Timestamps -- </a> | <a href='https://huggingface.co/spaces/zaanind/NLLB-translation'> -- NLLB 600M Demo -- </a> = <a href='https://github.com/facebookresearch/fairseq/tree/nllb'> -- NLLB Github -- </a> | <a href='https://huggingface.co/spaces/hysts/zeroscope-v2'> -- Zeroscope v2 Text to video -- </a> | <a href='https://huggingface.co/spaces/SpacesExamples/ComfyUI'> -- ComfyUI Text to Image -- </a> | <a href='https://huggingface.co/spaces/DeepFloyd/IF'> -- Deepfloyd IF - Text in image -- </a> | <a href='https://huggingface.co/spaces/ysharma/ChatGPT-Plugins-in-Gradio'> -- ChatGPT Custom Plugins Test Space -- </a> | <a href='https://www.reddit.com/r/LocalLLaMA/'> -- r/LocalLlama -- </a> | <a href='https://www.reddit.com/r/singularity/'> -- r/Singularity -- </a> | <a href='https://huggingface.co/spaces/hysts/SD-XL'> -- SD-XL Test Space -- </a> | <a href='https://huggingface.co/spaces/facebook/seamless_m4t'> -- 
Seamless M4T - Translation one stop shop -- </a> | <a href='https://huggingface.co/spaces/codellama/codellama-playground'> -- Code Llama playground -- </a> | <a href='https://huggingface.co/spaces/Voicemod/Text-to-Sing'> -- Text to sing -- </a> | <a href='https://huggingface.co/spaces/camenduru-com/webui'> -- Stable Diffusion Webui (Camenduru Space) -- </a> | <a href='https://huggingface.co/spaces/ysharma/WizardCoder34b'> -- Wizard Coder 34B -- </a> | <a href='https://huggingface.co/spaces/chansung/co-write-with-llama2'> -- Cowrite with llama2 -- </a> | <a href='https://huggingface.co/spaces/fffiloni/Image-to-Story'> -- Image to Story -- </a> | <a href='https://huggingface.co/spaces/fffiloni/CLIP-Interrogator-2'> -- Clip interrogator 2 -- </a> | <a href='https://github.com/THUDM/AgentBench'> -- Agent Benchmarks -- </a> | <a href='https://www.convex.dev/ai-town'> -- AI Town Live Demo -- </a> = <a href='https://github.com/a16z-infra/ai-town'> -- AI Town Repository (Deployment]) -- </a> | <a href='https://github.com/joonspk-research/generative_agents/tree/main'> -- Generative Agents: Interactive Simulacra of Human Behavior (Research paper Repository) -- </a> | <a href='https://huggingface.co/spaces/HuggingFaceM4/idefics_playground'> -- IDEFICS - open Multimodal model -- </a> | <a href='https://github.com/facebookresearch/belebele'> -- Belebele (Meta Dataset) -- </a> | <a href='https://huggingface.co/spaces/jbilcke-hf/ai-comic-factory'> -- AI Comic Factory -- </a> | <a href='https://github.com/camenduru'> -- CAMENDURU REPOS -- </a> | <a href='https://huggingface.co/datasets/b-mc2/sql-create-context'> -- SQL Dataset - A list of simple questions -- </a> | <a href='https://github.com/KillianLucas/open-interpreter'> -- Open Interpreter (alt to ChatGPT Pro) -- </a> | <a href='https://easywithai.com/fun-ai-tools/'> -- List - Easy with AI -- </a> | <a href='https://huggingface.co/spaces/Xenova/whisper-web'> -- Whisper Web (UI) -- </a> | <a href='https://blog.roblox.com/2023/09/revolutionizing-creation-roblox/'> -- Roblox Assistant -- </a> | <a href='https://huggingface.co/spaces/AP123/IllusionDiffusion'> -- Illusion Diffusion (Hide words or shapes in the image) -- </a> | <a href='https://huggingface.co/spaces/Shopify/background-replacement'> -- Background replacement - Shopify -- </a> | <a href='https://huggingface.co/spaces/multimodalart/LoraTheExplorer'> -- Lora The Explorer (SDXL) -- </a> | <a href='https://huggingface.co/spaces/XCLiu/InstaFlow'> -- InstaFlow (Under 1 second Inference) -- </a> | <a href='https://github.com/tairov/llama2.mojo'> -- TinyStories on mojo (230+ tk/s) -- </a> | <a href='https://emojis.alexandru.so/p/OHVEmfMwQl'> -- Any Emoji you want - emojijs -- </a> | <a href='https://huggingface.co/spaces/google/sdxl'> -- SDXL on TPUv5 -- </a> | <a href='https://huggingface.co/spaces/SimianLuo/Latent_Consistency_Model'> -- LCM - SD1.5 at 7secs per 4 images (after coldstart) -- </a> | <a href='https://huggingface.co/spaces/fffiloni/sdxl-control-loras'> -- SDXL Control Lora -- </a> | <a href='https://huggingface.co/spaces/aadnk/faster-whisper-webui'> -- Whisper WebUI -- </a> | <a href='https://huggingface.co/spaces/guoyww/AnimateDiff'> -- AnimateDiff: Create an image make a video -- </a> | <a href='https://huggingface.co/spaces/facebook/seamless-m4t-v2-large'> -- Seamless m4t v2 -- </a> | <a href='https://huggingface.co/spaces/Otter-AI/OtterHD-Demo'> -- OtterHD: Multimodal model -- </a> | <a href='https://ai.meta.com/blog/ego-exo4d-video-learning-perception/'> -- Ego-exo4d Multimodal 
dataset -- </a> | <a href='https://imagine.meta.com/'> -- Meta Imagine images (Free) -- </a> | <a href='https://www.mage.space/'> -- Mage Space images (Free) -- </a> | <a href='https://www.bing.com/images/create?FORM=GENILP'> -- Bing Image Creator (Free) -- </a> | <a href='https://jalammar.github.io/'> -- Jay Alammar Blog - Illustrated Transformer, Stable Diffusion and More -- </a> | <a href='https://huggingface.co/spaces/myshell-ai/OpenVoice'> -- OpenVoice - Open Source Voice Clone -- </a> |
+
gr.HTML(""" <div style="height: 350px; width: 100%; border: 1px solid black; overflow: auto;"> Some useful links <br> <a href='https://github.com/eugeneyan/open-llms'> -- Opensource List -- </a> | <a href='https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard'> -- Open LLM Leaderboard -- </a> | <a href='https://openxlab.org.cn/apps'> -- Openxlabs - Huggingface Alternative -- </a> | <a href='https://huggingface.co/spaces/sanchit-gandhi/whisper-jax'> -- Whisper JAX -- </a> | <a href="https://translate.google.com/?hl=en&tab=TT"> -- Google Translate -- </a> | <a href='https://huggingface.co/spaces/damo-vilab/modelscope-text-to-video-synthesis'> -- Modelscope Text to Video -- </a> | <a href='https://huggingface.co/spaces/stabilityai/stable-diffusion'> -- stable-diffusion 2 -- </a> | <a href='https://huggingface.co/spaces/stabilityai/stable-diffusion-1'> -- stable-diffusion 1 -- </a> | <a href='https://huggingface.co/spaces/kakaobrain/karlo'> -- karlo 1 -- </a> | <a href='https://huggingface.co/spaces/suno/bark'> -- Bark (TTS) -- </a> | <a href='https://chat.lmsys.org/'> -- Offline Text Model Demos -- </a> | <a href='https://huggingface.co/spaces/curt-park/segment-anything-with-clip'> -- SAM with Clip -- </a> | <a href='https://beta.elevenlabs.io/'> -- Eleven Labs -- </a> | <a href='https://www.d-id.com/'> -- Animate an Image -- </a> | <a href='https://voice.ai/'> -- Clone a voice -- </a> | <a href='https://openai.com/pricing'> -- OpenAI pricing -- </a> | <a href='https://huggingface.co/spaces/sohojoe/soho-clip-embeddings-explorer'> -- Image Training Data Search -- </a> | <a href='https://huggingface.co/spaces/huggingchat/chat-ui'> -- Huggingface Chat -- </a> | <a href='https://huggingface.co/spaces/bguisard/stable-diffusion-nano'> -- 128x128 Stable Diffusion (Fast) -- </a> | <a href='https://huggingface.co/spaces/colonelwatch/abstracts-index'> -- Search 95 million research abstracts -- </a> | <a href='https://huggingface.co/datasets/roneneldan/TinyStories'> -- Tiny Stories Dataset -- </a> | <a href='https://huggingface.co/spaces/lykeven/visualglm-6b'> -- Visualglm6b - Discuss images -- </a> | <a href='https://huggingface.co/spaces/xinyu1205/Recognize_Anything-Tag2Text'> -- RAM and Tag2Text -- </a> | <a href='https://huggingface.co/camenduru/potat1'> -- Potat1 Text2vid -- </a> | <a href='https://twitter.com/willdepue/status/1661781355452325889'> -- Alexandria Prohect (Will Deque) - Free Embeddings -- </a> | <a href='https://artsandculture.google.com/'> -- Google Arts and Culture Portal -- </a> | <a href='https://huggingface.co/spaces/Matthijs/whisper_word_timestamps'> -- Word Level Timestamps -- </a> | <a href='https://huggingface.co/spaces/zaanind/NLLB-translation'> -- NLLB 600M Demo -- </a> = <a href='https://github.com/facebookresearch/fairseq/tree/nllb'> -- NLLB Github -- </a> | <a href='https://huggingface.co/spaces/hysts/zeroscope-v2'> -- Zeroscope v2 Text to video -- </a> | <a href='https://huggingface.co/spaces/SpacesExamples/ComfyUI'> -- ComfyUI Text to Image -- </a> | <a href='https://huggingface.co/spaces/DeepFloyd/IF'> -- Deepfloyd IF - Text in image -- </a> | <a href='https://huggingface.co/spaces/ysharma/ChatGPT-Plugins-in-Gradio'> -- ChatGPT Custom Plugins Test Space -- </a> | <a href='https://www.reddit.com/r/LocalLLaMA/'> -- r/LocalLlama -- </a> | <a href='https://www.reddit.com/r/singularity/'> -- r/Singularity -- </a> | <a href='https://huggingface.co/spaces/hysts/SD-XL'> -- SD-XL Test Space -- </a> | <a href='https://huggingface.co/spaces/facebook/seamless_m4t'> -- 
Seamless M4T - Translation one stop shop -- </a> | <a href='https://huggingface.co/spaces/codellama/codellama-playground'> -- Code Llama playground -- </a> | <a href='https://huggingface.co/spaces/Voicemod/Text-to-Sing'> -- Text to sing -- </a> | <a href='https://huggingface.co/spaces/camenduru-com/webui'> -- Stable Diffusion Webui (Camenduru Space) -- </a> | <a href='https://huggingface.co/spaces/ysharma/WizardCoder34b'> -- Wizard Coder 34B -- </a> | <a href='https://huggingface.co/spaces/chansung/co-write-with-llama2'> -- Cowrite with llama2 -- </a> | <a href='https://huggingface.co/spaces/fffiloni/Image-to-Story'> -- Image to Story -- </a> | <a href='https://huggingface.co/spaces/fffiloni/CLIP-Interrogator-2'> -- Clip interrogator 2 -- </a> | <a href='https://github.com/THUDM/AgentBench'> -- Agent Benchmarks -- </a> | <a href='https://www.convex.dev/ai-town'> -- AI Town Live Demo -- </a> = <a href='https://github.com/a16z-infra/ai-town'> -- AI Town Repository (Deployment]) -- </a> | <a href='https://github.com/joonspk-research/generative_agents/tree/main'> -- Generative Agents: Interactive Simulacra of Human Behavior (Research paper Repository) -- </a> | <a href='https://huggingface.co/spaces/HuggingFaceM4/idefics_playground'> -- IDEFICS - open Multimodal model -- </a> | <a href='https://github.com/facebookresearch/belebele'> -- Belebele (Meta Dataset) -- </a> | <a href='https://huggingface.co/spaces/jbilcke-hf/ai-comic-factory'> -- AI Comic Factory -- </a> | <a href='https://github.com/camenduru'> -- CAMENDURU REPOS -- </a> | <a href='https://huggingface.co/datasets/b-mc2/sql-create-context'> -- SQL Dataset - A list of simple questions -- </a> | <a href='https://github.com/KillianLucas/open-interpreter'> -- Open Interpreter (alt to ChatGPT Pro) -- </a> | <a href='https://easywithai.com/fun-ai-tools/'> -- List - Easy with AI -- </a> | <a href='https://huggingface.co/spaces/Xenova/whisper-web'> -- Whisper Web (UI) -- </a> | <a href='https://blog.roblox.com/2023/09/revolutionizing-creation-roblox/'> -- Roblox Assistant -- </a> | <a href='https://huggingface.co/spaces/AP123/IllusionDiffusion'> -- Illusion Diffusion (Hide words or shapes in the image) -- </a> | <a href='https://huggingface.co/spaces/Shopify/background-replacement'> -- Background replacement - Shopify -- </a> | <a href='https://huggingface.co/spaces/multimodalart/LoraTheExplorer'> -- Lora The Explorer (SDXL) -- </a> | <a href='https://huggingface.co/spaces/XCLiu/InstaFlow'> -- InstaFlow (Under 1 second Inference) -- </a> | <a href='https://github.com/tairov/llama2.mojo'> -- TinyStories on mojo (230+ tk/s) -- </a> | <a href='https://emojis.alexandru.so/p/OHVEmfMwQl'> -- Any Emoji you want - emojijs -- </a> | <a href='https://huggingface.co/spaces/google/sdxl'> -- SDXL on TPUv5 -- </a> | <a href='https://huggingface.co/spaces/SimianLuo/Latent_Consistency_Model'> -- LCM - SD1.5 at 7secs per 4 images (after coldstart) -- </a> | <a href='https://huggingface.co/spaces/fffiloni/sdxl-control-loras'> -- SDXL Control Lora -- </a> | <a href='https://huggingface.co/spaces/aadnk/faster-whisper-webui'> -- Whisper WebUI -- </a> | <a href='https://huggingface.co/spaces/guoyww/AnimateDiff'> -- AnimateDiff: Create an image make a video -- </a> | <a href='https://huggingface.co/spaces/facebook/seamless-m4t-v2-large'> -- Seamless m4t v2 -- </a> | <a href='https://huggingface.co/spaces/Otter-AI/OtterHD-Demo'> -- OtterHD: Multimodal model -- </a> | <a href='https://ai.meta.com/blog/ego-exo4d-video-learning-perception/'> -- Ego-exo4d Multimodal 
dataset -- </a> | <a href='https://imagine.meta.com/'> -- Meta Imagine images (Free) -- </a> | <a href='https://www.mage.space/'> -- Mage Space images (Free) -- </a> | <a href='https://www.bing.com/images/create?FORM=GENILP'> -- Bing Image Creator (Free) -- </a> | <a href='https://jalammar.github.io/'> -- Jay Alammar Blog - Illustrated Transformer, Stable Diffusion and More -- </a> | <a href='https://huggingface.co/spaces/myshell-ai/OpenVoice'> -- OpenVoice - Open Source Voice Clone -- </a> | <a href='https://huggingface.co/spaces/fffiloni/live-vision'> -- Live-Vision HF Space - Live commentary on a video feed demo -- </a> | <a href='https://xenova.github.io/transformers.js/'> -- Transformers JS demo - Xenova (HF) -- </a> | <a href='https://huggingface.co/chat/assistants'> -- Huggingface Assistants -- </a> | <a href='https://huggingface.co/spaces/AP123/SDXL-Lightning'> -- 4-step SDXL Inference through LORA -- </a> | </div>""")
 with gr.Tabs() as nav1:
 with gr.Tab("Rep - HTML"):
 gr.HTML("UNWFWO = Unknown Native Word Foreign Word Order i.e. during active listening practice you only need the words you dont know")
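Every accordion added in the hunk above follows the same wiring: a gr.Dropdown of .hf.space URLs, a gr.Button, and a gr.HTML placeholder, with button.click(display_website, ...) filling the placeholder. display_website is defined elsewhere in app.py and is not part of this diff; the sketch below assumes it simply returns an iframe pointing at the chosen Space, so treat the function body as an illustration rather than the app's actual implementation.

import gradio as gr

def display_website(link: str) -> str:
    # Assumed behaviour (not shown in this diff): embed the chosen Space in an iframe.
    return f'<iframe src="{link}" width="100%" height="1000px"></iframe>'

with gr.Blocks() as demo:
    link = gr.Dropdown(
        choices=["https://hf-audio-whisper-large-v3.hf.space"],
        allow_custom_value=True,
        label="Pick or type an .hf.space URL",
    )
    load_btn = gr.Button("Load the chosen Space")
    frame = gr.HTML("The chosen Space will load here")
    # Same event wiring as the new accordions: dropdown -> button -> HTML slot.
    load_btn.click(display_website, inputs=link, outputs=frame)

# demo.launch()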
@@ -2422,7 +2438,7 @@ Each type of knowing involves different cognitive processes and levels of unders
 gr.Interface(fn=TextCompFormat, inputs=["textarea", HTMLCompMode], outputs="text", description="Convert Text to HTML Dropdown or Links which you paste in any html file")
 gr.Interface(fn=create_collapsiblebutton, inputs=["textbox", "textbox", "textarea"], outputs="textbox", description="Button and Div HTML Generator, Generate the HTML for a button and the corresponding div element.")
 with gr.Tab("Real-Time AI - Video/Audio/AR"):
-gr.HTML("<div style='display: flex; justify-content: center; align-items: center; height: 100%;'> Agents = Custom Software (Personalised UI and Mods, among other things) = Custom Environments (AR) <a href='https://github.com/KillianLucas/open-interpreter'> -- Open Interpreter -- </a> | <a href='https://github.com/microsoft/autogen'> -- Microsoft Autogen -- </a> | </div>")
+gr.HTML("<div style='display: flex; justify-content: center; align-items: center; height: 100%;'> Agents = Custom Software (Personalised UI and Mods, among other things) = Custom Environments (AR) <a href='https://github.com/KillianLucas/open-interpreter'> -- Open Interpreter -- </a> | <a href='https://github.com/microsoft/autogen'> -- Microsoft Autogen -- </a> | <a href='https://huggingface.co/chat/assistants'> -- Huggingface Assistants -- </a> | </div>")
 gr.HTML("Steerable Surveillance system = Assistant --- people will need to manage their own infrastructure or risk total invasion of privacy")
 gr.HTML("Cheap: Raspberry Pi / Pydroid and OpenCV and Tkinter = Frontend for OpenAI / OSS vision API as ChatGPT doesnt support video / real-time screenshot yet <br>Expensive - XREAL Air 2, Quest 3, Vision Pro, ")
 gr.HTML("HUD Experiment (Waiting for GPT4V API) - Full context of user situation + Ability to communicate in real-time to user using images (H100+ and low enough resolution and low enough steps - it/s = fps) - just like google maps but for real life")
@@ -2521,4 +2537,4 @@ Each type of knowing involves different cognitive processes and levels of unders
 gr.Textbox(label='Use this text to hold translations of the SQL rows in the above linked dataset (A kind of What I say vs what I want)')
 
 
-lliface.queue().launch(share=True) #
+lliface.queue().launch(share=True) #docker #(inbrowser="true") #colab
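The trailing comments on the launch line record the variants the author toggles between environments. As a rough guide, not part of the commit: share=True requests a temporary public gradio.live URL, inbrowser=True opens a local browser tab for desktop development, and a Docker deployment typically binds to 0.0.0.0 instead.

lliface.queue().launch(share=True)                                  # hosted Space / quick public sharing
# lliface.queue().launch(inbrowser=True)                            # local development
# lliface.queue().launch(server_name="0.0.0.0", server_port=7860)   # typical Docker setup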
|