Update app.py
app.py (CHANGED)
@@ -32,28 +32,29 @@ import math
 #subprocess.run(["pip", "install", "--upgrade", "gradio==3.47.1"]) #For huggingface as they sometimes install specific versions on container build
 
 #Uncomment these for Huggingface
-nltk.download('maxent_ne_chunker') #Chunker
-nltk.download('stopwords') #Stop Words List (Mainly Roman Languages)
-nltk.download('words') #200 000+ Alphabetical order list
-nltk.download('punkt') #Tokenizer
-nltk.download('verbnet') #For Description of Verbs
-nltk.download('omw')
-nltk.download('omw-1.4') #Multilingual Wordnet
-nltk.download('wordnet') #For Definitions, Antonyms and Synonyms
-nltk.download('shakespeare')
-nltk.download('dolch') #Sight words
-nltk.download('names') #People Names NER
-nltk.download('gazetteers') #Location NER
-nltk.download('opinion_lexicon') #Sentiment words
-nltk.download('averaged_perceptron_tagger') #Parts of Speech Tagging
-nltk.download('udhr') # Declaration of Human rights in many languages
-
-
-spacy.cli.download(
-spacy.cli.download('
-spacy.cli.download('
-spacy.cli.download(
-spacy.cli.download("
+#nltk.download('maxent_ne_chunker') #Chunker
+#nltk.download('stopwords') #Stop Words List (Mainly Roman Languages)
+#nltk.download('words') #200 000+ Alphabetical order list
+#nltk.download('punkt') #Tokenizer
+#nltk.download('verbnet') #For Description of Verbs
+#nltk.download('omw')
+#nltk.download('omw-1.4') #Multilingual Wordnet
+#nltk.download('wordnet') #For Definitions, Antonyms and Synonyms
+#nltk.download('shakespeare')
+#nltk.download('dolch') #Sight words
+#nltk.download('names') #People Names NER
+#nltk.download('gazetteers') #Location NER
+#nltk.download('opinion_lexicon') #Sentiment words
+#nltk.download('averaged_perceptron_tagger') #Parts of Speech Tagging
+#nltk.download('udhr') # Declaration of Human rights in many languages
+
+
+#spacy.cli.download("en_core_web_sm")
+#spacy.cli.download('ko_core_news_sm')
+#spacy.cli.download('ja_core_news_sm')
+#spacy.cli.download('zh_core_web_sm')
+#spacy.cli.download("es_core_news_sm")
+#spacy.cli.download("de_core_news_sm")
 
 nlp_en = spacy.load("en_core_web_sm")
 nlp_de = spacy.load("de_core_news_sm")
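Since these downloads are toggled by hand for Hugging Face container builds, a runtime guard is one way to avoid commenting them in and out per deployment. The sketch below is illustrative only (the ensure_resources helper and its resource list are not part of app.py); nltk.download skips corpora that are already cached, and spacy.load raises OSError when a model is missing:

    import nltk
    import spacy

    def ensure_resources():  # illustrative helper, not in app.py
        for pkg in ["punkt", "wordnet", "omw-1.4", "averaged_perceptron_tagger"]:
            nltk.download(pkg, quiet=True)      # no-op if the corpus is already present
        for model in ["en_core_web_sm", "de_core_news_sm"]:
            try:
                spacy.load(model)               # raises OSError if the model is not installed
            except OSError:
                spacy.cli.download(model)

    ensure_resources()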
@@ -1193,9 +1194,10 @@ def imagebasedreading(inputtext):
 #--------
 
 guidedreadingseperator = gr.Dropdown(choices=[".", ",", "lines", "Sentences"], value=".", interactive=True)
-
-
-
+textspreprocess = [
+    ["Bing created this example \nApple is looking at buying U.K. startup for $1 billion \nApple schaut sich für 1 Milliarde Dollar ein Startup aus Großbritannien an \nApple está mirando comprar una startup del Reino Unido por mil millones de dólares \n애플은 영국의 스타트업을 10억 달러에 사려고 한다 \nアップルは、英国のスタートアップを10億ドルで買収する予定だ \n苹果正考虑以10亿美元收购英国初创公司", "\n"],
+    ["This is an english sentence. This is the second english sentence", "."],
+]
 
 LLPromptIdeas = """
 
@@ -1308,9 +1310,53 @@ def segment_video_with_opencv(file_path, segment_duration=60):
 
     return generated_files
 
+def fill_lines(input, num_lines=1000):
+    # Split the input by newline and store it in a list
+    input_list = input.splitlines()
+    # Calculate how many lines each part of the input should get
+    lines_per_part = int(num_lines // len(input_list))
+    # Initialize an empty list to store the output
+    output_list = []
+    currentpart = ""
+    # Loop through each part of the input
+    for part in input_list:
+
+        currentpart += part + "\n"
+        # Fill the list of strings into one string with newlines
+        filled_part = currentpart * lines_per_part #textwrap.fill(wrapped_part, width=lines_per_part)
+        # Append the filled part to the output list
+        output_list.append(filled_part)
+        currentpart = ""
+    # Join the output list into one string with newlines
+    output = "\n".join(output_list)
+
+    return output
+
+def TestSplitandUpdatebtntest():
+    gr.Info("Incomplete - Text Chosen for Interface")
+    pass
+
 def TestSplitandUpdate(Text):
 
-    return f"
+    return f" Length of the text - { len(Text) }", gr.Button("Incomplete - Set this Text as default for all interfaces") #.click(TestSplitandUpdatebtntest, inputs=None, outputs=None) - Returns the event instead of the button with the event
+
+TestSplitandUpdateinput = gr.Textbox(placeholder="Counter and Placeholder one point of entry for the text to be analysed across the whole app")
+
+def RepititionInjectedReading(learning, reading):
+    readingdoc = nlp(reading)
+    learninglist = learning.splitlines()
+    FinalOutput = ""
+    numofsentencesinreading = sum(1 for _ in readingdoc.sents) #len(readingdoc.sents) is wrong because of generator
+    numofsentencesinlearning = len(learninglist)
+    RepInjectedText = "\n"
+
+    for i in range(0, numofsentencesinlearning):
+        for sent in readingdoc.sents:
+            RepInjectedText += sent.text + " (" + learninglist[i] + ") "
+
+    FinalOutput = f"{ numofsentencesinreading } repitition oppurtunities between the sentences: \n { RepInjectedText }"
+
+    return FinalOutput
 
 # For testing purposes
 # file_paths = segment_video_with_opencv("path_to_your_video.mp4")
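To make the behaviour of the new fill_lines helper concrete: each input line is accumulated into currentpart and then multiplied by lines_per_part, so every line is repeated roughly num_lines // len(lines) times, with a blank line between the repeated groups. An illustrative call (not part of the commit):

    print(fill_lines("line one\nline two", num_lines=4))
    # lines_per_part = 4 // 2 = 2, so the printed output is:
    # line one
    # line one
    #
    # line two
    # line two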
@@ -1328,6 +1374,7 @@ groupinput_text = gr.Textbox(lines=2, label="Enter a list of words")
 groupoutput_text = gr.Textbox(label="Grouped words")
 
 Translationchuncksize = gr.Number(value=4998)
+RepSched_Num_lines = gr.Number(value=1000, label="number of lines")
 
 randomExposuremessage = randommarquee()
 randomExposuremessage2 = randommarquee()
@@ -1338,7 +1385,7 @@ VideoSplitTestInput = gr.File(label="select a mp4 video file", file_types=[".mp4
 
 with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', secondary_hue='red', neutral_hue='blue', )
     gr.HTML('<div style="display: flex; justify-content: center; align-items: center; height: 100%;"> ---- Under Construction: Very Slowly figuring out what AI intergrated interface means (Chat vs Forms vs Function calling vs Sensor + Trigger vs Agent) | How to end copy paste once and for all? ---- </div> <div style="display: flex; justify-content: center; align-items: center; height: 100%;"> All the apis from the below space need to be treated like RAG as notes for the LLM to read before providing its answer </div>')
-    with gr.Accordion("LLM HF Spaces (Click Here to Open) - Use 'Acronym Map Creation Space' Tab with this - Ask for Translation of image tags made below, sentence to emojis, Wordlists, Test Conversations, Get Grammar Explanations etc., Can use GPT-4 or new SOTA to review the conversation", open=False):
+    with gr.Accordion("LLM HF Spaces/Sites (Click Here to Open) - Use 'Acronym Map Creation Space' Tab with this - Ask for Translation of image tags made below, sentence to emojis, Wordlists, Test Conversations, Get Grammar Explanations etc., Can use GPT-4 or new SOTA to review the conversation", open=False):
         with gr.Row():
             linktochat = gr.Dropdown(choices=["https://sdk.vercel.ai/docs", "https://labs.perplexity.ai/", "https://chat.lmsys.org", "https://huggingfaceh4-zephyr-chat.hf.space", "https://osanseviero-mistral-super-fast.hf.space", "https://artificialguybr-qwen-14b-chat-demo.hf.space", "https://huggingface-projects-llama-2-7b-chat.hf.space", "https://ysharma-explore-llamav2-with-tgi.hf.space", "https://mosaicml-mpt-30b-chat.hf.space", "https://huggingfaceh4-falcon-chat.hf.space", "https://uwnlp-guanaco-playground-tgi.hf.space", "https://stabilityai-stablelm-tuned-alpha-chat.hf.space", "https://mosaicml-mpt-7b-storywriter.hf.space", "https://huggingfaceh4-starchat-playground.hf.space", "https://bigcode-bigcode-playground.hf.space", "https://mosaicml-mpt-7b-chat.hf.space", "https://huggingchat-chat-ui.hf.space", "https://togethercomputer-openchatkit.hf.space"], label="Choose/Cancel type any .hf.space link here (can also type a link)'", allow_custom_value=True)
             chatspacebtn = gr.Button("Use the chosen URL to load interface with a chat model. For sdk.vercel click the chat button on the top left")
@@ -1348,7 +1395,13 @@ with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', seconda
         gr.Interface(loadforcopybuttonllmpromptideas, inputs=None, outputs=["html", "code", "code", "code", "code", "code"])
         chatspace = gr.HTML("Chat Space Chosen will load here")
         chatspacebtn.click(display_website, inputs=linktochat, outputs=chatspace)
-    gr.
+    with gr.Accordion("Image HF Spaces/Sites (Click Here to Open) - Use with the image placeholder in Workflows tab", open=False):
+        with gr.Row():
+            linktoimagegen = gr.Dropdown(choices=["https://simianluo-latent-consistency-model.hf.space", "https://google-sdxl.hf.space", "https://fffiloni-sdxl-control-loras.hf.space"], label="Choose/Cancel type any .hf.space link here (can also type a link)'", allow_custom_value=True)
+            imagegenspacebtn = gr.Button("Use the chosen URL to load interface with a chat model")
+        imagegenspace = gr.HTML("Chat Space Chosen will load here")
+        imagegenspacebtn.click(display_website, inputs=linktoimagegen, outputs=imagegenspace)
+    gr.HTML("<div style='display: flex; justify-content: center; align-items: center; height: 100%;'> Agents = Custom Software (Personalised UI and Mods, among other things) = Custom Environments (AR) - <a href='https://github.com/KillianLucas/open-interpreter'> -- Open Interpreter -- </a> | <a href='https://github.com/microsoft/autogen'> -- Microsoft Autogen -- </a> | </div>")
     with gr.Row():
         with gr.Column(scale=1):
             with gr.Tabs() as nav1:
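The new accordion reuses the existing display_website callback to render the chosen Space URL inside the Blocks page. Assuming that callback simply wraps the URL in an iframe (a guess; its body is outside this diff), a minimal equivalent would be:

    def embed_space(url: str) -> str:  # illustrative stand-in for display_website
        return f'<iframe src="{url}" width="100%" height="1000" frameborder="0"></iframe>'

    # e.g. embed_space("https://google-sdxl.hf.space") returns an HTML string suitable for a gr.HTML output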
@@ -1379,7 +1432,7 @@ with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', seconda
 with gr.Tab("Workflows"):
     with gr.Row():
         gr.HTML("<span style:{'fontsize: 20'}>Start at Unkown Tracker if unseure<span> <br> UNNWFWO = Unknown Native Word Foreign Word Order i.e. during active listening practice you only need the words you dont know <br><br> General Ideas in this space - Speed of Learning = Avoid Things you know like the plague -- How to track what you know -- Counter is easiest and How you feel is the hardest (The more you know, the more confusion on what you dont know as you probably werent keeping track) <br><br> Visulisation of long text - Bottom of this page <br> Wordlist - 1 new word at a time per minute in the space to the left <br> Youtube Video Watching - Subtitles Tab <br> Reading - Unknown Tracker Tabs <br> Longer Text Memorising - Acronym Map Creation Tab and Transition Tab <br> Brainstorming - Reading Assistant <br> Random Exposure <br> ")
-        gr.Interface(fn=TestSplitandUpdate, inputs=
+        gr.Interface(fn=TestSplitandUpdate, inputs=TestSplitandUpdateinput, outputs=["text", "button"])
     with gr.Row():
         PracticeExposure = gr.HTML(randomExposuremessage)
         PracticeExposure2 = gr.HTML(randomExposuremessage2)
@@ -1389,7 +1442,7 @@ with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', seconda
     gr.HTML("Advanced Repitition = Combinatorics --> to understand a sentence properly you need understanding of every word --> in language that means use with other words --> Combos within the unique words in a sentence, paragraph, page, etc. --> as close to 3 word sentences")
 with gr.Column(scale=1):
     gr.HTML("<p>Timing Practice - Repitition: Run from it, Dread it, Repitition is inevitable - Thanos --> Repitition of reaction - Foreign in eyes/ears native in mind (For beginners) | Repitition is a multitask activity like driving must be subconcious process to show mastery </p>")
-    gr.HTML(""" <a href='https://github.com/eugeneyan/open-llms'> -- Opensource List -- </a> | <a href='https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard'> -- Open LLM Leaderboard -- </a> | <a href='https://huggingface.co/spaces/sanchit-gandhi/whisper-jax'> -- Whisper JAX -- </a> | <a href="https://translate.google.com/?hl=en&tab=TT"> -- Google Translate -- </a> | <a href='https://huggingface.co/spaces/damo-vilab/modelscope-text-to-video-synthesis'> -- Modelscope Text to Video -- </a> | <a href='https://huggingface.co/spaces/stabilityai/stable-diffusion'> -- stable-diffusion 2 -- </a> | <a href='https://huggingface.co/spaces/stabilityai/stable-diffusion-1'> -- stable-diffusion 1 -- </a> | <a href='https://huggingface.co/spaces/kakaobrain/karlo'> -- karlo 1 -- </a> | <a href='https://huggingface.co/spaces/suno/bark'> -- Bark (TTS) -- </a> | <a href='https://chat.lmsys.org/'> -- Offline Text Model Demos -- </a> | <a href='https://huggingface.co/spaces/curt-park/segment-anything-with-clip'> -- SAM with Clip -- </a> | <a href='https://beta.elevenlabs.io/'> -- Eleven Labs -- </a> | <a href='https://www.d-id.com/'> -- Animate an Image -- </a> | <a href='https://voice.ai/'> -- Clone a voice -- </a> | <a href='https://openai.com/pricing'> -- OpenAI pricing -- </a> | <a href='https://huggingface.co/spaces/sohojoe/soho-clip-embeddings-explorer'> -- Image Training Data Search -- </a> | <a href='https://huggingface.co/spaces/huggingchat/chat-ui'> -- Huggingface Chat -- </a> | <a href='https://huggingface.co/spaces/bguisard/stable-diffusion-nano'> -- 128x128 Stable Diffusion (Fast) -- </a> | <a href='https://huggingface.co/spaces/colonelwatch/abstracts-index'> -- Search 95 million research abstracts -- </a> | <a href='https://huggingface.co/datasets/roneneldan/TinyStories'> -- Tiny Stories Dataset -- </a> | <a href='https://huggingface.co/spaces/lykeven/visualglm-6b'> -- Visualglm6b - Discuss images -- </a> | <a href='https://huggingface.co/spaces/xinyu1205/Recognize_Anything-Tag2Text'> -- RAM and Tag2Text -- </a> | <a href='https://huggingface.co/camenduru/potat1'> -- Potat1 Text2vid -- </a> | <a href='https://twitter.com/willdepue/status/1661781355452325889'> -- Alexandria Prohect (Will Deque) - Free Embeddings -- </a> | <a href='https://artsandculture.google.com/'> -- Google Arts and Culture Portal -- </a> | <a href='https://huggingface.co/spaces/Matthijs/whisper_word_timestamps'> -- Word Level Timestamps -- </a> | <a href='https://huggingface.co/spaces/zaanind/NLLB-translation'> -- NLLB 600M Demo -- </a> = <a href='https://github.com/facebookresearch/fairseq/tree/nllb'> -- NLLB Github -- </a> | <a href='https://huggingface.co/spaces/hysts/zeroscope-v2'> -- Zeroscope v2 Text to video -- </a> | <a href='https://huggingface.co/spaces/SpacesExamples/ComfyUI'> -- ComfyUI Text to Image -- </a> | <a href='https://huggingface.co/spaces/DeepFloyd/IF'> -- Deepfloyd IF - Text in image -- </a> | <a href='https://huggingface.co/spaces/ysharma/ChatGPT-Plugins-in-Gradio'> -- ChatGPT Custom Plugins Test Space -- </a> | <a href='https://www.reddit.com/r/LocalLLaMA/'> -- r/LocalLlama -- </a> | <a href='https://www.reddit.com/r/singularity/'> -- r/Singularity -- </a> | <a href='https://huggingface.co/spaces/hysts/SD-XL'> -- SD-XL Test Space -- </a> | <a href='https://huggingface.co/spaces/facebook/seamless_m4t'> -- Seamless M4T - Translation one stop shop -- </a> | <a href='https://huggingface.co/spaces/codellama/codellama-playground'> -- Code Llama playground -- </a> | <a href='https://huggingface.co/spaces/Voicemod/Text-to-Sing'> -- Text to sing -- </a> | <a href='https://huggingface.co/spaces/camenduru-com/webui'> -- Stable Diffusion Webui (Camenduru Space) -- </a> | <a href='https://huggingface.co/spaces/ysharma/WizardCoder34b'> -- Wizard Coder 34B -- </a> | <a href='https://huggingface.co/spaces/chansung/co-write-with-llama2'> -- Cowrite with llama2 -- </a> | <a href='https://huggingface.co/spaces/fffiloni/Image-to-Story'> -- Image to Story -- </a> | <a href='https://huggingface.co/spaces/fffiloni/CLIP-Interrogator-2'> -- Clip interrogator 2 -- </a> | <a href='https://github.com/THUDM/AgentBench'> -- Agent Benchmarks -- </a> | <a href='https://www.convex.dev/ai-town'> -- AI Town Live Demo -- </a> = <a href='https://github.com/a16z-infra/ai-town'> -- AI Town Repository (Deployment]) -- </a> | <a href='https://github.com/joonspk-research/generative_agents/tree/main'> -- Generative Agents: Interactive Simulacra of Human Behavior (Research paper Repository) -- </a> | <a href='https://huggingface.co/spaces/HuggingFaceM4/idefics_playground'> -- IDEFICS - open Multimodal model -- </a> | <a href='https://github.com/facebookresearch/belebele'> -- Belebele (Meta Dataset) -- </a> | <a href='https://huggingface.co/spaces/jbilcke-hf/ai-comic-factory'> -- AI Comic Factory -- </a> | <a href='https://github.com/camenduru'> -- CAMENDURU REPOS -- </a> | <a href='https://huggingface.co/datasets/b-mc2/sql-create-context'> -- SQL Dataset - A list of simple questions -- </a> | <a href='https://github.com/KillianLucas/open-interpreter'> -- Open Interpreter (alt to ChatGPT Pro) -- </a> | <a href='https://easywithai.com/fun-ai-tools/'> -- List - Easy with AI -- </a> | <a href='https://huggingface.co/spaces/Xenova/whisper-web'> -- Whisper Web (UI) -- </a> | <a href='https://blog.roblox.com/2023/09/revolutionizing-creation-roblox/'> -- Roblox Assistant -- </a> | <a href='https://huggingface.co/spaces/AP123/IllusionDiffusion'> -- Illusion Diffusion (Hide words or shapes in the image) -- </a> | <a href='https://huggingface.co/spaces/Shopify/background-replacement'> -- Background replacement - Shopify -- </a> | <a href='https://huggingface.co/spaces/multimodalart/LoraTheExplorer'> -- Lora The Explorer (SDXL) -- </a> | <a href='https://huggingface.co/spaces/XCLiu/InstaFlow'> -- InstaFlow (Under 1 second Inference) -- </a> | <a href='https://github.com/tairov/llama2.mojo'> -- TinyStories on mojo (230+ tk/s) -- </a> | <a href='https://emojis.alexandru.so/p/OHVEmfMwQl'> -- Any Emoji you want - emojijs -- </a> | <a href='https://huggingface.co/spaces/google/sdxl'> -- SDXL on TPUv5 -- </a> |
+    gr.HTML(""" <a href='https://github.com/eugeneyan/open-llms'> -- Opensource List -- </a> | <a href='https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard'> -- Open LLM Leaderboard -- </a> | <a href='https://huggingface.co/spaces/sanchit-gandhi/whisper-jax'> -- Whisper JAX -- </a> | <a href="https://translate.google.com/?hl=en&tab=TT"> -- Google Translate -- </a> | <a href='https://huggingface.co/spaces/damo-vilab/modelscope-text-to-video-synthesis'> -- Modelscope Text to Video -- </a> | <a href='https://huggingface.co/spaces/stabilityai/stable-diffusion'> -- stable-diffusion 2 -- </a> | <a href='https://huggingface.co/spaces/stabilityai/stable-diffusion-1'> -- stable-diffusion 1 -- </a> | <a href='https://huggingface.co/spaces/kakaobrain/karlo'> -- karlo 1 -- </a> | <a href='https://huggingface.co/spaces/suno/bark'> -- Bark (TTS) -- </a> | <a href='https://chat.lmsys.org/'> -- Offline Text Model Demos -- </a> | <a href='https://huggingface.co/spaces/curt-park/segment-anything-with-clip'> -- SAM with Clip -- </a> | <a href='https://beta.elevenlabs.io/'> -- Eleven Labs -- </a> | <a href='https://www.d-id.com/'> -- Animate an Image -- </a> | <a href='https://voice.ai/'> -- Clone a voice -- </a> | <a href='https://openai.com/pricing'> -- OpenAI pricing -- </a> | <a href='https://huggingface.co/spaces/sohojoe/soho-clip-embeddings-explorer'> -- Image Training Data Search -- </a> | <a href='https://huggingface.co/spaces/huggingchat/chat-ui'> -- Huggingface Chat -- </a> | <a href='https://huggingface.co/spaces/bguisard/stable-diffusion-nano'> -- 128x128 Stable Diffusion (Fast) -- </a> | <a href='https://huggingface.co/spaces/colonelwatch/abstracts-index'> -- Search 95 million research abstracts -- </a> | <a href='https://huggingface.co/datasets/roneneldan/TinyStories'> -- Tiny Stories Dataset -- </a> | <a href='https://huggingface.co/spaces/lykeven/visualglm-6b'> -- Visualglm6b - Discuss images -- </a> | <a href='https://huggingface.co/spaces/xinyu1205/Recognize_Anything-Tag2Text'> -- RAM and Tag2Text -- </a> | <a href='https://huggingface.co/camenduru/potat1'> -- Potat1 Text2vid -- </a> | <a href='https://twitter.com/willdepue/status/1661781355452325889'> -- Alexandria Prohect (Will Deque) - Free Embeddings -- </a> | <a href='https://artsandculture.google.com/'> -- Google Arts and Culture Portal -- </a> | <a href='https://huggingface.co/spaces/Matthijs/whisper_word_timestamps'> -- Word Level Timestamps -- </a> | <a href='https://huggingface.co/spaces/zaanind/NLLB-translation'> -- NLLB 600M Demo -- </a> = <a href='https://github.com/facebookresearch/fairseq/tree/nllb'> -- NLLB Github -- </a> | <a href='https://huggingface.co/spaces/hysts/zeroscope-v2'> -- Zeroscope v2 Text to video -- </a> | <a href='https://huggingface.co/spaces/SpacesExamples/ComfyUI'> -- ComfyUI Text to Image -- </a> | <a href='https://huggingface.co/spaces/DeepFloyd/IF'> -- Deepfloyd IF - Text in image -- </a> | <a href='https://huggingface.co/spaces/ysharma/ChatGPT-Plugins-in-Gradio'> -- ChatGPT Custom Plugins Test Space -- </a> | <a href='https://www.reddit.com/r/LocalLLaMA/'> -- r/LocalLlama -- </a> | <a href='https://www.reddit.com/r/singularity/'> -- r/Singularity -- </a> | <a href='https://huggingface.co/spaces/hysts/SD-XL'> -- SD-XL Test Space -- </a> | <a href='https://huggingface.co/spaces/facebook/seamless_m4t'> -- Seamless M4T - Translation one stop shop -- </a> | <a href='https://huggingface.co/spaces/codellama/codellama-playground'> -- Code Llama playground -- </a> | <a href='https://huggingface.co/spaces/Voicemod/Text-to-Sing'> -- Text to sing -- </a> | <a href='https://huggingface.co/spaces/camenduru-com/webui'> -- Stable Diffusion Webui (Camenduru Space) -- </a> | <a href='https://huggingface.co/spaces/ysharma/WizardCoder34b'> -- Wizard Coder 34B -- </a> | <a href='https://huggingface.co/spaces/chansung/co-write-with-llama2'> -- Cowrite with llama2 -- </a> | <a href='https://huggingface.co/spaces/fffiloni/Image-to-Story'> -- Image to Story -- </a> | <a href='https://huggingface.co/spaces/fffiloni/CLIP-Interrogator-2'> -- Clip interrogator 2 -- </a> | <a href='https://github.com/THUDM/AgentBench'> -- Agent Benchmarks -- </a> | <a href='https://www.convex.dev/ai-town'> -- AI Town Live Demo -- </a> = <a href='https://github.com/a16z-infra/ai-town'> -- AI Town Repository (Deployment]) -- </a> | <a href='https://github.com/joonspk-research/generative_agents/tree/main'> -- Generative Agents: Interactive Simulacra of Human Behavior (Research paper Repository) -- </a> | <a href='https://huggingface.co/spaces/HuggingFaceM4/idefics_playground'> -- IDEFICS - open Multimodal model -- </a> | <a href='https://github.com/facebookresearch/belebele'> -- Belebele (Meta Dataset) -- </a> | <a href='https://huggingface.co/spaces/jbilcke-hf/ai-comic-factory'> -- AI Comic Factory -- </a> | <a href='https://github.com/camenduru'> -- CAMENDURU REPOS -- </a> | <a href='https://huggingface.co/datasets/b-mc2/sql-create-context'> -- SQL Dataset - A list of simple questions -- </a> | <a href='https://github.com/KillianLucas/open-interpreter'> -- Open Interpreter (alt to ChatGPT Pro) -- </a> | <a href='https://easywithai.com/fun-ai-tools/'> -- List - Easy with AI -- </a> | <a href='https://huggingface.co/spaces/Xenova/whisper-web'> -- Whisper Web (UI) -- </a> | <a href='https://blog.roblox.com/2023/09/revolutionizing-creation-roblox/'> -- Roblox Assistant -- </a> | <a href='https://huggingface.co/spaces/AP123/IllusionDiffusion'> -- Illusion Diffusion (Hide words or shapes in the image) -- </a> | <a href='https://huggingface.co/spaces/Shopify/background-replacement'> -- Background replacement - Shopify -- </a> | <a href='https://huggingface.co/spaces/multimodalart/LoraTheExplorer'> -- Lora The Explorer (SDXL) -- </a> | <a href='https://huggingface.co/spaces/XCLiu/InstaFlow'> -- InstaFlow (Under 1 second Inference) -- </a> | <a href='https://github.com/tairov/llama2.mojo'> -- TinyStories on mojo (230+ tk/s) -- </a> | <a href='https://emojis.alexandru.so/p/OHVEmfMwQl'> -- Any Emoji you want - emojijs -- </a> | <a href='https://huggingface.co/spaces/google/sdxl'> -- SDXL on TPUv5 -- </a> | <a href='https://huggingface.co/spaces/SimianLuo/Latent_Consistency_Model'> -- LCM - SD1.5 at 7secs per 4 images (after coldstart) -- </a> | <a href='https://huggingface.co/spaces/fffiloni/sdxl-control-loras'> -- SDXL Control Lora -- </a> | <a href='https://huggingface.co/spaces/aadnk/faster-whisper-webui'> -- Whisper WebUI -- </a> | """)
     gr.HTML("Placeholder for every images of each sentence - Good ChatGPT + Dall-E ")
 with gr.Row():
     with gr.Column(scale=4):
@@ -1404,11 +1457,16 @@ with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', seconda
         with gr.Column(scale=2):
             imageplaceholdertextoutput = gr.Code("The code for the HTML created will come here")
             imageplaceholderbtn.click(fn=imagebasedreading, inputs=[imageplaceholderinput], outputs=[imageplaceholderdownload, imageplaceholderoutput, imageplaceholdertextoutput])
+with gr.Tab("Repetition Injected Text"):
+    gr.Label("Optimal Study Reps is inbetween new information acquisition - i.e. any thing you havent read already")
+    gr.Interface(fn=RepititionInjectedReading, inputs=["text", "text"], outputs="text")
 with gr.Tab("Progress Tracking"):
     gr.Label("Missing is database integration for the counter and non-english - ALSO TODO - Parralell interface for the html and acronym creator")
     gr.Interface(fn=UnknownTrackTexttoApp, inputs="text", outputs=["file", "html", "text"], description="HTML mini App - UNNWFWO (To track verbs you dont know for listening practice). Use the text from here to create lists you use for the TTS section")
     gr.Interface(create_acronym_map, inputs='text', outputs=['text', 'text'])
     gr.HTML("On the Acronyms you need to underline the verbs")
+    gr.HTML("Aim for 1000 reps per item in your mind - the end goal for full sentences is to identify the SOV equivalent ASAP")
+    gr.Interface(fill_lines, inputs=["text", RepSched_Num_lines], outputs="text")
 with gr.Tab("Beginner - Listen + Read"):
     gr.Label("Closed Eye Recital per new word | 1 new word a minute while recycling the words from the previous minutes")
     with gr.Row():
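The Repetition Injected Text tab feeds two plain textboxes into RepititionInjectedReading, which re-emits every sentence of the reading once per learning line with that line appended in parentheses. An illustrative call (not part of the commit; it assumes the global spaCy pipeline nlp referenced by the function is loaded):

    learning = "der Apfel\ndas Haus"
    reading = "First sentence. Second sentence."
    print(RepititionInjectedReading(learning, reading))
    # -> "2 repitition oppurtunities between the sentences:" followed by each sentence
    #    first with "(der Apfel)" appended, then again with "(das Haus)"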
@@ -1416,7 +1474,7 @@ with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', seconda
 gr.HTML("Listening - Songs - Chorus <br> Anticipation of the item to remember is how you learn lyrics that is why songs are easy as if you heard it 10 times already your capacity to anticipate the words is great <br><br> This is where TTS helps as you are ignoring all words except the words just before the actual <br> <b>Tiny Stories dataset is like a graded reader</b> <br>")
 gr.Interface(fn=TTSforListeningPractice, inputs=["text", TTSLangOptions, "checkbox"], outputs="audio", description="Paste chorus lyrics from below here and use TTS or make notes to save here (Or paste anything)")
 with gr.Accordion("TTS Spaces", open=False):
-    TTSspaceoptions = gr.Dropdown(choices=["https://
+    TTSspaceoptions = gr.Dropdown(choices=["https://suno-bark.hf.space", "https://coqui-xtts.hf.space"], label="existing whisper spaces")
     TTSspaceoptionsbtn = gr.Button("Load a Image as prompt Space")
     TTSspaceoptionsOut = gr.HTML()
     TTSspaceoptionsbtn.click(fn=display_website, inputs=TTSspaceoptions, outputs=TTSspaceoptionsOut)
@@ -1443,7 +1501,7 @@ with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', seconda
 gr.HTML(" <a href='https://www.onenote.com/learningtools'> -- Microsoft Immersive Reader (Comprehension) -- </a> | <a href='https://www.lingq.com/en/'> LingQ - (Word Familiarity based) </a> ")
 gr.HTML("Repitition of things you know is a waste of time when theres stuff you dont know <p> In Language the goal is bigger vocab --> Knowledge equivalent = question answer pairs but to get to those you need related information pairs</p> <p> Vocab = Glossary + all non text wall(lists, diagrams, etc.)</p>")
 gr.Textbox("Placeholder for a function that creates a set list and can takes a list for known words and auto find replaces the stuff you know out of the content")
-gr.Interface(fn=GuidedReading, inputs=["text", guidedreadingseperator], outputs="text", description="Manual POS Tag and Transliteration"
+gr.Interface(fn=GuidedReading, inputs=["text", guidedreadingseperator], outputs="text", description="Manual POS Tag and Transliteration", examples=textspreprocess)
 gr.HTML("Place holder for a translate to english interface so that highlighting can still work as only english supported for now - <a href='https://translate.google.com/'> -- Google Translate -- </a>")
 with gr.Tab("Unique word ID - use in Infranodus"):
     with gr.Accordion(label="Infranodus", open=False):
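The examples argument added to the GuidedReading interface expects one row per example, with values in the same order as the interface inputs - here [text, separator] - which matches the shape of the textspreprocess list defined earlier in this diff. A hypothetical extra row would look like:

    textspreprocess.append(["Erster Satz. Zweiter Satz.", "."])  # hypothetical third example row, not in the commit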
@@ -1625,4 +1683,4 @@ with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', seconda
 gr.HTML("Education = Learning things you didnt know yesterday and not forgetting more than you learn <br><br> What you didnt know forms = <br> Glossary <br> Lists <br> Formulas <br> graphs <br> Procedures <br> <br> for each you will need a seperate way to track the progress but amount of times + recency = approximate state ")
 
 
-lliface.queue().launch(
+lliface.queue().launch() #(inbrowser="true")
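One note on the commented hint in the last hunk: Gradio's launch() takes inbrowser as a boolean, so if that option is ever re-enabled for local runs it would be written as follows (it is typically only relevant outside Spaces, where the platform serves the app):

    lliface.queue().launch(inbrowser=True)  # boolean form; opens a local browser tab when run locally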