Update app.py
app.py CHANGED
@@ -194,7 +194,9 @@ def merge_lines(roman_file, w4w_file, full_mean_file, macaronic_file):
 TTSLangOptions = gr.Dropdown(choices=["en", "de", "es", "ja", "ko", "zh-cn"], value="en", label="choose the language of the srt/text accent")
 TTSLangOptions2 = gr.Dropdown(choices=["en", "de", "es", "ja", "ko", "zh-cn"], value="en", label="choose the language of the srt/text accent")
 
-def TTSforListeningPractice(text, language = "en"):
+def TTSforListeningPractice(text, language = "en", Repeat10x = False):
+    if Repeat10x:
+        text = text * 10
     speech = gTTS(text=text, lang=language, slow="False")
     speech.save("CurrentTTSFile.mp3")
     #file = BytesIO()
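Note: the new Repeat10x branch repeats the raw string, and Python string multiplication adds no separator between copies. A minimal sketch of the behaviour, assuming the gtts package this file already uses (the tts_repeat_sketch name and the added trailing space are illustrative; also, gTTS's slow parameter is a boolean, while the committed line passes the string "False", which is truthy):

# Sketch only, not part of the commit.
from gtts import gTTS

def tts_repeat_sketch(text, language="en", repeat10x=False):
    if repeat10x:
        # "word" * 10 -> "wordwordword..."; a trailing space keeps the
        # ten repetitions from running together when spoken
        text = (text + " ") * 10
    speech = gTTS(text=text, lang=language, slow=False)  # slow expects a bool
    speech.save("CurrentTTSFile.mp3")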
@@ -764,6 +766,9 @@ def reading_randomize_words_in_paragraph(paragraph):
     random.shuffle(sentences)
     return '. '.join(sentences)
 
+def changeexposuretext(text):
+    return f"<marquee> {text} </marquee>"
+
 #-------------------------------------------------------------------------------------------------------------------------------
 
 def arrealtimetestidea(img):
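Note: changeexposuretext feeds the PracticeExposure banner wired up in a later hunk. A self-contained sketch of that wiring pattern under a stock gradio install (the banner/new_text/change_btn names are illustrative; the commit uses PracticeExposure, PracticeExposureInput and PracticeExposurebtn):

# Sketch of the exposure-banner wiring this commit builds toward.
import gradio as gr

def changeexposuretext(text):
    return f"<marquee> {text} </marquee>"

with gr.Blocks() as demo:
    banner = gr.HTML("<marquee> default exposure message </marquee>")
    new_text = gr.Textbox(label="Exposure at the top")
    change_btn = gr.Button("Change Default")
    # clicking re-renders the HTML banner with the textbox value
    change_btn.click(fn=changeexposuretext, inputs=new_text, outputs=banner)

demo.launch()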
@@ -772,6 +777,31 @@ def arrealtimetestidea(img):
 #------------------------------------------------------------------------------------------------------------------------------
 
 
+LLPromptIdeas = """
+
+<p> Make jokes while following rules for a syllogism jokes game:
+
+The game can be played with any number of people.
+One person starts by stating a syllogism, which is a logical argument that consists of three parts: a major premise, a minor premise, and a conclusion.
+The next person must then state a syllogism that has the same conclusion as the first syllogism, but with different major and minor premises.
+The game continues in this way until someone cannot think of a new syllogism.
+The person who makes the last valid syllogism wins the game. </p>
+
+<hr>
+
+<p> Lets try this I will give you the english word and you find the <<chinese>> word and then mix the meaning of the word with the mnemonic </>
+
+Bings Explanation
+Youre welcome. Im happy to help. If I wanted to explain the rules of this game clearly and concisely, I would do something like this:
+
+The game is about finding the <<Chinese>> word for an English word and making a mnemonic that mixes the meaning of the word with the mnemonic.
+The game has two players: one who gives the English word and one who finds the <<Chinese>> word and makes the mnemonic.
+The player who gives the English word can choose any word they want, as long as it has a <<Chinese>> equivalent.
+The player who finds the <<Chinese>> word and makes the mnemonic can use any online or offline resources they want, such as dictionaries, translators, or search engines.
+The player who finds the <<Chinese>> word and makes the mnemonic should write their answer in the format: The <<Chinese>> word for X is Y <<(pinyin)>>, which means Z. Here are some possible mnemonics that mix the meaning of the word with the mnemonic: A, B, C, D. </p>
+"""
+
+
 # Define the Gradio interface inputs and outputs for video split
 spvvideo_file_input = gr.File(label='Video File')
 spvsubtitle_file_input = gr.File(label='Subtitle File')
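Note: LLPromptIdeas is rendered with gr.HTML in the final hunk, where the browser parses the bare << >> markers as tag syntax, so <<chinese>> is unlikely to display literally. A hedged standard-library workaround, if literal display is wanted:

# Sketch: escape the angle brackets so gr.HTML shows them verbatim.
import html

raw = "find the <<chinese>> word"
safe = html.escape(raw)  # "find the &lt;&lt;chinese&gt;&gt; word"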
@@ -784,9 +814,16 @@ groupoutput_text = gr.Textbox(label="Grouped words")
 
 Translationchuncksize = gr.Number(value=4998)
 
+randomExposuremessagelistitem = str(random.sample(["Bing mnemonic - lost = dont ignore unusual sounds here inside lost cave", "1000 verbs in lists of 100, verbs = easy setence structure estimation (SVO, SOV, etc.)", "Can put any message here in the navigatoin tab"], 1)).replace("['", "").replace("']", "")
+randomExposuremessage = f" <marquee> { randomExposuremessagelistitem } </marquee> "
+
+
+
 with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', secondary_hue='red', neutral_hue='blue', )
+    PracticeExposure = gr.HTML(randomExposuremessage)
+    gr.HTML("Advanced Repitition = Combinatorics --> to understand a sentence properly you need understanding of every word --> in language that means use with other words --> Combos within the unique words in a sentence, paragraph, page, etc. --> as close to 3 word sentences")
     gr.HTML("<p>Timing Practice - Repitition: Run from it, Dread it, Repitition is inevitable - Thanos --> Repitition of reaction - Foreign in eyes/ears native in mind (For beginners) | Repitition is a multitask activity like driving must be subconcious process to show mastery </p>")
-    gr.HTML("""<a href='https://huggingface.co/spaces/sanchit-gandhi/whisper-jax'> -- Whisper JAX -- </a> | <a href="https://translate.google.com/?hl=en&tab=TT"> -- Google Translate -- </a> | <a href='https://huggingface.co/spaces/damo-vilab/modelscope-text-to-video-synthesis'> -- Modelscope Text to Video -- </a> | <a href='https://huggingface.co/spaces/stabilityai/stable-diffusion'> -- stable-diffusion 2 -- </a> | <a href='https://huggingface.co/spaces/stabilityai/stable-diffusion-1'> -- stable-diffusion 1 -- </a> | <a href='https://huggingface.co/spaces/kakaobrain/karlo'> -- karlo 1 -- </a> | <a href='https://huggingface.co/spaces/suno/bark'> -- Bark (TTS) -- </a> | <a href='https://chat.lmsys.org/'> -- Offline Text Model Demos -- </a> | <a href='https://huggingface.co/spaces/curt-park/segment-anything-with-clip'> -- SAM with Clip -- </a> | <a href='https://beta.elevenlabs.io/'> -- Eleven Labs -- </a> | <a href='https://www.d-id.com/'> -- Animate an Image -- </a> | <a href='https://voice.ai/'> -- Clone a voice -- </a> | <a href='https://openai.com/pricing'> -- OpenAI pricing -- </a> | <a href='https://huggingface.co/spaces/sohojoe/soho-clip-embeddings-explorer'> -- Image Training Data Search -- </a> |""")
+    gr.HTML(""" <a href='https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard'> -- Open LLM Leaderboard -- </a> | <a href='https://huggingface.co/spaces/sanchit-gandhi/whisper-jax'> -- Whisper JAX -- </a> | <a href="https://translate.google.com/?hl=en&tab=TT"> -- Google Translate -- </a> | <a href='https://huggingface.co/spaces/damo-vilab/modelscope-text-to-video-synthesis'> -- Modelscope Text to Video -- </a> | <a href='https://huggingface.co/spaces/stabilityai/stable-diffusion'> -- stable-diffusion 2 -- </a> | <a href='https://huggingface.co/spaces/stabilityai/stable-diffusion-1'> -- stable-diffusion 1 -- </a> | <a href='https://huggingface.co/spaces/kakaobrain/karlo'> -- karlo 1 -- </a> | <a href='https://huggingface.co/spaces/suno/bark'> -- Bark (TTS) -- </a> | <a href='https://chat.lmsys.org/'> -- Offline Text Model Demos -- </a> | <a href='https://huggingface.co/spaces/curt-park/segment-anything-with-clip'> -- SAM with Clip -- </a> | <a href='https://beta.elevenlabs.io/'> -- Eleven Labs -- </a> | <a href='https://www.d-id.com/'> -- Animate an Image -- </a> | <a href='https://voice.ai/'> -- Clone a voice -- </a> | <a href='https://openai.com/pricing'> -- OpenAI pricing -- </a> | <a href='https://huggingface.co/spaces/sohojoe/soho-clip-embeddings-explorer'> -- Image Training Data Search -- </a> | <a href='https://huggingface.co/spaces/huggingchat/chat-ui'> -- Huggingface Chat -- </a> | <a href='https://huggingface.co/spaces/bguisard/stable-diffusion-nano'> -- 128x128 Stable Diffusion (Fast) -- </a> | <a href='https://huggingface.co/spaces/colonelwatch/abstracts-index'> -- Search 95 million research abstracts -- </a> | <a href='https://huggingface.co/datasets/roneneldan/TinyStories'> -- Tiny Stories Dataset -- </a> | <a href='https://huggingface.co/spaces/lykeven/visualglm-6b'> -- Visualglm6b - Discuss images -- </a> | <a href='https://huggingface.co/spaces/xinyu1205/Recognize_Anything-Tag2Text'> -- RAM and Tag2Text -- </a> | <a href='https://huggingface.co/camenduru/potat1'> -- Potat1 Text2vid -- </a> | """)
     with gr.Row():
         with gr.Column(scale=1):
             with gr.Tab("Rep - Gradio"):
@@ -796,25 +833,28 @@ with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', seconda
                 gr.Interface(fn=group_words, inputs=groupinput_text, outputs=groupoutput_text, description="Word Grouping and Rotation - Group a list of words into sets of 10 and rotate them every 60 seconds.") #.queue()
             with gr.Tab("Navigation"):
                 gr.HTML("Picture Annotation <br> Chorus Focused Word List <br> Merged Subtitles <br> Repetitive Audio (TTS) <br> Word and Sentence Jumbling <br> Unkown: Wordnet <br> Unknown: Wikipeadia <br>")
+                PracticeExposureInput = gr.Textbox(placeholer="Exposure practice = look up", label="Exposure at the top")
+                PracticeExposurebtn = gr.Button("Change Default")
+                PracticeExposurebtn.click(fn=changeexposuretext, inputs=PracticeExposureInput, outputs=PracticeExposure)
             with gr.Tab("Vector Database = Memorisation"):
                 gr.HTML("Open AI - 2500000 character text = <1$ (0.0004 per 1000 tokens), Cohere Multilingual = free for personal use / Commercial use = \n Vector Database query = Better than text search but not for logical relationships")
         with gr.Column(scale=3):
             with gr.Tab("Beginner - Listen + Read"):
                 with gr.Row():
                     with gr.Column(scale=1):
-                        gr.HTML("Listening - Songs - Chorus <br> Anticipation of the item to remember is how you learn lyrics that is why songs are easy as if you heard it 10 times already your capacity to anticipate the words is great <br><br> This is where TTS helps as you are ignoring all words except the words just before the actual <br>")
-                        gr.Interface(fn=TTSforListeningPractice, inputs=["text", TTSLangOptions], outputs="audio", description="Paste chorus lyrics from below here and use TTS or make notes to save here (Or paste anything)")
+                        gr.HTML("Listening - Songs - Chorus <br> Anticipation of the item to remember is how you learn lyrics that is why songs are easy as if you heard it 10 times already your capacity to anticipate the words is great <br><br> This is where TTS helps as you are ignoring all words except the words just before the actual <br> <b>Tiny Stories dataset is like a graded reader</b> <br>")
+                        gr.Interface(fn=TTSforListeningPractice, inputs=["text", TTSLangOptions, "checkbox"], outputs="audio", description="Paste chorus lyrics from below here and use TTS or make notes to save here (Or paste anything)")
                         gr.HTML("<p>Fastest way to learn words = is to have your own sound reference --> probably why babies learn fast as they make random noise</p> <p>If you know the flow of the song you can remember the spelling easier</p><p>Essentially if the sounds are repeated or long notes they are easy to remember</p>")
                         gr.Interface(fn=AutoChorusInvestigator, inputs="text", outputs="text", description="Paste Full Lyrics to try find only chorus lines")
                         gr.Interface(fn=AutoChorusPerWordScheduler, inputs="text", outputs="text", description="Create order of repitition for tts practice")
                     with gr.Column(scale=1):
-                        gr.HTML("""Reading - Caption images (SD/Dalle-E) <br> <a href="https://huggingface.co/spaces/pharma/CLIP-Interrogator"> --Huggingface CLIP-Interrogator Space-- </a>""")
+                        gr.HTML("""Reading - Caption images (SD/Dalle-E) <br> <a href='https://unsplash.com/'> -- Unsplash - free images -- </a> | <a href="https://huggingface.co/spaces/pharma/CLIP-Interrogator"> --Huggingface CLIP-Interrogator Space-- </a> | <a href='https://huggingface.co/spaces/xinyu1205/Recognize_Anything-Tag2Text'> -- Tag2Text is faster than clip -- </a> | <br> <a href='https://huggingface.co/spaces/bkhmsi/Word-To-Image'> -- Transform word to an image -- </a> | <a href='https://huggingface.co/spaces/microsoft/Promptist'> -- Promptist (Microsoft) -- </a> | """)
                         gr.Interface(fn=add_text_to_image , inputs=["image", "text"], outputs="image", description="Create Annotated images (Can create using stable diffusion and use the prompt) - Describe from one side to the other to make guessing easy")
                         gr.HTML("Use Shift Enter To put text on new lines if the text doesnt fit <br> if theres an error you have to remove the foreign letters and place roman ones")
                 #with gr.Tab("Transcribe - RASMUS Whisper"):
                     #gr.Interface.load("spaces/RASMUS/Whisper-youtube-crosslingual-subtitles", title="Subtitles")
             with gr.Tab("Advanced - LingQ Addon Ideas"):
-                gr.HTML("<a href='https://www.lingq.com/en/'>Find
+                gr.HTML("<a href='https://www.lingq.com/en/'>Find LingQ Here --> https://www.lingq.com/en/</a>")
                 with gr.Tab("Visual - Multiline Custom Video Subtitles"):
                     gr.HTML("LingQ Companion Idea - i.e. Full Translation Read along, and eventually Videoplayer watch along like RAMUS whisper space <br><br>Extra functions needed - Persitent Sentence translation, UNWFWO, POS tagging and Word Count per user of words in their account. Macaronic Text is also another way to practice only the important information")
                     gr.HTML("""<hr> <p>For Transcripts to any video on youtube use the link below ⬇️</p> <a href="https://huggingface.co/spaces/RASMUS/Whisper-youtube-crosslingual-subtitles">https://huggingface.co/spaces/RASMUS/Whisper-youtube-crosslingual-subtitles</a> | <a href="https://huggingface.co/spaces/vumichien/whisper-speaker-diarization">https://huggingface.co/spaces/vumichien/whisper-speaker-diarization</a>""")
@@ -879,7 +919,7 @@ with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', seconda
             with gr.Tab("Unique word ID - use in Infranodus"):
                 gr.Interface(fn=unique_word_count, inputs="text", outputs="text", description="Wordcounter")
                 gr.Interface(fn=SepHypandSynExpansion, inputs="text", outputs=["text", "text"], description="Word suggestions - Analyse the unique words in infranodus")
-                gr.Interface(fn=WikiSearch, inputs="text", outputs="text", description="Unique word suggestions(wiki articles)")
+                gr.Interface(fn=WikiSearch, inputs="text", outputs="text", description="One word at a time Unique word suggestions (wiki articles)")
             with gr.Tab("Automating related information linking"):
                 gr.HTML("Questions - Tacking and suggesting questions to ask = new education")
             with gr.Tab("Thinking Practice"):
@@ -894,10 +934,11 @@ with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', seconda
                 gr.Interface(fn=TextCompFormat, inputs=["textarea", HTMLCompMode], outputs="text", description="Convert Text to HTML Dropdown or Links which you paste in any html file")
                 gr.Interface(fn=create_collapsiblebutton, inputs=["textbox", "textbox", "textarea"], outputs="textarea", description="Button and Div HTML Generator, Generate the HTML for a button and the corresponding div element.")
             with gr.Tab("Automated Reading Assitant"):
-                gr.Textbox('Automating the Notetaking Tab either directly or using visual llm to use this interface efficiently')
+                gr.Textbox('Parts of Speech based | Automating the Notetaking Tab either directly or using visual llm to use this interface efficiently')
+                gr.HTML("Types of comprehension agent <hr> Speed of Comprehension = Verb comprehension <br> From the following please extract the verbs <br> now explain each in context <br> Next, use picture descriptions for each word in the verb list <br> Create combinations using the verb list <hr> ")
                 gr.HTML("Tree and Branches approach to learning = familiarity with keywords/headings/summaries before reading the whole text <hr> Productivity/Work revolves around repitition which can be found looking for plurals and grouping terms eg. Headings and Hyper/Hyponyms Analysis")
             with gr.Tab("AR"):
-                gr.Textbox("Alpha Test version = Real time Lablling of All things in view using SAM and Clip Interrogator and OpenCV on pydroid")
+                gr.Textbox("Alpha Test version = Real time Lablling of All things in view using SAM and Clip Interrogator and OpenCV on pydroid --> Adjusted Demo")
                 gr.HTML("Some Prompt ideas --> Prompt: Describe the place where these descriptions may be (You job is to be speculative for brainstorming purposes): A dog and a boy, the area is texas, the weather is sunny, the date is 01 May 2021 <hr> Prompt Content Ideas Ideas Clip Interrogator + Location Data aka tags for place, location and time + general news updates on the location + overview of the items in the location <br> Location based advise is most important but after that is information observed by appliances in the location eg. Times Computer turned on, times geyser inspected, amount of time keys havent been touched etc. <br> each location will have an ai personality that will relay more information ")
                 gr.HTML("<a href='https://huggingface.co/spaces/curt-park/segment-anything-with-clip'> -- SAM with Clip -- </a>")
                 gr.Interface(fn=arrealtimetestidea, inputs='image', outputs="text", description="Vision Assistant - see and execute")
@@ -909,5 +950,7 @@ with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', seconda
             gr.HTML("<p>Target 0: Mnemonics as title of images --> Comprehensible input <br> Target 1: Dual audio at word Level while using repitition to train random recall --> Word level Time <br> Target 2: Video --> Split by sentence --> each word repeated (60) + each phrase (10) + each sentence (10) --> TTS file for practice --> State Management/Known word Tracker <br>-----------------------<br> The trick is minimum one minute of focus on a new word --> Listening is hard because there are new word within seconds and you need repeated focus on each to learn </p> <p>Audio = best long form attention mechanism AS it is ANTICIPATION (Awareness of something before it happens like knowing song Lyrics) FOCUSED - Attention (Focused Repitition) + Exposure (Random Repitition) </p> <p>Listening is hard due to different word order and word combinations (collocations more important than single words)</p> <hr>")
             gr.HTML("Predictable to identify the parts of picture being described --> The description moves in one direction from one side of the image to the other side is easiest <hr>")
             gr.HTML("Image = instant comprehension like Stable Diffusion --> Audiovisual experience is the most optimal reading experience <br> Manga with summary descriptions for the chapters = Most aligned visual to audio experience")
+            with gr.Tab("LLM Prompts and games"):
+                gr.HTML(LLPromptIdeas)
 
 lliface.queue().launch() #(inbrowser="true")