# sf-5e9 / app.py
import streamlit as st
import numpy as np
from matplotlib import pyplot as plt
# Streamlit-Grammar Tokenizer
class GrammarTokenizer:
    """Scans an input string and records token spans keyed by character position."""
    WHITESPACE = (' ', '\t', '\n', '\r')

    def __init__(self, text):
        self.text = text or ""
        self.pos = 0
        self.tokens = {}
        self.tokenize()

    def tokenize(self):
        # Run each pass over the input in turn, collecting records into self.tokens.
        self.tokens = {}
        self.pos = 0
        self.processSignificantWord()
        self.processToken()
        self.processWhitespace()
        self.processLabel()
        self.processEmbeddedView()
        self.processOther()
    def processSignificantWord(self):
        # Walk the input, marking positions that start a significant word and
        # tokenizing the word that follows each one.
        while self.pos < len(self.text):
            if self.isSignificantWord():
                self.tokens.setdefault(self.pos, {}).setdefault('word', {})["significant"] = True
                self.pos += 1
                self.processToken()
            else:
                self.pos += 1

    def isSignificantWord(self):
        # A significant word starts with a non-whitespace character, and the
        # remaining input does not end in a line-continuation backslash.
        rest = self.text[self.pos:]
        return bool(rest) and rest[0] not in self.WHITESPACE and not rest.endswith('\\')
    def processToken(self):
        # Record the begin/end positions of a run of non-whitespace characters.
        if self.pos < len(self.text) and self.text[self.pos] not in self.WHITESPACE:
            self.tokens.setdefault(self.pos, {}).setdefault('token', {})["begin"] = self.pos
            self.pos += 1
            while self.pos < len(self.text) and self.text[self.pos] not in self.WHITESPACE:
                self.pos += 1
            self.tokens.setdefault(self.pos, {}).setdefault('token', {})["end"] = self.pos
    def processWhitespace(self):
        # Record the begin/end positions of a run of whitespace characters.
        if self.pos < len(self.text) and self.text[self.pos] in self.WHITESPACE:
            self.tokens.setdefault(self.pos, {}).setdefault('whitespace', {})["begin"] = self.pos
            self.pos += 1
            while self.pos < len(self.text) and self.text[self.pos] in self.WHITESPACE:
                self.pos += 1
            self.tokens.setdefault(self.pos, {}).setdefault('whitespace', {})["end"] = self.pos
    def processLabel(self):
        # A label starts with ':' and runs until whitespace or the next ':'.
        if self.pos < len(self.text) and self.text[self.pos] == ':':
            self.tokens.setdefault(self.pos, {}).setdefault('label', {})["begin"] = self.pos
            self.pos += 1
            while self.pos < len(self.text) and self.text[self.pos] not in self.WHITESPACE + (':',):
                self.pos += 1
            self.tokens.setdefault(self.pos, {}).setdefault('label', {})["end"] = self.pos
    def processEmbeddedView(self):
        # An embedded view is delimited by '{' ... '}'; skip to its first element.
        if self.pos < len(self.text) and self.text[self.pos] == '{':
            while self.pos < len(self.text) and self.text[self.pos] not in self.WHITESPACE + ('}',):
                self.pos += 1
            self.processViewElement()

    def processViewElement(self):
        # An empty view element is introduced by ';'.
        if self.pos < len(self.text) and self.text[self.pos] == ';':
            self.tokens.setdefault(self.pos, {}).setdefault('empty', {})["begin"] = True
            self.pos += 1
            while self.pos < len(self.text) and self.text[self.pos] not in self.WHITESPACE + ('}',):
                self.pos += 1
            self.tokens.setdefault(self.pos, {}).setdefault('empty', {})["end"] = True
    def processOther(self):
        # Consume whatever remains: whitespace, labels, embedded views, stray braces.
        while self.pos < len(self.text):
            ch = self.text[self.pos]
            if ch in self.WHITESPACE:
                self.pos += 1
                continue
            if ch == ':':
                self.processLabel()
                continue
            if ch == '{':
                self.processEmbeddedView()
                continue
            if ch == '}':
                self.pos += 1
                continue
            self.pos += 1
        self.tokens.setdefault('end', {})["reached"] = True
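# Illustrative usage (not part of the app flow): tokenize a short string and
# inspect the positional records it produces. The exact contents depend on the
# pass order in tokenize(); st.json is just one convenient way to display them.
#   demo = GrammarTokenizer("hello :world")
#   st.json(demo.tokens)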
# Streamlit-Grammar Converter
class GrammarConverter:
    """Post-processes tokenizer records, quoting punctuation ("punkt") entries."""
    def __init__(self, tokens):
        self.tokens = tokens
        self.convert()

    def convert(self):
        # The tokenizer yields a dict of position -> record; convert each record.
        for token in self.tokens.values():
            self.convertToken(token)

    def convertToken(self, token):
        # Move a punctuation record's raw text aside and quote the mark itself.
        if isinstance(token, dict) and 'punkt' in token:
            punkt = token.pop('punkt')
            token["batch"] = token.get("text", "")
            token["text"] = '"' + str(punkt) + '"'
# Streamlit-Grammar JSON
class GrammarJSON:
    """Convenience wrapper: tokenize a string (or reuse given tokens) and convert them."""
    def __init__(self, text, tokens=None):
        self.text = text
        self.grammar = tokens if tokens is not None else GrammarTokenizer(text).tokens
        self.converted = GrammarConverter(self.grammar).tokens
# Streamlit-Grammar Styler
class GrammarStyler:
    """Annotates each parsed record with a display style."""
    def __init__(self, parsed):
        self.parsed = parsed
        self.style()

    def style(self):
        # Apply the text, whitespace and label passes in turn and return the result.
        self.parsed = self.styleText()
        self.parsed = self.styleWhitespace()
        self.parsed = self.styleLabel()
        return self.parsed

    def records(self):
        # Only dict-valued records can carry a 'style' annotation.
        return [token for token in self.parsed.values() if isinstance(token, dict)]

    def styleText(self):
        # Every record gets a base style: "label" if it carries a label, else "text".
        for token in self.records():
            base = "label" if 'label' in token else "text"
            token['style'] = self.encodeStyle(token.get('style', "none"), base)
        return self.parsed

    def styleWhitespace(self):
        # Whitespace records get the "whitespace" style stacked on top.
        for token in self.records():
            if 'whitespace' in token:
                token['style'] = self.encodeStyle(token.get('style', "none"), "whitespace")
        return self.parsed

    def styleLabel(self):
        # Label records get the "label" style stacked on top.
        for token in self.records():
            if 'label' in token:
                token['style'] = self.encodeStyle(token.get('style', "none"), "label")
        return self.parsed

    def encodeStyle(self, style, newStyle):
        # Combine styles as "new+old"; "none" means there is no existing style.
        if style == "none":
            return newStyle
        return newStyle + "+" + style
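# Illustrative example: encodeStyle stacks styles left to right, so styling a
# label on top of existing text yields "label+text", and "none" means no base.
#   styler = GrammarStyler({})
#   styler.encodeStyle("text", "label")   # -> "label+text"
#   styler.encodeStyle("none", "text")    # -> "text"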
def main():
    # Show the raw text, then the tokenized / converted / styled records for it.
    text = "This is a simple todo list app.\n"
    st.write(text)
    tokens = GrammarTokenizer(text).tokens
    converted = GrammarConverter(tokens).tokens
    parsed = GrammarJSON(text, tokens).converted
    styled = GrammarStyler(parsed).style()
    st.write(styled)

if __name__ == "__main__":
    main()