diff --git a/.DS_Store b/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..c84534fe3a8d7af42a01e77f99f7c590305ff7b4 Binary files /dev/null and b/.DS_Store differ diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000000000000000000000000000000000000..bf07816c74bac9b682df196e02c6482e474e9b52 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,31 @@ +*.7z filter=lfs diff=lfs merge=lfs -text +*.arrow filter=lfs diff=lfs merge=lfs -text +*.bin filter=lfs diff=lfs merge=lfs -text +*.bz2 filter=lfs diff=lfs merge=lfs -text +*.ftz filter=lfs diff=lfs merge=lfs -text +*.gz filter=lfs diff=lfs merge=lfs -text +*.h5 filter=lfs diff=lfs merge=lfs -text +*.joblib filter=lfs diff=lfs merge=lfs -text +*.lfs.* filter=lfs diff=lfs merge=lfs -text +*.model filter=lfs diff=lfs merge=lfs -text +*.msgpack filter=lfs diff=lfs merge=lfs -text +*.npy filter=lfs diff=lfs merge=lfs -text +*.npz filter=lfs diff=lfs merge=lfs -text +*.onnx filter=lfs diff=lfs merge=lfs -text +*.ot filter=lfs diff=lfs merge=lfs -text +*.parquet filter=lfs diff=lfs merge=lfs -text +*.pickle filter=lfs diff=lfs merge=lfs -text +*.pkl filter=lfs diff=lfs merge=lfs -text +*.pb filter=lfs diff=lfs merge=lfs -text +*.pt filter=lfs diff=lfs merge=lfs -text +*.pth filter=lfs diff=lfs merge=lfs -text +*.rar filter=lfs diff=lfs merge=lfs -text +saved_model/**/* filter=lfs diff=lfs merge=lfs -text +*.tar.* filter=lfs diff=lfs merge=lfs -text +*.tflite filter=lfs diff=lfs merge=lfs -text +*.tgz filter=lfs diff=lfs merge=lfs -text +*.wasm filter=lfs diff=lfs merge=lfs -text +*.xz filter=lfs diff=lfs merge=lfs -text +*.zip filter=lfs diff=lfs merge=lfs -text +*.zstandard filter=lfs diff=lfs merge=lfs -text +*tfevents* filter=lfs diff=lfs merge=lfs -text diff --git a/.idea/.gitignore b/.idea/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..13566b81b018ad684f3a35fee301741b2734c8f4 --- /dev/null +++ b/.idea/.gitignore @@ -0,0 +1,8 @@ +# Default ignored files +/shelf/ +/workspace.xml +# Editor-based HTTP Client requests +/httpRequests/ +# Datasource local storage ignored files +/dataSources/ +/dataSources.local.xml diff --git a/.idea/html-code-generation-from-images-with-deep-neural-networks.iml b/.idea/html-code-generation-from-images-with-deep-neural-networks.iml new file mode 100644 index 0000000000000000000000000000000000000000..74d515a027de98657e9d3d5f0f1831882fd81374 --- /dev/null +++ b/.idea/html-code-generation-from-images-with-deep-neural-networks.iml @@ -0,0 +1,10 @@ + + + + + + + + + + \ No newline at end of file diff --git a/.idea/inspectionProfiles/Project_Default.xml b/.idea/inspectionProfiles/Project_Default.xml new file mode 100644 index 0000000000000000000000000000000000000000..eda176a234091f8fc14916c2f8c97f1298f0e933 --- /dev/null +++ b/.idea/inspectionProfiles/Project_Default.xml @@ -0,0 +1,89 @@ + + + + \ No newline at end of file diff --git a/.idea/inspectionProfiles/profiles_settings.xml b/.idea/inspectionProfiles/profiles_settings.xml new file mode 100644 index 0000000000000000000000000000000000000000..105ce2da2d6447d11dfe32bfb846c3d5b199fc99 --- /dev/null +++ b/.idea/inspectionProfiles/profiles_settings.xml @@ -0,0 +1,6 @@ + + + + \ No newline at end of file diff --git a/.idea/misc.xml b/.idea/misc.xml new file mode 100644 index 0000000000000000000000000000000000000000..8591f264bff09854907601c45e4c219aa4ef80e3 --- /dev/null +++ b/.idea/misc.xml @@ -0,0 +1,4 @@ + + + + \ No newline at end of file diff --git a/.idea/modules.xml b/.idea/modules.xml new 
file mode 100644 index 0000000000000000000000000000000000000000..624821f1e83951b2941492b64cfe578e0f142dc1 --- /dev/null +++ b/.idea/modules.xml @@ -0,0 +1,8 @@ + + + + + + + + \ No newline at end of file diff --git a/.idea/vcs.xml b/.idea/vcs.xml new file mode 100644 index 0000000000000000000000000000000000000000..94a25f7f4cb416c083d265558da75d457237d671 --- /dev/null +++ b/.idea/vcs.xml @@ -0,0 +1,6 @@ + + + + + + \ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 0000000000000000000000000000000000000000..759995579c16ebbe1887add04be3ceba493e6ba7 --- /dev/null +++ b/README.md @@ -0,0 +1,14 @@ +--- +title: Image to HTML Code Demo +emoji: 🧑‍💻 +colorFrom: pink +colorTo: purple +sdk: gradio +sdk_version: 3.1.4 +app_file: app.py +pinned: false +license: afl-3.0 +duplicated_from: taneemishere/html-code-generation-from-images-with-deep-neural-networks +--- + +Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/__pycache__/app-with_examples.cpython-38.pyc b/__pycache__/app-with_examples.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..631b8925427f863c345ccded835a5cf3281ab096 Binary files /dev/null and b/__pycache__/app-with_examples.cpython-38.pyc differ diff --git a/__pycache__/app.cpython-38.pyc b/__pycache__/app.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..510fad91d6f339ea9756b76f587fad684558f6f3 Binary files /dev/null and b/__pycache__/app.cpython-38.pyc differ diff --git a/__pycache__/main_program.cpython-38.pyc b/__pycache__/main_program.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..39841824703dccb04afa5edc9a1f9d012f949428 Binary files /dev/null and b/__pycache__/main_program.cpython-38.pyc differ diff --git a/app.py b/app.py new file mode 100644 index 0000000000000000000000000000000000000000..0a34aaa4ec372e9867e07aa1029d38d513a25950 --- /dev/null +++ b/app.py @@ -0,0 +1,54 @@ +__author__ = 'Taneem Jan, taneemishere.github.io' + +import gradio as gr +import main_program + + +# our model's i/o method that take image from gradio interface's inputs.Image() +def model_interface(image): + return main_model(image) + + +# main method that call the main_program where code is generated and then compiled +def main_model(input_image): + result = main_program.main_method(input_image) + return result + + +interface_title = "

HTML Code Generation from Images with Deep Neural Networks

" +interface_description = """

Writing the code for a designed mockup or a graphical user interface created by designers and UI engineers is mostly done by developers in order to build custom websites and software. This development work is not approachable for people unfamiliar with programming, so to enable these designers to produce code bases and website structures themselves, we came up with an automated system. In this work, we show that deep learning and computer vision methods can be harnessed to train a model that automatically generates HTML code from a single input mockup image, and we try to build an end-to-end automated system that develops the structure of web pages with higher accuracy than previous work.

""" + +interface_article = """

Limitations of the Model

The model has certain limitations, some of which are listed below

+


Developed by Taneem Jan
Paper     +Code
""" + +interface_examples = ['examples/example-1.png', 'examples/example-2.png', 'examples/example-3.png'] + +# a gradio interface to convert a image to HTML Code +interface = gr.Interface( + model_interface, + inputs='image', + outputs='text', + allow_flagging="manual", + title=interface_title, + description=interface_description, + article=interface_article, + examples=interface_examples +) + +interface.launch(share=False) diff --git a/classes/.DS_Store b/classes/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..188e843762f62bcbfc3e8bd0274983be151e7b18 Binary files /dev/null and b/classes/.DS_Store differ diff --git a/classes/Sampler.py b/classes/Sampler.py new file mode 100644 index 0000000000000000000000000000000000000000..629605b92b14cc1dcc14b746b4901ff360d31e57 --- /dev/null +++ b/classes/Sampler.py @@ -0,0 +1,59 @@ +from __future__ import print_function +from __future__ import absolute_import +__author__ = 'Taneem Jan, taneemishere.github.io' + +from .Vocabulary import * +from .Utils import * + + +class Sampler: + def __init__(self, voc_path, input_shape, output_size, context_length): + self.voc = Vocabulary() + self.voc.retrieve(voc_path) + + self.input_shape = input_shape + self.output_size = output_size + + print("Vocabulary size: {}".format(self.voc.size)) + print("Input shape: {}".format(self.input_shape)) + print("Output size: {}".format(self.output_size)) + + self.context_length = context_length + + def predict_greedy(self, model, input_img, require_sparse_label=True, sequence_length=150, verbose=False): + current_context = [self.voc.vocabulary[PLACEHOLDER]] * (self.context_length - 1) + current_context.append(self.voc.vocabulary[START_TOKEN]) + if require_sparse_label: + current_context = Utils.sparsify(current_context, self.output_size) + + predictions = START_TOKEN + out_probas = [] + + for i in range(0, sequence_length): + if verbose: + print("predicting {}/{}...".format(i, sequence_length)) + + probas = model.predict(input_img, np.array([current_context])) + prediction = np.argmax(probas) + out_probas.append(probas) + + new_context = [] + for j in range(1, self.context_length): + new_context.append(current_context[j]) + + if require_sparse_label: + sparse_label = np.zeros(self.output_size) + sparse_label[prediction] = 1 + new_context.append(sparse_label) + else: + new_context.append(prediction) + + current_context = new_context + + predictions += self.voc.token_lookup[prediction] + + if self.voc.token_lookup[prediction] == END_TOKEN: + break + + return predictions, out_probas + diff --git a/classes/Utils.py b/classes/Utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a7aff300380f793c941953e0d63ddf6d71281592 --- /dev/null +++ b/classes/Utils.py @@ -0,0 +1,39 @@ +__author__ = 'Taneem Jan, taneemishere.github.io' + +import numpy as np + + +class Utils: + @staticmethod + def sparsify(label_vector, output_size): + sparse_vector = [] + + for label in label_vector: + sparse_label = np.zeros(output_size) + sparse_label[label] = 1 + + sparse_vector.append(sparse_label) + + return np.array(sparse_vector) + + @staticmethod + def get_preprocessed_img(img_path, image_size): + import cv2 + # from keras.preprocessing.image import array_to_img, img_to_array + # img = array_to_img(img_path) + # img = img_to_array(img) + # img = cv2.imread(img_path) + # don't need to read the image as we're now directly passing the + # image as numpy array to this method + img = cv2.resize(img_path, (image_size, image_size)) + img = 
img.astype('float32') + img /= 255 + return img + + @staticmethod + def show(image): + import cv2 + cv2.namedWindow("view", cv2.WINDOW_AUTOSIZE) + cv2.imshow("view", image) + cv2.waitKey(0) + cv2.destroyWindow("view") diff --git a/classes/Vocabulary.py b/classes/Vocabulary.py new file mode 100644 index 0000000000000000000000000000000000000000..3b79c96dbf5200852ece221cdd9a60bfbf0865ab --- /dev/null +++ b/classes/Vocabulary.py @@ -0,0 +1,78 @@ +__author__ = 'Taneem Jan, taneemishere.github.io' + +import sys +import numpy as np + +START_TOKEN = "" +END_TOKEN = "" +PLACEHOLDER = " " +SEPARATOR = '->' + + +class Vocabulary: + def __init__(self): + self.binary_vocabulary = {} + self.vocabulary = {} + self.token_lookup = {} + self.size = 0 + + self.append(START_TOKEN) + self.append(END_TOKEN) + self.append(PLACEHOLDER) + + def append(self, token): + if token not in self.vocabulary: + self.vocabulary[token] = self.size + self.token_lookup[self.size] = token + self.size += 1 + + def create_binary_representation(self): + if sys.version_info >= (3,): + items = self.vocabulary.items() + else: + items = self.vocabulary.iteritems() + for key, value in items: + binary = np.zeros(self.size) + binary[value] = 1 + self.binary_vocabulary[key] = binary + + def get_serialized_binary_representation(self): + if len(self.binary_vocabulary) == 0: + self.create_binary_representation() + + string = "" + if sys.version_info >= (3,): + items = self.binary_vocabulary.items() + else: + items = self.binary_vocabulary.iteritems() + for key, value in items: + array_as_string = np.array2string(value, separator=',', max_line_width=self.size * self.size) + string += "{}{}{}\n".format(key, SEPARATOR, array_as_string[1:len(array_as_string) - 1]) + return string + + def save(self, path): + output_file_name = "{}/words.vocab".format(path) + output_file = open(output_file_name, 'w') + output_file.write(self.get_serialized_binary_representation()) + output_file.close() + + def retrieve(self, path): + input_file = open("{}/words.vocab".format(path), 'r') + buffer = "" + for line in input_file: + try: + separator_position = len(buffer) + line.index(SEPARATOR) + buffer += line + key = buffer[:separator_position] + value = buffer[separator_position + len(SEPARATOR):] + value = np.fromstring(value, sep=',') + + self.binary_vocabulary[key] = value + self.vocabulary[key] = np.where(value == 1)[0][0] + self.token_lookup[np.where(value == 1)[0][0]] = key + + buffer = "" + except ValueError: + buffer += line + input_file.close() + self.size = len(self.vocabulary) diff --git a/classes/__init__.py b/classes/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/classes/__pycache__/BeamSearch.cpython-35.pyc b/classes/__pycache__/BeamSearch.cpython-35.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3958da0d8a41bd06113468ab8672291190d71717 Binary files /dev/null and b/classes/__pycache__/BeamSearch.cpython-35.pyc differ diff --git a/classes/__pycache__/BeamSearch.cpython-38.pyc b/classes/__pycache__/BeamSearch.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b92b3c3c798a196fd69769ed629560695ea4753 Binary files /dev/null and b/classes/__pycache__/BeamSearch.cpython-38.pyc differ diff --git a/classes/__pycache__/BeamSearch.cpython-39.pyc b/classes/__pycache__/BeamSearch.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..97108c6147f58fe0c32014bce00c2882767475a4 Binary files 
/dev/null and b/classes/__pycache__/BeamSearch.cpython-39.pyc differ diff --git a/classes/__pycache__/Sampler.cpython-35.pyc b/classes/__pycache__/Sampler.cpython-35.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a81725051ffb057e2aa2a26c4ac7683626d1b95 Binary files /dev/null and b/classes/__pycache__/Sampler.cpython-35.pyc differ diff --git a/classes/__pycache__/Sampler.cpython-38.pyc b/classes/__pycache__/Sampler.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c5800c637b774e5a9459d8fff407d15a099666b3 Binary files /dev/null and b/classes/__pycache__/Sampler.cpython-38.pyc differ diff --git a/classes/__pycache__/Sampler.cpython-39.pyc b/classes/__pycache__/Sampler.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dfa8cbb35430d1f93e685aefd01aa7e288f8b5e3 Binary files /dev/null and b/classes/__pycache__/Sampler.cpython-39.pyc differ diff --git a/classes/__pycache__/Utils.cpython-35.pyc b/classes/__pycache__/Utils.cpython-35.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2cf9d83842eeccf103752bdbf9c2d1a6e47e09ea Binary files /dev/null and b/classes/__pycache__/Utils.cpython-35.pyc differ diff --git a/classes/__pycache__/Utils.cpython-38.pyc b/classes/__pycache__/Utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..669c0586962c09b906269f4388cfca9bef84fea8 Binary files /dev/null and b/classes/__pycache__/Utils.cpython-38.pyc differ diff --git a/classes/__pycache__/Utils.cpython-39.pyc b/classes/__pycache__/Utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c043348897a2660f115f7644c4e4da8714d361cf Binary files /dev/null and b/classes/__pycache__/Utils.cpython-39.pyc differ diff --git a/classes/__pycache__/Vocabulary.cpython-35.pyc b/classes/__pycache__/Vocabulary.cpython-35.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c3c91c9ff54126b2f5dfe33cabf60d0915c1aa99 Binary files /dev/null and b/classes/__pycache__/Vocabulary.cpython-35.pyc differ diff --git a/classes/__pycache__/Vocabulary.cpython-38.pyc b/classes/__pycache__/Vocabulary.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7edba6c128d12303ee0206cf7dc68371e4cb7088 Binary files /dev/null and b/classes/__pycache__/Vocabulary.cpython-38.pyc differ diff --git a/classes/__pycache__/Vocabulary.cpython-39.pyc b/classes/__pycache__/Vocabulary.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..455f59811f7b45b740873f74b2f06a4d6cae0b48 Binary files /dev/null and b/classes/__pycache__/Vocabulary.cpython-39.pyc differ diff --git a/classes/__pycache__/__init__.cpython-35.pyc b/classes/__pycache__/__init__.cpython-35.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1e13cb194af535e100671c575526a2430dc69d9f Binary files /dev/null and b/classes/__pycache__/__init__.cpython-35.pyc differ diff --git a/classes/__pycache__/__init__.cpython-38.pyc b/classes/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff47b0328255c7bae070838187ade905d673fc8f Binary files /dev/null and b/classes/__pycache__/__init__.cpython-38.pyc differ diff --git a/classes/__pycache__/__init__.cpython-39.pyc b/classes/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14b7332a0833d4d79745b6dbda920e7be9a3f438 Binary files /dev/null and 
b/classes/__pycache__/__init__.cpython-39.pyc differ diff --git a/classes/model/.DS_Store b/classes/model/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..679b956c2c5ea8b882d000f5df74e37c0679c593 Binary files /dev/null and b/classes/model/.DS_Store differ diff --git a/classes/model/AModel.py b/classes/model/AModel.py new file mode 100644 index 0000000000000000000000000000000000000000..ddfc945e8eaeb0088bcaf1976a59b2c20d114f32 --- /dev/null +++ b/classes/model/AModel.py @@ -0,0 +1,25 @@ +__author__ = 'Taneem Jan, taneemishere.github.io' + +from keras.models import model_from_json + + +class AModel: + def __init__(self, input_shape, output_size, output_path): + self.model = None + self.input_shape = input_shape + self.output_size = output_size + self.output_path = output_path + self.name = "" + + def save(self): + model_json = self.model.to_json() + with open("{}/{}.json".format(self.output_path, self.name), "w") as json_file: + json_file.write(model_json) + self.model.save_weights("{}/{}.h5".format(self.output_path, self.name)) + + def load(self, name=""): + output_name = self.name if name == "" else name + with open("{}/{}.json".format(self.output_path, output_name), "r") as json_file: + loaded_model_json = json_file.read() + self.model = model_from_json(loaded_model_json) + self.model.load_weights("{}/{}.h5".format(self.output_path, output_name)) diff --git a/classes/model/Config.py b/classes/model/Config.py new file mode 100644 index 0000000000000000000000000000000000000000..9610d2db7d3396ab9ce463b5b14dfaff19448ac1 --- /dev/null +++ b/classes/model/Config.py @@ -0,0 +1,7 @@ +__author__ = 'Taneem Jan, taneemishere.github.io' + +CONTEXT_LENGTH = 48 +IMAGE_SIZE = 256 +BATCH_SIZE = 64 +EPOCHS = 10 +STEPS_PER_EPOCH = 72000 diff --git a/classes/model/Main_Model.py b/classes/model/Main_Model.py new file mode 100644 index 0000000000000000000000000000000000000000..e3ba6aae6965dd51ed4ebb959828778b5950d9e2 --- /dev/null +++ b/classes/model/Main_Model.py @@ -0,0 +1,71 @@ +__author__ = 'Taneem Jan, improved the old model through pretrained Auto-encoders' + +from keras.layers import Input, Dense, Dropout, RepeatVector, LSTM, concatenate, Flatten +from keras.models import Sequential, Model +from tensorflow.keras.optimizers import RMSprop +from .Config import * +from .AModel import * +from .autoencoder_image import * + + +class Main_Model(AModel): + def __init__(self, input_shape, output_size, output_path): + AModel.__init__(self, input_shape, output_size, output_path) + self.name = "Main_Model" + + visual_input = Input(shape=input_shape) + + # Load the pre-trained autoencoder model + autoencoder_model = autoencoder_image(input_shape, input_shape, output_path) + autoencoder_model.load('autoencoder') + path = "classes/model/bin/" + path_to_autoencoder = "{}autoencoder.h5".format(path) + autoencoder_model.model.load_weights(path_to_autoencoder) + # Get only the model up to the encoded part + hidden_layer_model_freeze = Model( + inputs=autoencoder_model.model.input, + outputs=autoencoder_model.model.get_layer('encoded_layer').output + ) + hidden_layer_input = hidden_layer_model_freeze(visual_input) + + # Additional layers before concatenation + hidden_layer_model = Flatten()(hidden_layer_input) + hidden_layer_model = Dense(1024, activation='relu')(hidden_layer_model) + hidden_layer_model = Dropout(0.3)(hidden_layer_model) + hidden_layer_model = Dense(1024, activation='relu')(hidden_layer_model) + hidden_layer_model = Dropout(0.3)(hidden_layer_model) + hidden_layer_result = 
RepeatVector(CONTEXT_LENGTH)(hidden_layer_model) + + # Making sure the loaded hidden_layer_model_freeze will no longer be updated + for layer in hidden_layer_model_freeze.layers: + layer.trainable = False + + # The same language model that of pix2code by Tony Beltramelli + language_model = Sequential() + language_model.add(LSTM(128, return_sequences=True, input_shape=(CONTEXT_LENGTH, output_size))) + language_model.add(LSTM(128, return_sequences=True)) + + textual_input = Input(shape=(CONTEXT_LENGTH, output_size)) + encoded_text = language_model(textual_input) + + decoder = concatenate([hidden_layer_result, encoded_text]) + + decoder = LSTM(512, return_sequences=True)(decoder) + decoder = LSTM(512, return_sequences=False)(decoder) + decoder = Dense(output_size, activation='softmax')(decoder) + + self.model = Model(inputs=[visual_input, textual_input], outputs=decoder) + + optimizer = RMSprop(learning_rate=0.0001, clipvalue=1.0) + self.model.compile(loss='categorical_crossentropy', optimizer=optimizer) + + def fit_generator(self, generator, steps_per_epoch): + # self.model.summary() + self.model.fit_generator(generator, steps_per_epoch=steps_per_epoch, epochs=EPOCHS, verbose=1) + self.save() + + def predict(self, image, partial_caption): + return self.model.predict([image, partial_caption], verbose=0)[0] + + def predict_batch(self, images, partial_captions): + return self.model.predict([images, partial_captions], verbose=1) diff --git a/classes/model/__init__.py b/classes/model/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/classes/model/__pycache__/AModel.cpython-35.pyc b/classes/model/__pycache__/AModel.cpython-35.pyc new file mode 100644 index 0000000000000000000000000000000000000000..250f9058b96a6549a0e6314a103f75c82b10e75e Binary files /dev/null and b/classes/model/__pycache__/AModel.cpython-35.pyc differ diff --git a/classes/model/__pycache__/AModel.cpython-38.pyc b/classes/model/__pycache__/AModel.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..375284141b8d49fe71f6477efde0b6dbe5131879 Binary files /dev/null and b/classes/model/__pycache__/AModel.cpython-38.pyc differ diff --git a/classes/model/__pycache__/Config.cpython-35.pyc b/classes/model/__pycache__/Config.cpython-35.pyc new file mode 100644 index 0000000000000000000000000000000000000000..38951dc509a0a0210d92507d38b898d586d8ad17 Binary files /dev/null and b/classes/model/__pycache__/Config.cpython-35.pyc differ diff --git a/classes/model/__pycache__/Config.cpython-38.pyc b/classes/model/__pycache__/Config.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c57b0b20b596b8822497496d1ef62b74239ce303 Binary files /dev/null and b/classes/model/__pycache__/Config.cpython-38.pyc differ diff --git a/classes/model/__pycache__/Main_Model.cpython-38.pyc b/classes/model/__pycache__/Main_Model.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..02f955eb3d8c2c055d4c87a23887ef69dee50285 Binary files /dev/null and b/classes/model/__pycache__/Main_Model.cpython-38.pyc differ diff --git a/classes/model/__pycache__/__init__.cpython-35.pyc b/classes/model/__pycache__/__init__.cpython-35.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d5a520d7d4d80a02f155c4e23d021ccc60227771 Binary files /dev/null and b/classes/model/__pycache__/__init__.cpython-35.pyc differ diff --git a/classes/model/__pycache__/__init__.cpython-38.pyc 
b/classes/model/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0899786f8832f40974e8757a72a6ca66e1369722 Binary files /dev/null and b/classes/model/__pycache__/__init__.cpython-38.pyc differ diff --git a/classes/model/__pycache__/__init__.cpython-39.pyc b/classes/model/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9a2b419f63a1f119d544e849ec22988765b941cc Binary files /dev/null and b/classes/model/__pycache__/__init__.cpython-39.pyc differ diff --git a/classes/model/__pycache__/autoencoder_image.cpython-35.pyc b/classes/model/__pycache__/autoencoder_image.cpython-35.pyc new file mode 100644 index 0000000000000000000000000000000000000000..37f2362f796f6dc68a5ed50b3ebf656f8225e982 Binary files /dev/null and b/classes/model/__pycache__/autoencoder_image.cpython-35.pyc differ diff --git a/classes/model/__pycache__/autoencoder_image.cpython-38.pyc b/classes/model/__pycache__/autoencoder_image.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..822dcba5632b01846e3f19b76bd1282fc7fa8d24 Binary files /dev/null and b/classes/model/__pycache__/autoencoder_image.cpython-38.pyc differ diff --git a/classes/model/__pycache__/pix2code.cpython-35.pyc b/classes/model/__pycache__/pix2code.cpython-35.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bc7d7abe2e9ee613333a164bf3e37162caa0ab8e Binary files /dev/null and b/classes/model/__pycache__/pix2code.cpython-35.pyc differ diff --git a/classes/model/autoencoder_image.py b/classes/model/autoencoder_image.py new file mode 100644 index 0000000000000000000000000000000000000000..f4ddc426c2abee8a4e10d5a2b0b6e69e50df3ee0 --- /dev/null +++ b/classes/model/autoencoder_image.py @@ -0,0 +1,59 @@ +__author__ = 'Taneem Jan, improved the old model through pretrained Auto-encoders' + +from keras.layers import Input, Dropout, Conv2D, MaxPooling2D, Conv2DTranspose, UpSampling2D +from keras.models import Model +from .Config import * +from .AModel import * + + +class autoencoder_image(AModel): + def __init__(self, input_shape, output_size, output_path): + AModel.__init__(self, input_shape, output_size, output_path) + self.name = 'autoencoder' + + input_image = Input(shape=input_shape) + encoder = Conv2D(32, 3, padding='same', activation='relu')(input_image) + encoder = Conv2D(32, 3, padding='same', activation='relu')(encoder) + encoder = MaxPooling2D()(encoder) + encoder = Dropout(0.25)(encoder) + + encoder = Conv2D(64, 3, padding='same', activation='relu')(encoder) + encoder = Conv2D(64, 3, padding='same', activation='relu')(encoder) + encoder = MaxPooling2D()(encoder) + encoder = Dropout(0.25)(encoder) + + encoder = Conv2D(128, 3, padding='same', activation='relu')(encoder) + encoder = Conv2D(128, 3, padding='same', activation='relu')(encoder) + encoder = MaxPooling2D()(encoder) + encoded = Dropout(0.25, name='encoded_layer')(encoder) + + decoder = Conv2DTranspose(128, 3, padding='same', activation='relu')(encoded) + decoder = Conv2DTranspose(128, 3, padding='same', activation='relu')(decoder) + decoder = UpSampling2D()(decoder) + decoder = Dropout(0.25)(decoder) + + decoder = Conv2DTranspose(64, 3, padding='same', activation='relu')(decoder) + decoder = Conv2DTranspose(64, 3, padding='same', activation='relu')(decoder) + decoder = UpSampling2D()(decoder) + decoder = Dropout(0.25)(decoder) + + decoder = Conv2DTranspose(32, 3, padding='same', activation='relu')(decoder) + decoder = Conv2DTranspose(3, 3, 
padding='same', activation='relu')(decoder) + decoder = UpSampling2D()(decoder) + decoded = Dropout(0.25)(decoder) + + # decoder = Dense(256*256*3)(decoder) + # decoded = Reshape(target_shape=input_shape)(decoder) + + self.model = Model(input_image, decoded) + self.model.compile(optimizer='adadelta', loss='binary_crossentropy') + + # self.model.summary() + + def fit_generator(self, generator, steps_per_epoch): + self.model.fit_generator(generator, steps_per_epoch=steps_per_epoch, epochs=EPOCHS, verbose=1) + self.save() + + def predict_hidden(self, images): + hidden_layer_model = Model(inputs=self.input, outputs=self.get_layer('encoded_layer').output) + return hidden_layer_model.predict(images) diff --git a/classes/model/bin/.DS_Store b/classes/model/bin/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..2873e37a9f1792fff7676dd52439ce2cd08fe2f0 Binary files /dev/null and b/classes/model/bin/.DS_Store differ diff --git a/classes/model/bin/Main_Model.h5 b/classes/model/bin/Main_Model.h5 new file mode 100644 index 0000000000000000000000000000000000000000..2be73211d3e6c2d871d31dc00ae9cdbb88d87ac9 --- /dev/null +++ b/classes/model/bin/Main_Model.h5 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:383193660ed3f29a378cec7afa7fb87ffbb938c7122c977fbb6bc3db8a0d8523 +size 565169576 diff --git a/classes/model/bin/Main_Model.json b/classes/model/bin/Main_Model.json new file mode 100644 index 0000000000000000000000000000000000000000..cf9d1a858a68c2ce29b193b9e8f4041bc6348b18 --- /dev/null +++ b/classes/model/bin/Main_Model.json @@ -0,0 +1 @@ +{"config": {"name": "model_3", "layers": [{"name": "input_1", "config": {"dtype": "float32", "sparse": false, "batch_input_shape": [null, 256, 256, 3], "name": "input_1"}, "inbound_nodes": [], "class_name": "InputLayer"}, {"name": "model_2", "config": {"name": "model_2", "layers": [{"name": "input_1", "config": {"dtype": "float32", "sparse": false, "batch_input_shape": [null, 256, 256, 3], "name": "input_1"}, "inbound_nodes": [], "class_name": "InputLayer"}, {"name": "conv2d_1", "config": {"kernel_initializer": {"config": {"scale": 1.0, "distribution": "uniform", "seed": null, "mode": "fan_avg"}, "class_name": "VarianceScaling"}, "strides": [1, 1], "use_bias": true, "padding": "same", "bias_regularizer": null, "name": "conv2d_1", "data_format": "channels_last", "trainable": false, "bias_initializer": {"config": {}, "class_name": "Zeros"}, "filters": 32, "kernel_size": [3, 3], "bias_constraint": null, "kernel_regularizer": null, "kernel_constraint": null, "dilation_rate": [1, 1], "activity_regularizer": null, "activation": "relu"}, "inbound_nodes": [[["input_1", 0, 0, {}]]], "class_name": "Conv2D"}, {"name": "conv2d_2", "config": {"kernel_initializer": {"config": {"scale": 1.0, "distribution": "uniform", "seed": null, "mode": "fan_avg"}, "class_name": "VarianceScaling"}, "strides": [1, 1], "use_bias": true, "padding": "same", "bias_regularizer": null, "name": "conv2d_2", "data_format": "channels_last", "trainable": false, "bias_initializer": {"config": {}, "class_name": "Zeros"}, "filters": 32, "kernel_size": [3, 3], "bias_constraint": null, "kernel_regularizer": null, "kernel_constraint": null, "dilation_rate": [1, 1], "activity_regularizer": null, "activation": "relu"}, "inbound_nodes": [[["conv2d_1", 0, 0, {}]]], "class_name": "Conv2D"}, {"name": "max_pooling2d_1", "config": {"strides": [2, 2], "name": "max_pooling2d_1", "data_format": "channels_last", "trainable": false, "pool_size": [2, 2], "padding": "valid"}, 
"inbound_nodes": [[["conv2d_2", 0, 0, {}]]], "class_name": "MaxPooling2D"}, {"name": "dropout_1", "config": {"name": "dropout_1", "trainable": false, "noise_shape": null, "seed": null, "rate": 0.25}, "inbound_nodes": [[["max_pooling2d_1", 0, 0, {}]]], "class_name": "Dropout"}, {"name": "conv2d_3", "config": {"kernel_initializer": {"config": {"scale": 1.0, "distribution": "uniform", "seed": null, "mode": "fan_avg"}, "class_name": "VarianceScaling"}, "strides": [1, 1], "use_bias": true, "padding": "same", "bias_regularizer": null, "name": "conv2d_3", "data_format": "channels_last", "trainable": false, "bias_initializer": {"config": {}, "class_name": "Zeros"}, "filters": 64, "kernel_size": [3, 3], "bias_constraint": null, "kernel_regularizer": null, "kernel_constraint": null, "dilation_rate": [1, 1], "activity_regularizer": null, "activation": "relu"}, "inbound_nodes": [[["dropout_1", 0, 0, {}]]], "class_name": "Conv2D"}, {"name": "conv2d_4", "config": {"kernel_initializer": {"config": {"scale": 1.0, "distribution": "uniform", "seed": null, "mode": "fan_avg"}, "class_name": "VarianceScaling"}, "strides": [1, 1], "use_bias": true, "padding": "same", "bias_regularizer": null, "name": "conv2d_4", "data_format": "channels_last", "trainable": false, "bias_initializer": {"config": {}, "class_name": "Zeros"}, "filters": 64, "kernel_size": [3, 3], "bias_constraint": null, "kernel_regularizer": null, "kernel_constraint": null, "dilation_rate": [1, 1], "activity_regularizer": null, "activation": "relu"}, "inbound_nodes": [[["conv2d_3", 0, 0, {}]]], "class_name": "Conv2D"}, {"name": "max_pooling2d_2", "config": {"strides": [2, 2], "name": "max_pooling2d_2", "data_format": "channels_last", "trainable": false, "pool_size": [2, 2], "padding": "valid"}, "inbound_nodes": [[["conv2d_4", 0, 0, {}]]], "class_name": "MaxPooling2D"}, {"name": "dropout_2", "config": {"name": "dropout_2", "trainable": false, "noise_shape": null, "seed": null, "rate": 0.25}, "inbound_nodes": [[["max_pooling2d_2", 0, 0, {}]]], "class_name": "Dropout"}, {"name": "conv2d_5", "config": {"kernel_initializer": {"config": {"scale": 1.0, "distribution": "uniform", "seed": null, "mode": "fan_avg"}, "class_name": "VarianceScaling"}, "strides": [1, 1], "use_bias": true, "padding": "same", "bias_regularizer": null, "name": "conv2d_5", "data_format": "channels_last", "trainable": false, "bias_initializer": {"config": {}, "class_name": "Zeros"}, "filters": 128, "kernel_size": [3, 3], "bias_constraint": null, "kernel_regularizer": null, "kernel_constraint": null, "dilation_rate": [1, 1], "activity_regularizer": null, "activation": "relu"}, "inbound_nodes": [[["dropout_2", 0, 0, {}]]], "class_name": "Conv2D"}, {"name": "conv2d_6", "config": {"kernel_initializer": {"config": {"scale": 1.0, "distribution": "uniform", "seed": null, "mode": "fan_avg"}, "class_name": "VarianceScaling"}, "strides": [1, 1], "use_bias": true, "padding": "same", "bias_regularizer": null, "name": "conv2d_6", "data_format": "channels_last", "trainable": false, "bias_initializer": {"config": {}, "class_name": "Zeros"}, "filters": 128, "kernel_size": [3, 3], "bias_constraint": null, "kernel_regularizer": null, "kernel_constraint": null, "dilation_rate": [1, 1], "activity_regularizer": null, "activation": "relu"}, "inbound_nodes": [[["conv2d_5", 0, 0, {}]]], "class_name": "Conv2D"}, {"name": "max_pooling2d_3", "config": {"strides": [2, 2], "name": "max_pooling2d_3", "data_format": "channels_last", "trainable": false, "pool_size": [2, 2], "padding": "valid"}, "inbound_nodes": 
[[["conv2d_6", 0, 0, {}]]], "class_name": "MaxPooling2D"}, {"name": "encoded_layer", "config": {"name": "encoded_layer", "trainable": false, "noise_shape": null, "seed": null, "rate": 0.25}, "inbound_nodes": [[["max_pooling2d_3", 0, 0, {}]]], "class_name": "Dropout"}], "input_layers": [["input_1", 0, 0]], "output_layers": [["encoded_layer", 0, 0]]}, "inbound_nodes": [[["input_1", 0, 0, {}]]], "class_name": "Model"}, {"name": "flatten_1", "config": {"name": "flatten_1", "trainable": true}, "inbound_nodes": [[["model_2", 1, 0, {}]]], "class_name": "Flatten"}, {"name": "dense_1", "config": {"units": 1024, "kernel_initializer": {"config": {"scale": 1.0, "distribution": "uniform", "seed": null, "mode": "fan_avg"}, "class_name": "VarianceScaling"}, "use_bias": true, "bias_regularizer": null, "name": "dense_1", "trainable": true, "bias_initializer": {"config": {}, "class_name": "Zeros"}, "bias_constraint": null, "kernel_regularizer": null, "kernel_constraint": null, "activity_regularizer": null, "activation": "relu"}, "inbound_nodes": [[["flatten_1", 0, 0, {}]]], "class_name": "Dense"}, {"name": "dropout_6", "config": {"name": "dropout_6", "trainable": true, "noise_shape": null, "seed": null, "rate": 0.3}, "inbound_nodes": [[["dense_1", 0, 0, {}]]], "class_name": "Dropout"}, {"name": "dense_2", "config": {"units": 1024, "kernel_initializer": {"config": {"scale": 1.0, "distribution": "uniform", "seed": null, "mode": "fan_avg"}, "class_name": "VarianceScaling"}, "use_bias": true, "bias_regularizer": null, "name": "dense_2", "trainable": true, "bias_initializer": {"config": {}, "class_name": "Zeros"}, "bias_constraint": null, "kernel_regularizer": null, "kernel_constraint": null, "activity_regularizer": null, "activation": "relu"}, "inbound_nodes": [[["dropout_6", 0, 0, {}]]], "class_name": "Dense"}, {"name": "dropout_7", "config": {"name": "dropout_7", "trainable": true, "noise_shape": null, "seed": null, "rate": 0.3}, "inbound_nodes": [[["dense_2", 0, 0, {}]]], "class_name": "Dropout"}, {"name": "input_3", "config": {"dtype": "float32", "sparse": false, "batch_input_shape": [null, 48, 19], "name": "input_3"}, "inbound_nodes": [], "class_name": "InputLayer"}, {"name": "repeat_vector_1", "config": {"name": "repeat_vector_1", "trainable": true, "n": 48}, "inbound_nodes": [[["dropout_7", 0, 0, {}]]], "class_name": "RepeatVector"}, {"name": "sequential_1", "config": [{"config": {"recurrent_dropout": 0.0, "kernel_initializer": {"config": {"scale": 1.0, "distribution": "uniform", "seed": null, "mode": "fan_avg"}, "class_name": "VarianceScaling"}, "go_backwards": false, "use_bias": true, "bias_regularizer": null, "recurrent_initializer": {"config": {"gain": 1.0, "seed": null}, "class_name": "Orthogonal"}, "trainable": true, "stateful": false, "dtype": "float32", "recurrent_regularizer": null, "kernel_regularizer": null, "recurrent_constraint": null, "recurrent_activation": "hard_sigmoid", "units": 128, "batch_input_shape": [null, 48, 19], "name": "lstm_1", "unroll": false, "bias_initializer": {"config": {}, "class_name": "Zeros"}, "dropout": 0.0, "implementation": 1, "return_state": false, "return_sequences": true, "bias_constraint": null, "unit_forget_bias": true, "kernel_constraint": null, "activity_regularizer": null, "activation": "tanh"}, "class_name": "LSTM"}, {"config": {"recurrent_dropout": 0.0, "kernel_initializer": {"config": {"scale": 1.0, "distribution": "uniform", "seed": null, "mode": "fan_avg"}, "class_name": "VarianceScaling"}, "go_backwards": false, "use_bias": true, "bias_regularizer": 
null, "recurrent_initializer": {"config": {"gain": 1.0, "seed": null}, "class_name": "Orthogonal"}, "trainable": true, "stateful": false, "recurrent_regularizer": null, "kernel_regularizer": null, "recurrent_constraint": null, "recurrent_activation": "hard_sigmoid", "units": 128, "name": "lstm_2", "unroll": false, "bias_initializer": {"config": {}, "class_name": "Zeros"}, "dropout": 0.0, "implementation": 1, "return_state": false, "return_sequences": true, "bias_constraint": null, "unit_forget_bias": true, "kernel_constraint": null, "activity_regularizer": null, "activation": "tanh"}, "class_name": "LSTM"}], "inbound_nodes": [[["input_3", 0, 0, {}]]], "class_name": "Sequential"}, {"name": "concatenate_1", "config": {"name": "concatenate_1", "trainable": true, "axis": -1}, "inbound_nodes": [[["repeat_vector_1", 0, 0, {}], ["sequential_1", 1, 0, {}]]], "class_name": "Concatenate"}, {"name": "lstm_3", "config": {"recurrent_dropout": 0.0, "units": 512, "go_backwards": false, "use_bias": true, "bias_regularizer": null, "recurrent_initializer": {"config": {"gain": 1.0, "seed": null}, "class_name": "Orthogonal"}, "trainable": true, "recurrent_regularizer": null, "kernel_regularizer": null, "recurrent_constraint": null, "recurrent_activation": "hard_sigmoid", "kernel_initializer": {"config": {"scale": 1.0, "distribution": "uniform", "seed": null, "mode": "fan_avg"}, "class_name": "VarianceScaling"}, "kernel_constraint": null, "name": "lstm_3", "unroll": false, "bias_initializer": {"config": {}, "class_name": "Zeros"}, "dropout": 0.0, "implementation": 1, "return_state": false, "return_sequences": true, "bias_constraint": null, "unit_forget_bias": true, "stateful": false, "activity_regularizer": null, "activation": "tanh"}, "inbound_nodes": [[["concatenate_1", 0, 0, {}]]], "class_name": "LSTM"}, {"name": "lstm_4", "config": {"recurrent_dropout": 0.0, "units": 512, "go_backwards": false, "use_bias": true, "bias_regularizer": null, "recurrent_initializer": {"config": {"gain": 1.0, "seed": null}, "class_name": "Orthogonal"}, "trainable": true, "recurrent_regularizer": null, "kernel_regularizer": null, "recurrent_constraint": null, "recurrent_activation": "hard_sigmoid", "kernel_initializer": {"config": {"scale": 1.0, "distribution": "uniform", "seed": null, "mode": "fan_avg"}, "class_name": "VarianceScaling"}, "kernel_constraint": null, "name": "lstm_4", "unroll": false, "bias_initializer": {"config": {}, "class_name": "Zeros"}, "dropout": 0.0, "implementation": 1, "return_state": false, "return_sequences": false, "bias_constraint": null, "unit_forget_bias": true, "stateful": false, "activity_regularizer": null, "activation": "tanh"}, "inbound_nodes": [[["lstm_3", 0, 0, {}]]], "class_name": "LSTM"}, {"name": "dense_3", "config": {"units": 19, "kernel_initializer": {"config": {"scale": 1.0, "distribution": "uniform", "seed": null, "mode": "fan_avg"}, "class_name": "VarianceScaling"}, "use_bias": true, "bias_regularizer": null, "name": "dense_3", "trainable": true, "bias_initializer": {"config": {}, "class_name": "Zeros"}, "bias_constraint": null, "kernel_regularizer": null, "kernel_constraint": null, "activity_regularizer": null, "activation": "softmax"}, "inbound_nodes": [[["lstm_4", 0, 0, {}]]], "class_name": "Dense"}], "input_layers": [["input_1", 0, 0], ["input_3", 0, 0]], "output_layers": [["dense_3", 0, 0]]}, "keras_version": "2.1.2", "backend": "tensorflow", "class_name": "Model"} \ No newline at end of file diff --git a/classes/model/bin/autoencoder.h5 b/classes/model/bin/autoencoder.h5 new 
file mode 100644 index 0000000000000000000000000000000000000000..e6b01d4bebaf7277669101d322d8fd6bdd9f7804 --- /dev/null +++ b/classes/model/bin/autoencoder.h5 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:86d9ae3ae4c294424d3bf56f448e916893eb5374907a211d09712ec3476855b5 +size 2901584 diff --git a/classes/model/bin/autoencoder.json b/classes/model/bin/autoencoder.json new file mode 100644 index 0000000000000000000000000000000000000000..f075c9ec9e425db6f624e535439ce563272e09e4 --- /dev/null +++ b/classes/model/bin/autoencoder.json @@ -0,0 +1 @@ +{"keras_version": "2.1.2", "config": {"layers": [{"name": "input_1", "config": {"name": "input_1", "sparse": false, "batch_input_shape": [null, 256, 256, 3], "dtype": "float32"}, "class_name": "InputLayer", "inbound_nodes": []}, {"name": "conv2d_1", "config": {"kernel_initializer": {"config": {"scale": 1.0, "seed": null, "mode": "fan_avg", "distribution": "uniform"}, "class_name": "VarianceScaling"}, "trainable": true, "bias_initializer": {"config": {}, "class_name": "Zeros"}, "use_bias": true, "filters": 32, "kernel_regularizer": null, "kernel_size": [3, 3], "name": "conv2d_1", "activity_regularizer": null, "strides": [1, 1], "kernel_constraint": null, "bias_regularizer": null, "bias_constraint": null, "padding": "same", "dilation_rate": [1, 1], "activation": "relu", "data_format": "channels_last"}, "class_name": "Conv2D", "inbound_nodes": [[["input_1", 0, 0, {}]]]}, {"name": "conv2d_2", "config": {"kernel_initializer": {"config": {"scale": 1.0, "seed": null, "mode": "fan_avg", "distribution": "uniform"}, "class_name": "VarianceScaling"}, "trainable": true, "bias_initializer": {"config": {}, "class_name": "Zeros"}, "use_bias": true, "filters": 32, "kernel_regularizer": null, "kernel_size": [3, 3], "name": "conv2d_2", "activity_regularizer": null, "strides": [1, 1], "kernel_constraint": null, "bias_regularizer": null, "bias_constraint": null, "padding": "same", "dilation_rate": [1, 1], "activation": "relu", "data_format": "channels_last"}, "class_name": "Conv2D", "inbound_nodes": [[["conv2d_1", 0, 0, {}]]]}, {"name": "max_pooling2d_1", "config": {"name": "max_pooling2d_1", "strides": [2, 2], "padding": "valid", "trainable": true, "pool_size": [2, 2], "data_format": "channels_last"}, "class_name": "MaxPooling2D", "inbound_nodes": [[["conv2d_2", 0, 0, {}]]]}, {"name": "dropout_1", "config": {"name": "dropout_1", "rate": 0.25, "noise_shape": null, "seed": null, "trainable": true}, "class_name": "Dropout", "inbound_nodes": [[["max_pooling2d_1", 0, 0, {}]]]}, {"name": "conv2d_3", "config": {"kernel_initializer": {"config": {"scale": 1.0, "seed": null, "mode": "fan_avg", "distribution": "uniform"}, "class_name": "VarianceScaling"}, "trainable": true, "bias_initializer": {"config": {}, "class_name": "Zeros"}, "use_bias": true, "filters": 64, "kernel_regularizer": null, "kernel_size": [3, 3], "name": "conv2d_3", "activity_regularizer": null, "strides": [1, 1], "kernel_constraint": null, "bias_regularizer": null, "bias_constraint": null, "padding": "same", "dilation_rate": [1, 1], "activation": "relu", "data_format": "channels_last"}, "class_name": "Conv2D", "inbound_nodes": [[["dropout_1", 0, 0, {}]]]}, {"name": "conv2d_4", "config": {"kernel_initializer": {"config": {"scale": 1.0, "seed": null, "mode": "fan_avg", "distribution": "uniform"}, "class_name": "VarianceScaling"}, "trainable": true, "bias_initializer": {"config": {}, "class_name": "Zeros"}, "use_bias": true, "filters": 64, "kernel_regularizer": null, "kernel_size": [3, 3], 
"name": "conv2d_4", "activity_regularizer": null, "strides": [1, 1], "kernel_constraint": null, "bias_regularizer": null, "bias_constraint": null, "padding": "same", "dilation_rate": [1, 1], "activation": "relu", "data_format": "channels_last"}, "class_name": "Conv2D", "inbound_nodes": [[["conv2d_3", 0, 0, {}]]]}, {"name": "max_pooling2d_2", "config": {"name": "max_pooling2d_2", "strides": [2, 2], "padding": "valid", "trainable": true, "pool_size": [2, 2], "data_format": "channels_last"}, "class_name": "MaxPooling2D", "inbound_nodes": [[["conv2d_4", 0, 0, {}]]]}, {"name": "dropout_2", "config": {"name": "dropout_2", "rate": 0.25, "noise_shape": null, "seed": null, "trainable": true}, "class_name": "Dropout", "inbound_nodes": [[["max_pooling2d_2", 0, 0, {}]]]}, {"name": "conv2d_5", "config": {"kernel_initializer": {"config": {"scale": 1.0, "seed": null, "mode": "fan_avg", "distribution": "uniform"}, "class_name": "VarianceScaling"}, "trainable": true, "bias_initializer": {"config": {}, "class_name": "Zeros"}, "use_bias": true, "filters": 128, "kernel_regularizer": null, "kernel_size": [3, 3], "name": "conv2d_5", "activity_regularizer": null, "strides": [1, 1], "kernel_constraint": null, "bias_regularizer": null, "bias_constraint": null, "padding": "same", "dilation_rate": [1, 1], "activation": "relu", "data_format": "channels_last"}, "class_name": "Conv2D", "inbound_nodes": [[["dropout_2", 0, 0, {}]]]}, {"name": "conv2d_6", "config": {"kernel_initializer": {"config": {"scale": 1.0, "seed": null, "mode": "fan_avg", "distribution": "uniform"}, "class_name": "VarianceScaling"}, "trainable": true, "bias_initializer": {"config": {}, "class_name": "Zeros"}, "use_bias": true, "filters": 128, "kernel_regularizer": null, "kernel_size": [3, 3], "name": "conv2d_6", "activity_regularizer": null, "strides": [1, 1], "kernel_constraint": null, "bias_regularizer": null, "bias_constraint": null, "padding": "same", "dilation_rate": [1, 1], "activation": "relu", "data_format": "channels_last"}, "class_name": "Conv2D", "inbound_nodes": [[["conv2d_5", 0, 0, {}]]]}, {"name": "max_pooling2d_3", "config": {"name": "max_pooling2d_3", "strides": [2, 2], "padding": "valid", "trainable": true, "pool_size": [2, 2], "data_format": "channels_last"}, "class_name": "MaxPooling2D", "inbound_nodes": [[["conv2d_6", 0, 0, {}]]]}, {"name": "encoded_layer", "config": {"name": "encoded_layer", "rate": 0.25, "noise_shape": null, "seed": null, "trainable": true}, "class_name": "Dropout", "inbound_nodes": [[["max_pooling2d_3", 0, 0, {}]]]}, {"name": "conv2d_transpose_1", "config": {"kernel_initializer": {"config": {"scale": 1.0, "seed": null, "mode": "fan_avg", "distribution": "uniform"}, "class_name": "VarianceScaling"}, "bias_initializer": {"config": {}, "class_name": "Zeros"}, "use_bias": true, "filters": 128, "kernel_regularizer": null, "trainable": true, "name": "conv2d_transpose_1", "activity_regularizer": null, "strides": [1, 1], "kernel_constraint": null, "bias_regularizer": null, "bias_constraint": null, "padding": "same", "kernel_size": [3, 3], "activation": "relu", "data_format": "channels_last"}, "class_name": "Conv2DTranspose", "inbound_nodes": [[["encoded_layer", 0, 0, {}]]]}, {"name": "conv2d_transpose_2", "config": {"kernel_initializer": {"config": {"scale": 1.0, "seed": null, "mode": "fan_avg", "distribution": "uniform"}, "class_name": "VarianceScaling"}, "bias_initializer": {"config": {}, "class_name": "Zeros"}, "use_bias": true, "filters": 128, "kernel_regularizer": null, "trainable": true, "name": 
"conv2d_transpose_2", "activity_regularizer": null, "strides": [1, 1], "kernel_constraint": null, "bias_regularizer": null, "bias_constraint": null, "padding": "same", "kernel_size": [3, 3], "activation": "relu", "data_format": "channels_last"}, "class_name": "Conv2DTranspose", "inbound_nodes": [[["conv2d_transpose_1", 0, 0, {}]]]}, {"name": "up_sampling2d_1", "config": {"size": [2, 2], "name": "up_sampling2d_1", "trainable": true, "data_format": "channels_last"}, "class_name": "UpSampling2D", "inbound_nodes": [[["conv2d_transpose_2", 0, 0, {}]]]}, {"name": "dropout_3", "config": {"name": "dropout_3", "rate": 0.25, "noise_shape": null, "seed": null, "trainable": true}, "class_name": "Dropout", "inbound_nodes": [[["up_sampling2d_1", 0, 0, {}]]]}, {"name": "conv2d_transpose_3", "config": {"kernel_initializer": {"config": {"scale": 1.0, "seed": null, "mode": "fan_avg", "distribution": "uniform"}, "class_name": "VarianceScaling"}, "bias_initializer": {"config": {}, "class_name": "Zeros"}, "use_bias": true, "filters": 64, "kernel_regularizer": null, "trainable": true, "name": "conv2d_transpose_3", "activity_regularizer": null, "strides": [1, 1], "kernel_constraint": null, "bias_regularizer": null, "bias_constraint": null, "padding": "same", "kernel_size": [3, 3], "activation": "relu", "data_format": "channels_last"}, "class_name": "Conv2DTranspose", "inbound_nodes": [[["dropout_3", 0, 0, {}]]]}, {"name": "conv2d_transpose_4", "config": {"kernel_initializer": {"config": {"scale": 1.0, "seed": null, "mode": "fan_avg", "distribution": "uniform"}, "class_name": "VarianceScaling"}, "bias_initializer": {"config": {}, "class_name": "Zeros"}, "use_bias": true, "filters": 64, "kernel_regularizer": null, "trainable": true, "name": "conv2d_transpose_4", "activity_regularizer": null, "strides": [1, 1], "kernel_constraint": null, "bias_regularizer": null, "bias_constraint": null, "padding": "same", "kernel_size": [3, 3], "activation": "relu", "data_format": "channels_last"}, "class_name": "Conv2DTranspose", "inbound_nodes": [[["conv2d_transpose_3", 0, 0, {}]]]}, {"name": "up_sampling2d_2", "config": {"size": [2, 2], "name": "up_sampling2d_2", "trainable": true, "data_format": "channels_last"}, "class_name": "UpSampling2D", "inbound_nodes": [[["conv2d_transpose_4", 0, 0, {}]]]}, {"name": "dropout_4", "config": {"name": "dropout_4", "rate": 0.25, "noise_shape": null, "seed": null, "trainable": true}, "class_name": "Dropout", "inbound_nodes": [[["up_sampling2d_2", 0, 0, {}]]]}, {"name": "conv2d_transpose_5", "config": {"kernel_initializer": {"config": {"scale": 1.0, "seed": null, "mode": "fan_avg", "distribution": "uniform"}, "class_name": "VarianceScaling"}, "bias_initializer": {"config": {}, "class_name": "Zeros"}, "use_bias": true, "filters": 32, "kernel_regularizer": null, "trainable": true, "name": "conv2d_transpose_5", "activity_regularizer": null, "strides": [1, 1], "kernel_constraint": null, "bias_regularizer": null, "bias_constraint": null, "padding": "same", "kernel_size": [3, 3], "activation": "relu", "data_format": "channels_last"}, "class_name": "Conv2DTranspose", "inbound_nodes": [[["dropout_4", 0, 0, {}]]]}, {"name": "conv2d_transpose_6", "config": {"kernel_initializer": {"config": {"scale": 1.0, "seed": null, "mode": "fan_avg", "distribution": "uniform"}, "class_name": "VarianceScaling"}, "bias_initializer": {"config": {}, "class_name": "Zeros"}, "use_bias": true, "filters": 3, "kernel_regularizer": null, "trainable": true, "name": "conv2d_transpose_6", "activity_regularizer": null, "strides": 
[1, 1], "kernel_constraint": null, "bias_regularizer": null, "bias_constraint": null, "padding": "same", "kernel_size": [3, 3], "activation": "relu", "data_format": "channels_last"}, "class_name": "Conv2DTranspose", "inbound_nodes": [[["conv2d_transpose_5", 0, 0, {}]]]}, {"name": "up_sampling2d_3", "config": {"size": [2, 2], "name": "up_sampling2d_3", "trainable": true, "data_format": "channels_last"}, "class_name": "UpSampling2D", "inbound_nodes": [[["conv2d_transpose_6", 0, 0, {}]]]}, {"name": "dropout_5", "config": {"name": "dropout_5", "rate": 0.25, "noise_shape": null, "seed": null, "trainable": true}, "class_name": "Dropout", "inbound_nodes": [[["up_sampling2d_3", 0, 0, {}]]]}], "name": "model_1", "input_layers": [["input_1", 0, 0]], "output_layers": [["dropout_5", 0, 0]]}, "class_name": "Model", "backend": "tensorflow"} \ No newline at end of file diff --git a/classes/model/bin/meta_dataset.npy b/classes/model/bin/meta_dataset.npy new file mode 100644 index 0000000000000000000000000000000000000000..4e3a684aa641877cfa6e96def5d615e3a33a8590 --- /dev/null +++ b/classes/model/bin/meta_dataset.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3327bd2481728a5d7306c51a057ae24a98aaf80a6f2a042b23c6af5e23d1a327 +size 294 diff --git a/classes/model/bin/words.vocab b/classes/model/bin/words.vocab new file mode 100644 index 0000000000000000000000000000000000000000..a48227bce1e2319261e2c55809cd62928231f144 --- /dev/null +++ b/classes/model/bin/words.vocab @@ -0,0 +1,20 @@ +-> 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0. +,-> 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0. +{-> 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0. + -> 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0. +header-> 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0. +btn-active-> 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0. + +-> 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0. +text-> 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0. +quadruple-> 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0. +btn-inactive-> 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0. +}-> 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0. +btn-orange-> 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0. +small-title-> 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0. +-> 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0. +double-> 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1. +btn-red-> 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0. +row-> 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0. +single-> 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0. +btn-green-> 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0. 
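The words.vocab hunk above stores each DSL token next to its one-hot vector, separated by "->". Below is a minimal decoding sketch (not part of this change set) that mirrors how Vocabulary.retrieve() in classes/Vocabulary.py reads one such line back; the sample line is copied from the file above.

import numpy as np

SEPARATOR = "->"
# One serialized vocabulary entry, copied from classes/model/bin/words.vocab above
line = "header-> 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0."

separator_position = line.index(SEPARATOR)
key = line[:separator_position]  # the DSL token, e.g. "header"
one_hot = np.fromstring(line[separator_position + len(SEPARATOR):], sep=",")
token_index = int(np.where(one_hot == 1)[0][0])  # position of the 1 in the one-hot vector

print(key, token_index)  # header 3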
diff --git a/compiler/.DS_Store b/compiler/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..67f24328f4e7c22aaec7780b6a9ee9b82ce61539 Binary files /dev/null and b/compiler/.DS_Store differ diff --git a/compiler/Compiler.py b/compiler/Compiler.py new file mode 100644 index 0000000000000000000000000000000000000000..09634392f5feba203b190517936432199f62a2b7 --- /dev/null +++ b/compiler/Compiler.py @@ -0,0 +1,43 @@ +__author__ = 'Taneem Jan, taneemishere.github.io' + +import json +from compiler.Node import * + + +class Compiler: + def __init__(self, dsl_mapping_file_path): + with open(dsl_mapping_file_path) as data_file: + self.dsl_mapping = json.load(data_file) + + self.opening_tag = self.dsl_mapping["opening-tag"] + self.closing_tag = self.dsl_mapping["closing-tag"] + self.content_holder = self.opening_tag + self.closing_tag + + self.root = Node("body", None, self.content_holder) + + def compile(self, input_file_path, output_file_path, rendering_function=None): + dsl_file = open(input_file_path) + current_parent = self.root + + for token in dsl_file: + token = token.replace(" ", "").replace("\n", "") + + if token.find(self.opening_tag) != -1: + token = token.replace(self.opening_tag, "") + + element = Node(token, current_parent, self.content_holder) + current_parent.add_child(element) + current_parent = element + elif token.find(self.closing_tag) != -1: + current_parent = current_parent.parent + else: + tokens = token.split(",") + for t in tokens: + element = Node(t, current_parent, self.content_holder) + current_parent.add_child(element) + + output_html = self.root.render(self.dsl_mapping, rendering_function=rendering_function) + with open(output_file_path, 'w') as output_file: + output_file.write(output_html) + + return output_html diff --git a/compiler/Node.py b/compiler/Node.py new file mode 100644 index 0000000000000000000000000000000000000000..4dac248505d448edda8aea5b708c3fa7569ceb29 --- /dev/null +++ b/compiler/Node.py @@ -0,0 +1,32 @@ +from __future__ import print_function +__author__ = 'Taneem Jan, taneemishere.github.io' + + +class Node: + def __init__(self, key, parent_node, content_holder): + self.key = key + self.parent = parent_node + self.children = [] + self.content_holder = content_holder + + def add_child(self, child): + self.children.append(child) + + def show(self): + print(self.key) + for child in self.children: + child.show() + + def render(self, mapping, rendering_function=None): + content = "" + for child in self.children: + content += child.render(mapping, rendering_function) + + value = mapping[self.key] + if rendering_function is not None: + value = rendering_function(self.key, value) + + if len(self.children) != 0: + value = value.replace(self.content_holder, content) + + return value diff --git a/compiler/Utils.py b/compiler/Utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d84bae6559c8e752d4c034663cae22dd7b631952 --- /dev/null +++ b/compiler/Utils.py @@ -0,0 +1,51 @@ +__author__ = 'Taneem Jan, taneemishere.github.io' + +import string +import random + + +class Utils: + @staticmethod + def get_random_text(length_text=10, space_number=1, with_upper_case=True): + results = [] + while len(results) < length_text: + char = random.choice(string.ascii_letters[:26]) + results.append(char) + if with_upper_case: + results[0] = results[0].upper() + + current_spaces = [] + while len(current_spaces) < space_number: + space_pos = random.randint(2, length_text - 3) + if space_pos in current_spaces: + break + 
results[space_pos] = " " + if with_upper_case: + results[space_pos + 1] = results[space_pos - 1].upper() + + current_spaces.append(space_pos) + + return ''.join(results) + + @staticmethod + def get_ios_id(length=10): + results = [] + + while len(results) < length: + char = random.choice(string.digits + string.ascii_letters) + results.append(char) + + results[3] = "-" + results[6] = "-" + + return ''.join(results) + + @staticmethod + def get_android_id(length=10): + results = [] + + while len(results) < length: + char = random.choice(string.ascii_letters) + results.append(char) + + return ''.join(results) diff --git a/compiler/__init__.py b/compiler/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/compiler/__pycache__/Compiler.cpython-38.pyc b/compiler/__pycache__/Compiler.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c9d8fecd39fa0687ee931f6dff23e6fd8199c638 Binary files /dev/null and b/compiler/__pycache__/Compiler.cpython-38.pyc differ diff --git a/compiler/__pycache__/Node.cpython-38.pyc b/compiler/__pycache__/Node.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..72f08b97b0359075a03aa489a43b9066065ee6a8 Binary files /dev/null and b/compiler/__pycache__/Node.cpython-38.pyc differ diff --git a/compiler/__pycache__/Utils.cpython-38.pyc b/compiler/__pycache__/Utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..75fa6462090b3a7c2c408a2aa7566b6638f89d34 Binary files /dev/null and b/compiler/__pycache__/Utils.cpython-38.pyc differ diff --git a/compiler/__pycache__/__init__.cpython-38.pyc b/compiler/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d0f5f117dc3be58e28787577f87cb7cde1e7a347 Binary files /dev/null and b/compiler/__pycache__/__init__.cpython-38.pyc differ diff --git a/compiler/assets/.DS_Store b/compiler/assets/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..104dc965371dd5a22259c012903e1528eba4dc69 Binary files /dev/null and b/compiler/assets/.DS_Store differ diff --git a/compiler/assets/web-dsl-mapping.json b/compiler/assets/web-dsl-mapping.json new file mode 100644 index 0000000000000000000000000000000000000000..31af0997ac944efba0203d6c4d2bf423ca3c8e5b --- /dev/null +++ b/compiler/assets/web-dsl-mapping.json @@ -0,0 +1,18 @@ +{ + "opening-tag": "{", + "closing-tag": "}", + "body": "\n
<html>\n  <header>\n    <meta charset=\"utf-8\">\n    <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\">\n    <link rel=\"stylesheet\" href=\"https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css\" crossorigin=\"anonymous\">\n<style>\n.header{margin:20px 0}nav ul.nav-pills li{background-color:#333;border-radius:4px;margin-right:10px}.col-lg-3{width:24%;margin-right:1.333333%}.col-lg-6{width:49%;margin-right:2%}.col-lg-12,.col-lg-3,.col-lg-6{margin-bottom:20px;border-radius:6px;background-color:#f5f5f5;padding:20px}.row .col-lg-3:last-child,.row .col-lg-6:last-child{margin-right:0}footer{padding:20px 0;text-align:center;border-top:1px solid #bbb}\n</style>\n    <title>Scaffold</title>\n  </header>\n  <body>\n    <main class=\"container\">\n      {}\n      <footer class=\"footer\">\n        <p>&copy; Tony Beltramelli 2017</p>\n      </footer>\n    </main>\n    <script src=\"js/jquery.min.js\"></script>\n    <script src=\"js/bootstrap.min.js\"></script>\n  </body>\n</html>\n", + "header": "<div class=\"header clearfix\">\n  <nav>\n    <ul class=\"nav nav-pills pull-left\">\n      {}\n    </ul>\n  </nav>\n</div>\n", + "btn-active": "<li class=\"active\"><a href=\"#\">[]</a></li>\n", + "btn-inactive": "<li><a href=\"#\">[]</a></li>\n", + "row": "<div class=\"row\">{}</div>\n", + "single": "<div class=\"col-lg-12\">\n{}\n</div>\n", + "double": "<div class=\"col-lg-6\">\n{}\n</div>\n", + "quadruple": "<div class=\"col-lg-3\">\n{}\n</div>\n", + "btn-green": "<a class=\"btn btn-success\" href=\"#\" role=\"button\">[]</a>\n", + "btn-orange": "<a class=\"btn btn-warning\" href=\"#\" role=\"button\">[]</a>\n", + "btn-red": "<a class=\"btn btn-danger\" href=\"#\" role=\"button\">[]</a>", + "big-title": "<h2>[]</h2>", + "small-title": "<h4>[]</h4>", + "text": "<p>[]</p>
    \n" +} diff --git a/data/.DS_Store b/data/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..6a47e17d9d3c223103b40fceb71b38df4cea890c Binary files /dev/null and b/data/.DS_Store differ diff --git a/data/output/.DS_Store b/data/output/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..695f8de2953ff043cc21b26dfb2ca3689dd8453b Binary files /dev/null and b/data/output/.DS_Store differ diff --git a/data/output/input_image_from_interface.gui b/data/output/input_image_from_interface.gui new file mode 100644 index 0000000000000000000000000000000000000000..2f9ef5fc7af6df27946bbe7cf4cc0d9f800fb60c --- /dev/null +++ b/data/output/input_image_from_interface.gui @@ -0,0 +1,30 @@ +header{ +btn-inactive,btn-active,btn-inactive,btn-inactive +} +row{ +double{ +small-title,text,btn-green +} +double{ +small-title,text,btn-green +} +} +row{ +single{ +small-title,text,btn-green +} +} +row{ +quadruple{ +small-title,text,btn-green +} +quadruple{ +small-title,text,btn-green +} +quadruple{ +small-title,text,btn-green +} +quadruple{ +small-title,text,btn-green +} +} diff --git a/data/output/input_image_from_interface.html b/data/output/input_image_from_interface.html new file mode 100644 index 0000000000000000000000000000000000000000..9350c65dfca6b07d852210d310e443689f58331d --- /dev/null +++ b/data/output/input_image_from_interface.html @@ -0,0 +1,71 @@ + +
[compiled HTML for the .gui layout above: the Bootstrap body scaffold titled "Scaffold", a header nav with one active and three inactive buttons, a row with two "double" cards, a row with one "single" card, and a row with four "quadruple" cards, where each card holds a random small title, a line of random filler text, and a green button, all followed by the "© Tony Beltramelli 2017" footer; the raw markup was not preserved in this extraction]
    + + + + diff --git a/examples/.DS_Store b/examples/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..ab1135ef8e1b019e649124290a0123baee93ee8a Binary files /dev/null and b/examples/.DS_Store differ diff --git a/examples/example-1.png b/examples/example-1.png new file mode 100644 index 0000000000000000000000000000000000000000..f9e46324dc7c0da5a35c294c0e064e9db838d890 Binary files /dev/null and b/examples/example-1.png differ diff --git a/examples/example-2.png b/examples/example-2.png new file mode 100644 index 0000000000000000000000000000000000000000..edf1858e2d9233e06661f1a72ea6b14b41ddcb1b Binary files /dev/null and b/examples/example-2.png differ diff --git a/examples/example-3.png b/examples/example-3.png new file mode 100644 index 0000000000000000000000000000000000000000..7666ab284d4d96ff506cf471d62b4d037f76ff18 Binary files /dev/null and b/examples/example-3.png differ diff --git a/flagged/image/0.jpg b/flagged/image/0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..345bb2f6de025ed741d7a62877a707163c30ad79 Binary files /dev/null and b/flagged/image/0.jpg differ diff --git a/flagged/log.csv b/flagged/log.csv new file mode 100644 index 0000000000000000000000000000000000000000..4309407442f2900d9c92b01ceca7197e6b6c9380 --- /dev/null +++ b/flagged/log.csv @@ -0,0 +1,68 @@ +'image','output','flag','username','timestamp' +'image/0.jpg',' +
[flagged output logged by the interface: the same compiled Bootstrap page structure as data/output/input_image_from_interface.html, with different random filler text; the raw markup was not preserved in this extraction]
    + + + + +','','','2022-08-12 18:53:48.876787' diff --git a/main_program.py b/main_program.py new file mode 100644 index 0000000000000000000000000000000000000000..bf9ec602b215f5125d44e7e6071989a2fefaae88 --- /dev/null +++ b/main_program.py @@ -0,0 +1,89 @@ +from __future__ import absolute_import +from __future__ import print_function + +__author__ = 'Taneem Jan, taneemishere.github.io' + +import os.path +from os.path import basename + +from classes.Sampler import * +from classes.model.Main_Model import * + + +def dsl_code_generation(input_image): + trained_weights_path = "classes/model/bin" + trained_model_name = "Main_Model" + input_path = input_image + output_path = "data/output/" + search_method = "greedy" + meta_dataset = np.load("{}/meta_dataset.npy".format(trained_weights_path), allow_pickle=True) + input_shape = meta_dataset[0] + output_size = meta_dataset[1] + + model = Main_Model(input_shape, output_size, trained_weights_path) + model.load(trained_model_name) + + sampler = Sampler(trained_weights_path, input_shape, output_size, CONTEXT_LENGTH) + + file_name = 'input_image_from_interface.png' + file_name = basename(file_name)[:basename(file_name).find(".")] + evaluation_img = Utils.get_preprocessed_img(input_path, IMAGE_SIZE) + + if search_method == "greedy": + result, _ = sampler.predict_greedy(model, np.array([evaluation_img])) + print("Result greedy: \n {}".format(result)) + + with open("{}/{}.gui".format(output_path, file_name), 'w') as out_f: + out_f.write(result.replace(START_TOKEN, "").replace(END_TOKEN, "")) + + return file_name, output_path + + +def compile_gui(file_path, filename): + from os.path import basename + from compiler.Utils import Utils + from compiler.Compiler import Compiler + + input_path = (file_path + filename) + + # remove the path + file_ = os.path.basename(input_path) + # remove the extension + file_ = os.path.splitext(file_)[0] + # add the extension of gui + file_ = "data/output/" + file_ + ".gui" + + input_file = file_ + + FILL_WITH_RANDOM_TEXT = True + TEXT_PLACE_HOLDER = "[]" + + dsl_path = "compiler/assets/web-dsl-mapping.json" + compiler = Compiler(dsl_path) + + def render_content_with_text(key, value): + if FILL_WITH_RANDOM_TEXT: + if key.find("btn") != -1: + value = value.replace(TEXT_PLACE_HOLDER, Utils.get_random_text()) + elif key.find("title") != -1: + value = value.replace(TEXT_PLACE_HOLDER, Utils.get_random_text(length_text=5, space_number=0)) + elif key.find("text") != -1: + value = value.replace(TEXT_PLACE_HOLDER, + Utils.get_random_text(length_text=56, space_number=7, with_upper_case=False)) + return value + + file_uid = basename(input_file)[:basename(input_file).find(".")] + path = input_file[:input_file.find(file_uid)] + + input_file_path = "{}{}.gui".format(path, file_uid) + output_file_path = "{}{}.html".format(path, file_uid) + + html_code = compiler.compile(input_file_path, output_file_path, rendering_function=render_content_with_text) + print("Generated code is compiled..!!") + return html_code + + +def main_method(input_image_from_interface): + file_name, file_output_path = dsl_code_generation(input_image_from_interface) + result = compile_gui(file_output_path, file_name) + return result diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..8e747c5bb5d4403d1c9e3757e4ac222767be7c74 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,5 @@ +gradio~=3.0.20 +keras~=2.8.0 +numpy~=1.21.5 +tensorflow +opencv-python~=4.5.5
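For reference, the compiler added in compiler/Compiler.py can be driven by hand on the .gui file under data/output/, independently of the neural model. This is a minimal sketch assuming the repository layout shown in this diff; render_plain is a hypothetical callback that keeps the "[]" text placeholders instead of filling them with random strings (passing rendering_function=None has the same effect).

from compiler.Compiler import Compiler

def render_plain(key, value):
    # Leave the "[]" placeholders untouched; main_program.py swaps them for random text instead.
    return value

compiler = Compiler("compiler/assets/web-dsl-mapping.json")
html = compiler.compile(
    "data/output/input_image_from_interface.gui",
    "data/output/input_image_from_interface.html",
    rendering_function=render_plain,
)
print(html[:120])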
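The gradio pin in requirements.txt and the flagged/ directory suggest a Gradio front end that feeds an uploaded screenshot to main_method, but that interface script is not part of this excerpt. A minimal sketch of such a wiring might look like the following, under the assumption that classes.Utils.get_preprocessed_img (called inside dsl_code_generation) accepts the numpy image a gr.Image component passes:

import gradio as gr
from main_program import main_method

demo = gr.Interface(
    fn=main_method,                        # image in, compiled HTML string out
    inputs=gr.Image(label="UI screenshot"),
    outputs=gr.HTML(label="Generated HTML"),
    title="HTML code generation from images",
    allow_flagging="manual",               # flagged samples end up under flagged/
)

if __name__ == "__main__":
    demo.launch()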