de3sec Joom committed on
Commit
64ba78f
0 Parent(s):

Duplicate from Joom/Front-end-code-generation-from-images


Co-authored-by: Jude Sajith <Joom@users.noreply.huggingface.co>

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .DS_Store +0 -0
  2. .gitattributes +31 -0
  3. .idea/.gitignore +8 -0
  4. .idea/html-code-generation-from-images-with-deep-neural-networks.iml +10 -0
  5. .idea/inspectionProfiles/Project_Default.xml +89 -0
  6. .idea/inspectionProfiles/profiles_settings.xml +6 -0
  7. .idea/misc.xml +4 -0
  8. .idea/modules.xml +8 -0
  9. .idea/vcs.xml +6 -0
  10. README.md +14 -0
  11. __pycache__/app-with_examples.cpython-38.pyc +0 -0
  12. __pycache__/app.cpython-38.pyc +0 -0
  13. __pycache__/main_program.cpython-38.pyc +0 -0
  14. app.py +38 -0
  15. classes/.DS_Store +0 -0
  16. classes/Sampler.py +59 -0
  17. classes/Utils.py +39 -0
  18. classes/Vocabulary.py +78 -0
  19. classes/__init__.py +0 -0
  20. classes/__pycache__/BeamSearch.cpython-35.pyc +0 -0
  21. classes/__pycache__/BeamSearch.cpython-38.pyc +0 -0
  22. classes/__pycache__/BeamSearch.cpython-39.pyc +0 -0
  23. classes/__pycache__/Sampler.cpython-35.pyc +0 -0
  24. classes/__pycache__/Sampler.cpython-38.pyc +0 -0
  25. classes/__pycache__/Sampler.cpython-39.pyc +0 -0
  26. classes/__pycache__/Utils.cpython-35.pyc +0 -0
  27. classes/__pycache__/Utils.cpython-38.pyc +0 -0
  28. classes/__pycache__/Utils.cpython-39.pyc +0 -0
  29. classes/__pycache__/Vocabulary.cpython-35.pyc +0 -0
  30. classes/__pycache__/Vocabulary.cpython-38.pyc +0 -0
  31. classes/__pycache__/Vocabulary.cpython-39.pyc +0 -0
  32. classes/__pycache__/__init__.cpython-35.pyc +0 -0
  33. classes/__pycache__/__init__.cpython-38.pyc +0 -0
  34. classes/__pycache__/__init__.cpython-39.pyc +0 -0
  35. classes/model/.DS_Store +0 -0
  36. classes/model/AModel.py +25 -0
  37. classes/model/Config.py +7 -0
  38. classes/model/Main_Model.py +71 -0
  39. classes/model/__init__.py +0 -0
  40. classes/model/__pycache__/AModel.cpython-35.pyc +0 -0
  41. classes/model/__pycache__/AModel.cpython-38.pyc +0 -0
  42. classes/model/__pycache__/Config.cpython-35.pyc +0 -0
  43. classes/model/__pycache__/Config.cpython-38.pyc +0 -0
  44. classes/model/__pycache__/Main_Model.cpython-38.pyc +0 -0
  45. classes/model/__pycache__/__init__.cpython-35.pyc +0 -0
  46. classes/model/__pycache__/__init__.cpython-38.pyc +0 -0
  47. classes/model/__pycache__/__init__.cpython-39.pyc +0 -0
  48. classes/model/__pycache__/autoencoder_image.cpython-35.pyc +0 -0
  49. classes/model/__pycache__/autoencoder_image.cpython-38.pyc +0 -0
  50. classes/model/__pycache__/pix2code.cpython-35.pyc +0 -0
.DS_Store ADDED
Binary file (10.2 kB).
 
.gitattributes ADDED
@@ -0,0 +1,31 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
.idea/.gitignore ADDED
@@ -0,0 +1,8 @@
+ # Default ignored files
+ /shelf/
+ /workspace.xml
+ # Editor-based HTTP Client requests
+ /httpRequests/
+ # Datasource local storage ignored files
+ /dataSources/
+ /dataSources.local.xml
.idea/html-code-generation-from-images-with-deep-neural-networks.iml ADDED
@@ -0,0 +1,10 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <module type="PYTHON_MODULE" version="4">
+   <component name="NewModuleRootManager">
+     <content url="file://$MODULE_DIR$">
+       <excludeFolder url="file://$MODULE_DIR$/venv" />
+     </content>
+     <orderEntry type="inheritedJdk" />
+     <orderEntry type="sourceFolder" forTests="false" />
+   </component>
+ </module>
.idea/inspectionProfiles/Project_Default.xml ADDED
@@ -0,0 +1,89 @@
+ <component name="InspectionProjectProfileManager">
+   <profile version="1.0">
+     <option name="myName" value="Project Default" />
+     <inspection_tool class="DuplicatedCode" enabled="true" level="WEAK WARNING" enabled_by_default="true">
+       <Languages>
+         <language minSize="160" name="Python" />
+       </Languages>
+     </inspection_tool>
+     <inspection_tool class="PyPackageRequirementsInspection" enabled="true" level="WARNING" enabled_by_default="true">
+       <option name="ignoredPackages">
+         <value>
+           <list size="63">
+             <item index="0" class="java.lang.String" itemvalue="traitlets" />
+             <item index="1" class="java.lang.String" itemvalue="functools32" />
+             <item index="2" class="java.lang.String" itemvalue="protobuf" />
+             <item index="3" class="java.lang.String" itemvalue="html5lib" />
+             <item index="4" class="java.lang.String" itemvalue="scikit-learn" />
+             <item index="5" class="java.lang.String" itemvalue="PyYAML" />
+             <item index="6" class="java.lang.String" itemvalue="pickleshare" />
+             <item index="7" class="java.lang.String" itemvalue="python-dateutil" />
+             <item index="8" class="java.lang.String" itemvalue="cycler" />
+             <item index="9" class="java.lang.String" itemvalue="ipython-genutils" />
+             <item index="10" class="java.lang.String" itemvalue="tables" />
+             <item index="11" class="java.lang.String" itemvalue="appdirs" />
+             <item index="12" class="java.lang.String" itemvalue="Pygments" />
+             <item index="13" class="java.lang.String" itemvalue="scandir" />
+             <item index="14" class="java.lang.String" itemvalue="bleach" />
+             <item index="15" class="java.lang.String" itemvalue="pyparsing" />
+             <item index="16" class="java.lang.String" itemvalue="Markdown" />
+             <item index="17" class="java.lang.String" itemvalue="tensorflow-gpu" />
+             <item index="18" class="java.lang.String" itemvalue="Werkzeug" />
+             <item index="19" class="java.lang.String" itemvalue="h5py" />
+             <item index="20" class="java.lang.String" itemvalue="unity-lens-photos" />
+             <item index="21" class="java.lang.String" itemvalue="pkg-resources" />
+             <item index="22" class="java.lang.String" itemvalue="python-gflags" />
+             <item index="23" class="java.lang.String" itemvalue="leveldb" />
+             <item index="24" class="java.lang.String" itemvalue="pexpect" />
+             <item index="25" class="java.lang.String" itemvalue="Theano" />
+             <item index="26" class="java.lang.String" itemvalue="matplotlib" />
+             <item index="27" class="java.lang.String" itemvalue="virtualenv" />
+             <item index="28" class="java.lang.String" itemvalue="mock" />
+             <item index="29" class="java.lang.String" itemvalue="Keras" />
+             <item index="30" class="java.lang.String" itemvalue="enum34" />
+             <item index="31" class="java.lang.String" itemvalue="numexpr" />
+             <item index="32" class="java.lang.String" itemvalue="scikit-image" />
+             <item index="33" class="java.lang.String" itemvalue="pbr" />
+             <item index="34" class="java.lang.String" itemvalue="decorator" />
+             <item index="35" class="java.lang.String" itemvalue="networkx" />
+             <item index="36" class="java.lang.String" itemvalue="ptyprocess" />
+             <item index="37" class="java.lang.String" itemvalue="funcsigs" />
+             <item index="38" class="java.lang.String" itemvalue="backports.shutil-get-terminal-size" />
+             <item index="39" class="java.lang.String" itemvalue="wcwidth" />
+             <item index="40" class="java.lang.String" itemvalue="numpy" />
+             <item index="41" class="java.lang.String" itemvalue="simplegeneric" />
+             <item index="42" class="java.lang.String" itemvalue="adium-theme-ubuntu" />
+             <item index="43" class="java.lang.String" itemvalue="ipdb" />
+             <item index="44" class="java.lang.String" itemvalue="backports.weakref" />
+             <item index="45" class="java.lang.String" itemvalue="PyWavelets" />
+             <item index="46" class="java.lang.String" itemvalue="prompt-toolkit" />
+             <item index="47" class="java.lang.String" itemvalue="Cython" />
+             <item index="48" class="java.lang.String" itemvalue="nose" />
+             <item index="49" class="java.lang.String" itemvalue="scipy" />
+             <item index="50" class="java.lang.String" itemvalue="subprocess32" />
+             <item index="51" class="java.lang.String" itemvalue="tensorflow-tensorboard" />
+             <item index="52" class="java.lang.String" itemvalue="six" />
+             <item index="53" class="java.lang.String" itemvalue="opencv-python" />
+             <item index="54" class="java.lang.String" itemvalue="ipython" />
+             <item index="55" class="java.lang.String" itemvalue="packaging" />
+             <item index="56" class="java.lang.String" itemvalue="futures" />
+             <item index="57" class="java.lang.String" itemvalue="pathlib2" />
+             <item index="58" class="java.lang.String" itemvalue="pandas" />
+             <item index="59" class="java.lang.String" itemvalue="olefile" />
+             <item index="60" class="java.lang.String" itemvalue="Pyste" />
+             <item index="61" class="java.lang.String" itemvalue="pytz" />
+             <item index="62" class="java.lang.String" itemvalue="Pillow" />
+           </list>
+         </value>
+       </option>
+     </inspection_tool>
+     <inspection_tool class="PyUnresolvedReferencesInspection" enabled="true" level="WARNING" enabled_by_default="true">
+       <option name="ignoredIdentifiers">
+         <list>
+           <option value="model.compclasses.Compiler.compclasses" />
+           <option value="model.compclasses.Utils.Utils.get_preprocessed_img" />
+         </list>
+       </option>
+     </inspection_tool>
+   </profile>
+ </component>
.idea/inspectionProfiles/profiles_settings.xml ADDED
@@ -0,0 +1,6 @@
+ <component name="InspectionProjectProfileManager">
+   <settings>
+     <option name="USE_PROJECT_PROFILE" value="false" />
+     <version value="1.0" />
+   </settings>
+ </component>
.idea/misc.xml ADDED
@@ -0,0 +1,4 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <project version="4">
+   <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.8 (dnn)" project-jdk-type="Python SDK" />
+ </project>
.idea/modules.xml ADDED
@@ -0,0 +1,8 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <project version="4">
+   <component name="ProjectModuleManager">
+     <modules>
+       <module fileurl="file://$PROJECT_DIR$/.idea/html-code-generation-from-images-with-deep-neural-networks.iml" filepath="$PROJECT_DIR$/.idea/html-code-generation-from-images-with-deep-neural-networks.iml" />
+     </modules>
+   </component>
+ </project>
.idea/vcs.xml ADDED
@@ -0,0 +1,6 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <project version="4">
+   <component name="VcsDirectoryMappings">
+     <mapping directory="$PROJECT_DIR$" vcs="Git" />
+   </component>
+ </project>
README.md ADDED
@@ -0,0 +1,14 @@
+ ---
+ title: Image to HTML Code Demo
+ emoji: 🧑‍💻
+ colorFrom: pink
+ colorTo: purple
+ sdk: gradio
+ sdk_version: 3.1.4
+ app_file: app.py
+ pinned: false
+ license: afl-3.0
+ duplicated_from: Joom/Front-end-code-generation-from-images
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
__pycache__/app-with_examples.cpython-38.pyc ADDED
Binary file (2.96 kB).
 
__pycache__/app.cpython-38.pyc ADDED
Binary file (2.95 kB).
 
__pycache__/main_program.cpython-38.pyc ADDED
Binary file (2.82 kB).
 
app.py ADDED
@@ -0,0 +1,38 @@
+ __author__ = 'Taneem Jan, taneemishere.github.io'
+
+ import gradio as gr
+ import main_program
+
+
+ # our model's i/o method that takes an image from the gradio interface's inputs.Image()
+ def model_interface(image):
+     return main_model(image)
+
+
+ # main method that calls the main_program where the code is generated and then compiled
+ def main_model(input_image):
+     result = main_program.main_method(input_image)
+     return result
+
+
+ interface_title = "<br> <p style='margin: 0% 8% 0% 8%'>Front end Code Generation with Deep Neural Networks</p>"
+ interface_description = """<p style='margin: 0% 8% 2% 8%; text-align: justify;text-justify: inter-word;'> Input a sketch image and select the framework,
+ then click on submit to generate the code</p> """
+
+ interface_article = """<br><h2 style='text-align: center;'>Crafted with care by Jude</h2> """
+
+ interface_examples = ['examples/example-1.png', 'examples/example-2.png', 'examples/example-3.png']
+
+ # a gradio interface to convert an image to HTML code
+ interface = gr.Interface(
+     model_interface,
+     inputs='image',
+     outputs='text',
+     allow_flagging="manual",
+     title=interface_title,
+     description=interface_description,
+     article=interface_article,
+     examples=interface_examples
+ )
+
+ interface.launch(share=False)
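
Note: app.py simply forwards the uploaded image from the Gradio 'image' input to main_program.main_method and displays the returned string. Below is a minimal sketch of exercising the same pipeline without the web UI; it assumes main_program is importable from the repository root (only its .pyc appears in this 50-file view) and that main_method accepts an RGB numpy array and returns the generated code as a string. The example path comes from interface_examples above.

# Sketch only: call the generation pipeline directly, bypassing Gradio.
import numpy as np
from PIL import Image

import main_program  # module used by app.py; assumed importable

def generate_from_file(image_path):
    # Mirror what gr.Interface(inputs='image') passes in: an RGB numpy array.
    image = np.asarray(Image.open(image_path).convert("RGB"))
    return main_program.main_method(image)

if __name__ == "__main__":
    print(generate_from_file("examples/example-1.png"))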
classes/.DS_Store ADDED
Binary file (6.15 kB).
 
classes/Sampler.py ADDED
@@ -0,0 +1,59 @@
+ from __future__ import print_function
+ from __future__ import absolute_import
+ __author__ = 'Taneem Jan, taneemishere.github.io'
+
+ from .Vocabulary import *
+ from .Utils import *
+
+
+ class Sampler:
+     def __init__(self, voc_path, input_shape, output_size, context_length):
+         self.voc = Vocabulary()
+         self.voc.retrieve(voc_path)
+
+         self.input_shape = input_shape
+         self.output_size = output_size
+
+         print("Vocabulary size: {}".format(self.voc.size))
+         print("Input shape: {}".format(self.input_shape))
+         print("Output size: {}".format(self.output_size))
+
+         self.context_length = context_length
+
+     def predict_greedy(self, model, input_img, require_sparse_label=True, sequence_length=150, verbose=False):
+         current_context = [self.voc.vocabulary[PLACEHOLDER]] * (self.context_length - 1)
+         current_context.append(self.voc.vocabulary[START_TOKEN])
+         if require_sparse_label:
+             current_context = Utils.sparsify(current_context, self.output_size)
+
+         predictions = START_TOKEN
+         out_probas = []
+
+         for i in range(0, sequence_length):
+             if verbose:
+                 print("predicting {}/{}...".format(i, sequence_length))
+
+             probas = model.predict(input_img, np.array([current_context]))
+             prediction = np.argmax(probas)
+             out_probas.append(probas)
+
+             new_context = []
+             for j in range(1, self.context_length):
+                 new_context.append(current_context[j])
+
+             if require_sparse_label:
+                 sparse_label = np.zeros(self.output_size)
+                 sparse_label[prediction] = 1
+                 new_context.append(sparse_label)
+             else:
+                 new_context.append(prediction)
+
+             current_context = new_context
+
+             predictions += self.voc.token_lookup[prediction]
+
+             if self.voc.token_lookup[prediction] == END_TOKEN:
+                 break
+
+         return predictions, out_probas
+
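
Note: predict_greedy seeds the context window with placeholder tokens plus <START>, then repeatedly appends the argmax token until <END> or sequence_length is reached. A hedged sketch of driving it is shown below; it assumes a trained model exposing predict(image, partial_caption) as Main_Model does, and the vocabulary directory, output size and image shape are illustrative values, not facts from this commit.

# Sketch only: greedy decoding with an already-loaded model.
import numpy as np
from classes.Sampler import Sampler

sampler = Sampler(voc_path="classes/model/bin",   # directory containing words.vocab (assumed)
                  input_shape=(256, 256, 3),      # IMAGE_SIZE from classes/model/Config.py
                  output_size=19,                 # vocabulary size of the trained model (assumed)
                  context_length=48)              # CONTEXT_LENGTH from classes/model/Config.py

# `trained_model` must expose predict(image, partial_caption) -> probability vector,
# and `preprocessed_img` is one image prepared with Utils.get_preprocessed_img (both assumed here).
tokens, probas = sampler.predict_greedy(trained_model, np.array([preprocessed_img]))
print(tokens)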
classes/Utils.py ADDED
@@ -0,0 +1,39 @@
+ __author__ = 'Taneem Jan, taneemishere.github.io'
+
+ import numpy as np
+
+
+ class Utils:
+     @staticmethod
+     def sparsify(label_vector, output_size):
+         sparse_vector = []
+
+         for label in label_vector:
+             sparse_label = np.zeros(output_size)
+             sparse_label[label] = 1
+
+             sparse_vector.append(sparse_label)
+
+         return np.array(sparse_vector)
+
+     @staticmethod
+     def get_preprocessed_img(img_path, image_size):
+         import cv2
+         # from keras.preprocessing.image import array_to_img, img_to_array
+         # img = array_to_img(img_path)
+         # img = img_to_array(img)
+         # img = cv2.imread(img_path)
+         # don't need to read the image as we're now directly passing the
+         # image as numpy array to this method
+         img = cv2.resize(img_path, (image_size, image_size))
+         img = img.astype('float32')
+         img /= 255
+         return img
+
+     @staticmethod
+     def show(image):
+         import cv2
+         cv2.namedWindow("view", cv2.WINDOW_AUTOSIZE)
+         cv2.imshow("view", image)
+         cv2.waitKey(0)
+         cv2.destroyWindow("view")
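
Note: Utils.sparsify converts a list of integer token ids into one-hot rows, and get_preprocessed_img resizes and normalizes an image that is already a numpy array (despite the img_path parameter name, it no longer reads from disk, as the in-line comments say). A small illustrative sketch; the random array stands in for a real sketch image:

# Sketch only: expected inputs/outputs of the two helpers.
import numpy as np
from classes.Utils import Utils

one_hot = Utils.sparsify([0, 2], output_size=3)
# one_hot -> [[1., 0., 0.],
#             [0., 0., 1.]]

raw = np.random.randint(0, 256, size=(600, 800, 3), dtype=np.uint8)  # stand-in sketch image
img = Utils.get_preprocessed_img(raw, image_size=256)                # float32, shape (256, 256, 3), values in [0, 1]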
classes/Vocabulary.py ADDED
@@ -0,0 +1,78 @@
+ __author__ = 'Taneem Jan, taneemishere.github.io'
+
+ import sys
+ import numpy as np
+
+ START_TOKEN = "<START>"
+ END_TOKEN = "<END>"
+ PLACEHOLDER = " "
+ SEPARATOR = '->'
+
+
+ class Vocabulary:
+     def __init__(self):
+         self.binary_vocabulary = {}
+         self.vocabulary = {}
+         self.token_lookup = {}
+         self.size = 0
+
+         self.append(START_TOKEN)
+         self.append(END_TOKEN)
+         self.append(PLACEHOLDER)
+
+     def append(self, token):
+         if token not in self.vocabulary:
+             self.vocabulary[token] = self.size
+             self.token_lookup[self.size] = token
+             self.size += 1
+
+     def create_binary_representation(self):
+         if sys.version_info >= (3,):
+             items = self.vocabulary.items()
+         else:
+             items = self.vocabulary.iteritems()
+         for key, value in items:
+             binary = np.zeros(self.size)
+             binary[value] = 1
+             self.binary_vocabulary[key] = binary
+
+     def get_serialized_binary_representation(self):
+         if len(self.binary_vocabulary) == 0:
+             self.create_binary_representation()
+
+         string = ""
+         if sys.version_info >= (3,):
+             items = self.binary_vocabulary.items()
+         else:
+             items = self.binary_vocabulary.iteritems()
+         for key, value in items:
+             array_as_string = np.array2string(value, separator=',', max_line_width=self.size * self.size)
+             string += "{}{}{}\n".format(key, SEPARATOR, array_as_string[1:len(array_as_string) - 1])
+         return string
+
+     def save(self, path):
+         output_file_name = "{}/words.vocab".format(path)
+         output_file = open(output_file_name, 'w')
+         output_file.write(self.get_serialized_binary_representation())
+         output_file.close()
+
+     def retrieve(self, path):
+         input_file = open("{}/words.vocab".format(path), 'r')
+         buffer = ""
+         for line in input_file:
+             try:
+                 separator_position = len(buffer) + line.index(SEPARATOR)
+                 buffer += line
+                 key = buffer[:separator_position]
+                 value = buffer[separator_position + len(SEPARATOR):]
+                 value = np.fromstring(value, sep=',')
+
+                 self.binary_vocabulary[key] = value
+                 self.vocabulary[key] = np.where(value == 1)[0][0]
+                 self.token_lookup[np.where(value == 1)[0][0]] = key
+
+                 buffer = ""
+             except ValueError:
+                 buffer += line
+         input_file.close()
+         self.size = len(self.vocabulary)
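
Note: words.vocab stores one line per token in the form token->one,hot,vector (SEPARATOR is '->'), which is exactly what retrieve() parses back. A round-trip sketch follows; the target directory and the sample tokens are chosen purely for illustration.

# Sketch only: build, persist and reload a tiny vocabulary.
from classes.Vocabulary import Vocabulary

voc = Vocabulary()                      # pre-seeded with <START>, <END> and the placeholder token
for token in ["header", "btn-green", "{", "}"]:
    voc.append(token)

voc.save("/tmp")                        # writes /tmp/words.vocab (directory is an assumption)
restored = Vocabulary()
restored.retrieve("/tmp")               # rebuilds vocabulary, token_lookup and the one-hot vectors
assert restored.size == voc.size        # 3 seed tokens + 4 appended = 7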
classes/__init__.py ADDED
File without changes
classes/__pycache__/BeamSearch.cpython-35.pyc ADDED
Binary file (4.56 kB).

classes/__pycache__/BeamSearch.cpython-38.pyc ADDED
Binary file (4.2 kB).

classes/__pycache__/BeamSearch.cpython-39.pyc ADDED
Binary file (4.23 kB).

classes/__pycache__/Sampler.cpython-35.pyc ADDED
Binary file (3.39 kB).

classes/__pycache__/Sampler.cpython-38.pyc ADDED
Binary file (1.75 kB).

classes/__pycache__/Sampler.cpython-39.pyc ADDED
Binary file (3.09 kB).

classes/__pycache__/Utils.cpython-35.pyc ADDED
Binary file (1.28 kB).

classes/__pycache__/Utils.cpython-38.pyc ADDED
Binary file (1.24 kB).

classes/__pycache__/Utils.cpython-39.pyc ADDED
Binary file (1.24 kB).

classes/__pycache__/Vocabulary.cpython-35.pyc ADDED
Binary file (2.86 kB).

classes/__pycache__/Vocabulary.cpython-38.pyc ADDED
Binary file (2.66 kB).

classes/__pycache__/Vocabulary.cpython-39.pyc ADDED
Binary file (2.64 kB).

classes/__pycache__/__init__.cpython-35.pyc ADDED
Binary file (150 Bytes).

classes/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (163 Bytes).

classes/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (188 Bytes).

classes/model/.DS_Store ADDED
Binary file (6.15 kB).
 
classes/model/AModel.py ADDED
@@ -0,0 +1,25 @@
+ __author__ = 'Taneem Jan, taneemishere.github.io'
+
+ from keras.models import model_from_json
+
+
+ class AModel:
+     def __init__(self, input_shape, output_size, output_path):
+         self.model = None
+         self.input_shape = input_shape
+         self.output_size = output_size
+         self.output_path = output_path
+         self.name = ""
+
+     def save(self):
+         model_json = self.model.to_json()
+         with open("{}/{}.json".format(self.output_path, self.name), "w") as json_file:
+             json_file.write(model_json)
+         self.model.save_weights("{}/{}.h5".format(self.output_path, self.name))
+
+     def load(self, name=""):
+         output_name = self.name if name == "" else name
+         with open("{}/{}.json".format(self.output_path, output_name), "r") as json_file:
+             loaded_model_json = json_file.read()
+         self.model = model_from_json(loaded_model_json)
+         self.model.load_weights("{}/{}.h5".format(self.output_path, output_name))
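
Note: AModel is the thin persistence base class: save() writes <output_path>/<name>.json (architecture) plus <name>.h5 (weights), and load() rebuilds the model from those two files. A hedged sketch of a minimal concrete subclass, only to show the contract; TinyModel and its layer sizes are invented for illustration and are not part of this repository.

# Sketch only: the smallest plausible AModel subclass.
from keras.layers import Dense, Input
from keras.models import Model
from classes.model.AModel import AModel

class TinyModel(AModel):
    def __init__(self, input_shape, output_size, output_path):
        AModel.__init__(self, input_shape, output_size, output_path)
        self.name = "TinyModel"                  # controls the on-disk file names
        inputs = Input(shape=input_shape)
        outputs = Dense(output_size, activation="softmax")(inputs)
        self.model = Model(inputs, outputs)

# TinyModel((8,), 4, "bin").save() would write bin/TinyModel.json and bin/TinyModel.h5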
classes/model/Config.py ADDED
@@ -0,0 +1,7 @@
+ __author__ = 'Taneem Jan, taneemishere.github.io'
+
+ CONTEXT_LENGTH = 48
+ IMAGE_SIZE = 256
+ BATCH_SIZE = 64
+ EPOCHS = 10
+ STEPS_PER_EPOCH = 72000
classes/model/Main_Model.py ADDED
@@ -0,0 +1,71 @@
+ __author__ = 'Taneem Jan, improved the old model through pretrained Auto-encoders'
+
+ from keras.layers import Input, Dense, Dropout, RepeatVector, LSTM, concatenate, Flatten
+ from keras.models import Sequential, Model
+ from tensorflow.keras.optimizers import RMSprop
+ from .Config import *
+ from .AModel import *
+ from .autoencoder_image import *
+
+
+ class Main_Model(AModel):
+     def __init__(self, input_shape, output_size, output_path):
+         AModel.__init__(self, input_shape, output_size, output_path)
+         self.name = "Main_Model"
+
+         visual_input = Input(shape=input_shape)
+
+         # Load the pre-trained autoencoder model
+         autoencoder_model = autoencoder_image(input_shape, input_shape, output_path)
+         autoencoder_model.load('autoencoder')
+         path = "classes/model/bin/"
+         path_to_autoencoder = "{}autoencoder.h5".format(path)
+         autoencoder_model.model.load_weights(path_to_autoencoder)
+         # Get only the model up to the encoded part
+         hidden_layer_model_freeze = Model(
+             inputs=autoencoder_model.model.input,
+             outputs=autoencoder_model.model.get_layer('encoded_layer').output
+         )
+         hidden_layer_input = hidden_layer_model_freeze(visual_input)
+
+         # Additional layers before concatenation
+         hidden_layer_model = Flatten()(hidden_layer_input)
+         hidden_layer_model = Dense(1024, activation='relu')(hidden_layer_model)
+         hidden_layer_model = Dropout(0.3)(hidden_layer_model)
+         hidden_layer_model = Dense(1024, activation='relu')(hidden_layer_model)
+         hidden_layer_model = Dropout(0.3)(hidden_layer_model)
+         hidden_layer_result = RepeatVector(CONTEXT_LENGTH)(hidden_layer_model)
+
+         # Making sure the loaded hidden_layer_model_freeze will no longer be updated
+         for layer in hidden_layer_model_freeze.layers:
+             layer.trainable = False
+
+         # The same language model as that of pix2code by Tony Beltramelli
+         language_model = Sequential()
+         language_model.add(LSTM(128, return_sequences=True, input_shape=(CONTEXT_LENGTH, output_size)))
+         language_model.add(LSTM(128, return_sequences=True))
+
+         textual_input = Input(shape=(CONTEXT_LENGTH, output_size))
+         encoded_text = language_model(textual_input)
+
+         decoder = concatenate([hidden_layer_result, encoded_text])
+
+         decoder = LSTM(512, return_sequences=True)(decoder)
+         decoder = LSTM(512, return_sequences=False)(decoder)
+         decoder = Dense(output_size, activation='softmax')(decoder)
+
+         self.model = Model(inputs=[visual_input, textual_input], outputs=decoder)
+
+         optimizer = RMSprop(learning_rate=0.0001, clipvalue=1.0)
+         self.model.compile(loss='categorical_crossentropy', optimizer=optimizer)
+
+     def fit_generator(self, generator, steps_per_epoch):
+         # self.model.summary()
+         self.model.fit_generator(generator, steps_per_epoch=steps_per_epoch, epochs=EPOCHS, verbose=1)
+         self.save()
+
+     def predict(self, image, partial_caption):
+         return self.model.predict([image, partial_caption], verbose=0)[0]
+
+     def predict_batch(self, images, partial_captions):
+         return self.model.predict([images, partial_captions], verbose=1)
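
Note: the compiled model takes two inputs, a batch of preprocessed sketches and a batch of one-hot context windows of length CONTEXT_LENGTH, and returns a softmax distribution over the next token. A hedged sketch of a single inference step follows; the vocabulary size and the presence of saved weights under classes/model/bin/ are assumptions about the trained artefacts, not something this diff guarantees.

# Sketch only: input/output shapes of Main_Model at inference time.
import numpy as np
from classes.model.Config import CONTEXT_LENGTH, IMAGE_SIZE
from classes.model.Main_Model import Main_Model

output_size = 19  # vocabulary size of the trained model (assumed)
model = Main_Model(input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3),
                   output_size=output_size,
                   output_path="classes/model/bin")  # __init__ expects autoencoder.h5 in this directory
model.load()  # AModel.load(): reads Main_Model.json / Main_Model.h5 if they were saved there

image = np.zeros((1, IMAGE_SIZE, IMAGE_SIZE, 3), dtype="float32")      # one preprocessed sketch
context = np.zeros((1, CONTEXT_LENGTH, output_size), dtype="float32")  # one-hot token window
next_token_probas = model.predict(image, context)                      # shape: (output_size,)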
classes/model/__init__.py ADDED
File without changes
classes/model/__pycache__/AModel.cpython-35.pyc ADDED
Binary file (1.36 kB).

classes/model/__pycache__/AModel.cpython-38.pyc ADDED
Binary file (1.33 kB).

classes/model/__pycache__/Config.cpython-35.pyc ADDED
Binary file (343 Bytes).

classes/model/__pycache__/Config.cpython-38.pyc ADDED
Binary file (395 Bytes).

classes/model/__pycache__/Main_Model.cpython-38.pyc ADDED
Binary file (2.73 kB).

classes/model/__pycache__/__init__.cpython-35.pyc ADDED
Binary file (156 Bytes).

classes/model/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (169 Bytes).

classes/model/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (194 Bytes).

classes/model/__pycache__/autoencoder_image.cpython-35.pyc ADDED
Binary file (2.58 kB).

classes/model/__pycache__/autoencoder_image.cpython-38.pyc ADDED
Binary file (2.18 kB).

classes/model/__pycache__/pix2code.cpython-35.pyc ADDED
Binary file (3.31 kB).