DoDucNhan committed
Commit d7ff06f · Parent: 835b8d1

add files
Files changed:
- Dockerfile +0 -0
- app/__init__.py +5 -0
- app/__pycache__/__init__.cpython-38.pyc +0 -0
- app/__pycache__/routes.cpython-38.pyc +0 -0
- app/routes.py +16 -0
- app/static/cat vs dog/best.h5 +3 -0
- app/static/cat vs dog/tfjs_model/group1-shard1of3.bin +3 -0
- app/static/cat vs dog/tfjs_model/group1-shard2of3.bin +3 -0
- app/static/cat vs dog/tfjs_model/group1-shard3of3.bin +3 -0
- app/static/cat vs dog/tfjs_model/model.json +0 -0
- app/static/flowers/best.h5 +3 -0
- app/static/flowers/tfjs_model/group1-shard1of4.bin +3 -0
- app/static/flowers/tfjs_model/group1-shard2of4.bin +3 -0
- app/static/flowers/tfjs_model/group1-shard3of4.bin +3 -0
- app/static/flowers/tfjs_model/group1-shard4of4.bin +3 -0
- app/static/flowers/tfjs_model/model.json +0 -0
- app/static/styles/style.css +127 -0
- app/templates/base.html +46 -0
- app/templates/cats_dogs.html +61 -0
- app/templates/flowers.html +78 -0
- cat_dog.py +82 -0
- flowers.py +80 -0
- main.py +4 -0
- requirements.txt +3 -0
Dockerfile
ADDED
File without changes
app/__init__.py
ADDED
@@ -0,0 +1,5 @@
+from flask import Flask
+
+app = Flask(__name__)
+
+from app import routes
app/__pycache__/__init__.cpython-38.pyc
ADDED
Binary file (229 Bytes).
app/__pycache__/routes.cpython-38.pyc
ADDED
Binary file (672 Bytes).
app/routes.py
ADDED
@@ -0,0 +1,16 @@
+from flask import render_template
+from app import app
+
+
+@app.route('/', methods=['GET'])
+@app.route('/flowers', methods=['GET'])
+def login():
+    return render_template('flowers.html', title='Flowers', header='FLOWERS', status1="active")
+
+
+@app.route('/cats_dogs', methods=['GET'])
+def cats_dogs():
+    return render_template('cats_dogs.html', title='Cat vs dog', header='CAT AND DOG', status2="active")
+
+
+
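A quick way to exercise these routes is Flask's built-in test client; the following is a minimal sketch (not part of this commit), assuming it runs from the repository root with the package layout above:

    from app import app

    # Both pages are plain GETs, so a 200 response means the template rendered.
    client = app.test_client()
    assert client.get('/').status_code == 200
    assert client.get('/flowers').status_code == 200
    assert client.get('/cats_dogs').status_code == 200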
app/static/cat vs dog/best.h5
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:17ee35f7f3f023b9154913ee07622eb4a6d878c8f208f9735bc11e0c7bf64059
+size 17077688
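The three lines above are a Git LFS pointer, not the model itself: the oid is the SHA-256 digest of the real file's contents, which are fetched separately (e.g. with git lfs pull). A hedged sketch (not part of this commit) of verifying a pulled file against this pointer:

    import hashlib

    # The LFS oid is the SHA-256 of the actual file contents, so a pulled
    # best.h5 can be checked against the pointer above.
    with open('app/static/cat vs dog/best.h5', 'rb') as f:
        digest = hashlib.sha256(f.read()).hexdigest()
    assert digest == '17ee35f7f3f023b9154913ee07622eb4a6d878c8f208f9735bc11e0c7bf64059'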
app/static/cat vs dog/tfjs_model/group1-shard1of3.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:055e074f4264a2fa54d8ec1b7e365c9b6c4b92c468e6a7e003bf61cbec2eb7db
+size 4194304
app/static/cat vs dog/tfjs_model/group1-shard2of3.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:57910314fefafaf01acfb2edaf901c1aafd4a26f402f65492ce710e7483432c4
+size 4194304
app/static/cat vs dog/tfjs_model/group1-shard3of3.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7126ad08d1b2ef785b5069bf62698e588d7710ef8ffb90a596e89603b9f11f65
+size 648452
app/static/cat vs dog/tfjs_model/model.json
ADDED
The diff for this file is too large to render.
app/static/flowers/best.h5
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2c633a1963e36fd6f3ffa483dfe395ab494b308ce98ac3c1572b35abef41a52a
+size 31591736
app/static/flowers/tfjs_model/group1-shard1of4.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d697f07703ef03a85b29bd3371fe763da87fe09af183be4f98faa1f67eeca2c5
+size 4194304
app/static/flowers/tfjs_model/group1-shard2of4.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c2d678e8d7f6ca18627778c7d90ff0bef393f2f2f2e91929878bcb57213ecf57
+size 4194304
app/static/flowers/tfjs_model/group1-shard3of4.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0f272dcb7a73a1f281dccc6229b20a6b76f552c764f1046d0df2cdf83e1ecc0f
+size 4194304
app/static/flowers/tfjs_model/group1-shard4of4.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:37245d66a166110b45decca6d1d4ac623225cf60e4a27fee0b83a65d1f20395d
+size 3805460
app/static/flowers/tfjs_model/model.json
ADDED
The diff for this file is too large to render.
app/static/styles/style.css
ADDED
@@ -0,0 +1,127 @@
+ul {
+    list-style-type: none;
+    margin: 0;
+    padding: 0;
+    overflow: hidden;
+    background-color: #333;
+}
+
+li {
+    float: left;
+}
+
+li a {
+    display: block;
+    color: white;
+    text-align: center;
+    padding: 14px 16px;
+    text-decoration: none;
+}
+
+li a:hover {
+    background-color: #111;
+}
+
+.active {
+    background-color: #4CAF50;
+}
+
+body {
+    padding: 0px;
+    margin: 0px;
+    font-family: Arial, Helvetica, sans-serif;
+    color: white;
+}
+
+body, html {
+    height: 100%;
+}
+
+html, body {
+    padding: 0px;
+    margin: 0px;
+    background: #35363A;
+    background-position: center;
+    background-repeat: no-repeat;
+    background-size: cover;
+    height: 100%;
+    overflow-x: scroll;
+}
+
+.header {
+    background-color: rgba(0, 0, 0, 0.3);
+    font-weight: bold;
+    text-transform: uppercase;
+    font-size: large;
+    color: white;
+
+    text-align: center;
+    padding: 30px;
+}
+
+.info {
+    text-align: left;
+    color: gray;
+    width: 30%;
+    margin: auto;
+    padding: 20px;
+    font-size: 14px;
+}
+
+.content {
+    width: 100%;
+}
+
+.upload_part {
+    background-color: rgba(0, 0, 0, 0.3);
+    text-align: center;
+    padding: 40px;
+}
+
+.upload_button {
+    background-color: #FEED06;
+    color: black;
+    padding: 15px;
+    font-size: 16px;
+    border: 0px;
+    border-radius: 5px;
+    -webkit-box-shadow: 6px 8px 18px -5px rgba(0, 0, 0, 0.38);
+    -moz-box-shadow: 6px 8px 18px -5px rgba(0, 0, 0, 0.38);
+    box-shadow: 6px 8px 18px -5px rgba(0, 0, 0, 0.38);
+    cursor: pointer;
+}
+
+.upload_hint {
+    margin: 10px;
+    font-size: 14px;
+    color: rgb(200, 200, 200);
+}
+
+.result_part {
+    text-align: center;
+    padding: 40px;
+}
+
+.result_title {
+    font-size: 18px;
+    font-weight: bold;
+}
+
+.result_id {
+    padding: 10px;
+    margin: 15px;
+    font-size: 22px;
+}
+
+.result_id_number {
+    margin: 15px;
+    padding: 10px;
+    padding-left: 15px;
+    padding-right: 15px;
+    background-color: #FEED06;
+    color: black;
+    font-weight: bold;
+    font-size: 18px;
+    border-radius: 5px;
+}
app/templates/base.html
ADDED
@@ -0,0 +1,46 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+    <meta charset="utf-8">
+    {% if title %}
+    <title>{{ title }} - Classification</title>
+    {% else %}
+    <title>Welcome to Classification Page</title>
+    {% endif %}
+
+    <!-- Link CSS file, jQuery and the TensorFlow.js library -->
+    <link rel="stylesheet" type="text/css" href="{{ url_for('static', filename='styles/style.css') }}">
+    <script src="https://code.jquery.com/jquery-3.5.1.min.js"
+            integrity="sha256-9/aliU8dGd2tb6OSsuzixeV4y/faTqgFtohetphbbj0=" crossorigin="anonymous"></script>
+    <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs@latest"></script>
+</head>
+<body>
+    <ul>
+        <li><a class="{{ status1 }}" href="/flowers"> Flowers </a></li>
+        <li><a class="{{ status2 }}" href="/cats_dogs"> Cats and dogs </a></li>
+    </ul>
+
+    <div class='header'> {{ header }} CLASSIFICATION </div>
+
+    <div class='content'>
+        <div class='upload_part'>
+            <button class='upload_button' id="upload_button"> Choose image file </button>
+            <div class='upload_hint' id='upload_hint'>
+                Available formats: PNG, JPG and JPEG
+            </div>
+            <form action="/" method="POST" enctype="multipart/form-data" id='form'>
+                <input type="file" name="file" id="fileinput" accept="image/*" style="display:none">
+            </form>
+        </div>
+
+        <div class='result_part'>
+            <div class='result_title'><b>Result</b></div>
+            <div class='result_id' id="result_info">_</div>
+            <img style="max-width:300px; border-radius:1rem"
+                 src="https://reactnativecode.com/wp-content/uploads/2018/02/Default_Image_Thumbnail.png"
+                 alt="User Image" id="display_image">
+        </div>
+    </div>
+</body>
+{% block scripts %} {% endblock %}
+</html>
app/templates/cats_dogs.html
ADDED
@@ -0,0 +1,61 @@
+{% extends "base.html" %}
+
+{% block scripts %}
+<script type="text/javascript">
+    const CLASS = {
+        0: 'Cat',
+        1: 'Dog',
+    };
+
+    // Load model
+    $("document").ready(async function() {
+        model = await tf.loadLayersModel('http://127.0.0.1:5000/static/cat vs dog/tfjs_model/model.json');
+        console.log('Load model');
+        console.log(model.summary());
+    });
+
+    $("#upload_button").click(function() {
+        $("#fileinput").trigger('click');
+    });
+
+    async function predict() {
+        // 1. Convert image to tensor
+        let image = document.getElementById("display_image");
+        let img = tf.browser.fromPixels(image);
+        let normalizationOffset = tf.scalar(255 / 2); // 127.5
+        let tensor = img
+            .resizeNearestNeighbor([160, 160])
+            .toFloat()
+            .sub(normalizationOffset)
+            .div(normalizationOffset)
+            .reverse(2)
+            .expandDims();
+
+        // 2. Predict
+        let predictions = await model.predict(tensor);
+        predictions = predictions.dataSync();
+        console.log(predictions);
+
+        // 3. Show the predicted class on the page
+        var index = predictions < 0.5 ? 0 : 1;
+        $("#result_info").append(`${CLASS[index]}`);
+    };
+
+    $("#fileinput").change(function () {
+        let reader = new FileReader();
+        reader.onload = function () {
+            let dataURL = reader.result;
+
+            imEl = document.getElementById("display_image");
+            imEl.onload = function () {
+                predict();
+            }
+            $("#display_image").attr("src", dataURL);
+            $("#result_info").empty();
+        }
+
+        let file = $("#fileinput").prop("files")[0];
+        reader.readAsDataURL(file);
+    });
+</script>
+{% endblock %}
app/templates/flowers.html
ADDED
@@ -0,0 +1,78 @@
+{% extends "base.html" %}
+
+{% block scripts %}
+<script type="text/javascript">
+    const FLOWER_CLASS = {
+        0: 'Daisy',
+        1: 'Dandelion',
+        2: 'Rose',
+        3: 'Sunflower',
+        4: 'Tulip'
+    };
+
+    // Load model
+    $("document").ready(async function() {
+        model = await tf.loadLayersModel('http://127.0.0.1:5000/static/flowers/tfjs_model/model.json');
+        console.log('Load model');
+        console.log(model.summary());
+    });
+
+    $("#upload_button").click(function() {
+        $("#fileinput").trigger('click');
+    });
+
+    async function predict() {
+        // 1. Convert image to tensor
+        let image = document.getElementById("display_image");
+        let img = tf.browser.fromPixels(image);
+        let normalizationOffset = tf.scalar(255 / 2); // 127.5
+        let tensor = img
+            .resizeNearestNeighbor([224, 224])
+            .toFloat()
+            .sub(normalizationOffset)
+            .div(normalizationOffset)
+            .reverse(2)
+            .expandDims();
+
+        // 2. Predict
+        let predictions = await model.predict(tensor);
+        predictions = predictions.dataSync();
+        console.log(predictions);
+
+        // 3. Rank classes by probability and show the top one
+        let top5 = Array.from(predictions)
+            .map(function(p, i) {
+                return {
+                    probability: p,
+                    className: FLOWER_CLASS[i]
+                };
+            })
+            .sort(function(a, b) {
+                return b.probability - a.probability;
+            });
+
+        console.log(top5);
+        $("#result_info").empty();
+        $("#result_info").append(`${top5[0].className}`);
+    };
+
+    $("#fileinput").change(function () {
+        let reader = new FileReader();
+        reader.onload = function () {
+            let dataURL = reader.result;
+
+            imEl = document.getElementById("display_image");
+            imEl.onload = function () {
+                predict();
+            }
+            $("#display_image").attr("src", dataURL);
+            $("#result_info").empty();
+        }
+
+        let file = $("#fileinput").prop("files")[0];
+        reader.readAsDataURL(file);
+    });
+</script>
+{% endblock %}
cat_dog.py
ADDED
@@ -0,0 +1,82 @@
+import os
+import tensorflow as tf
+
+from tensorflow.keras.preprocessing.image import ImageDataGenerator
+
+
+# init necessary variables
+BATCH_SIZE = 32
+IMG_SIZE = (160, 160)
+IMG_SHAPE = IMG_SIZE + (3,)
+
+# 1. Download dataset
+_URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip'
+path_to_zip = tf.keras.utils.get_file('cats_and_dogs.zip', origin=_URL, extract=True)
+PATH = os.path.join(os.path.dirname(path_to_zip), 'cats_and_dogs_filtered')
+train_dir = os.path.join(PATH, 'train')
+validation_dir = os.path.join(PATH, 'validation')
+
+# 2. Data augmentation
+image_gen = ImageDataGenerator(preprocessing_function=tf.keras.applications.mobilenet_v2.preprocess_input,
+                               rescale=1./255, rotation_range=40,
+                               width_shift_range=0.2, height_shift_range=0.2,
+                               shear_range=0.2, zoom_range=0.2,
+                               horizontal_flip=True, fill_mode='nearest')
+
+train_data_gen = image_gen.flow_from_directory(batch_size=BATCH_SIZE, directory=train_dir,
+                                               shuffle=True, target_size=IMG_SIZE, class_mode='binary')
+
+val_data_gen = image_gen.flow_from_directory(batch_size=BATCH_SIZE, directory=validation_dir,
+                                             shuffle=True, target_size=IMG_SIZE, class_mode='binary')
+
+# 3. Create the base model from the pre-trained model MobileNet V2
+base_model = tf.keras.applications.MobileNetV2(input_shape=IMG_SHAPE, include_top=False, weights='imagenet')
+
+# Freeze base model
+base_model.trainable = False
+
+# Connect the new prediction head to the base model
+x = base_model.output
+x = tf.keras.layers.GlobalAveragePooling2D()(x)
+x = tf.keras.layers.Dropout(0.2)(x)
+outputs = tf.keras.layers.Dense(1)(x)
+model = tf.keras.Model(base_model.input, outputs)
+
+# Set up the learning process
+base_learning_rate = 0.0001
+model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=base_learning_rate),
+              loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
+              metrics=['accuracy'])
+
+# 4. Train
+initial_epochs = 10
+
+# save best model
+checkpoint = tf.keras.callbacks.ModelCheckpoint('cat vs dog/best.h5', monitor='val_loss',
+                                                save_best_only=True, mode='auto')
+callback_list = [checkpoint]
+
+# transfer learning
+history = model.fit(train_data_gen, epochs=initial_epochs,
+                    validation_data=val_data_gen)
+
+# fine tune
+fine_tune_epochs = 10
+total_epochs = initial_epochs + fine_tune_epochs
+
+# unfreeze base model
+base_model.trainable = True
+
+# Fine-tune from this layer onwards
+fine_tune_at = 100
+
+# Freeze all the layers before the `fine_tune_at` layer
+for layer in base_model.layers[:fine_tune_at]:
+    layer.trainable = False
+
+model.compile(loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
+              optimizer=tf.keras.optimizers.RMSprop(learning_rate=base_learning_rate / 10),
+              metrics=['accuracy'])
+
+history_fine = model.fit(train_data_gen, epochs=total_epochs, initial_epoch=history.epoch[-1],
+                         validation_data=val_data_gen, callbacks=callback_list)
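The browser pages load tfjs_model/model.json, so the Keras checkpoints saved above still have to be converted for TensorFlow.js. The conversion step itself is not in this commit; the following is a plausible sketch using the tensorflowjs package listed in requirements.txt:

    import tensorflow as tf
    import tensorflowjs as tfjs

    # Convert the best checkpoint into the layers-model format that
    # tf.loadLayersModel() consumes (model.json plus binary weight shards).
    model = tf.keras.models.load_model('cat vs dog/best.h5')
    tfjs.converters.save_keras_model(model, 'cat vs dog/tfjs_model')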
flowers.py
ADDED
@@ -0,0 +1,80 @@
+import os
+import glob
+import shutil
+import tensorflow as tf
+
+from tensorflow.keras.preprocessing.image import ImageDataGenerator
+
+# init necessary variables
+BATCH_SIZE = 32
+IMG_SIZE = (224, 224)
+IMG_SHAPE = IMG_SIZE + (3,)
+num_classes = 5
+
+# 1. Download dataset
+_URL = "https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz"
+path_to_zip = tf.keras.utils.get_file(origin=_URL, fname="flower_photos.tgz", extract=True)
+base_dir = os.path.join(os.path.dirname(path_to_zip), 'flower_photos')
+classes = ['roses', 'daisy', 'dandelion', 'sunflowers', 'tulips']
+
+# create train and validation set
+for cl in classes:
+    img_path = os.path.join(base_dir, cl)
+    images = glob.glob(img_path + '/*.jpg')
+    # print("{}: {} Images".format(cl, len(images)))
+    num_train = int(round(len(images) * 0.8))
+    train, val = images[:num_train], images[num_train:]
+
+    for t in train:
+        if not os.path.exists(os.path.join(base_dir, 'train', cl)):
+            os.makedirs(os.path.join(base_dir, 'train', cl))
+        shutil.move(t, os.path.join(base_dir, 'train', cl))
+
+    for v in val:
+        if not os.path.exists(os.path.join(base_dir, 'val', cl)):
+            os.makedirs(os.path.join(base_dir, 'val', cl))
+        shutil.move(v, os.path.join(base_dir, 'val', cl))
+
+train_dir = os.path.join(base_dir, 'train')
+val_dir = os.path.join(base_dir, 'val')
+
+# 2. Data augmentation
+image_gen = ImageDataGenerator(preprocessing_function=tf.keras.applications.mobilenet_v2.preprocess_input,
+                               rescale=1./255, rotation_range=45,
+                               width_shift_range=0.2, height_shift_range=0.2,
+                               shear_range=0.3, zoom_range=0.5, horizontal_flip=True)
+
+train_data_gen = image_gen.flow_from_directory(batch_size=BATCH_SIZE, directory=train_dir,
+                                               shuffle=True, target_size=IMG_SIZE, class_mode='sparse')
+
+val_data_gen = image_gen.flow_from_directory(batch_size=BATCH_SIZE, directory=val_dir,
+                                             shuffle=True, target_size=IMG_SIZE, class_mode='sparse')
+
+# 3. Create the base model from the pre-trained model MobileNet V2
+base_model = tf.keras.applications.mobilenet_v2.MobileNetV2(input_shape=IMG_SHAPE,
+                                                            weights='imagenet', include_top=False)
+# Freeze base model
+base_model.trainable = False
+
+# Connect the new prediction head to the base model
+x = base_model.output
+x = tf.keras.layers.GlobalAveragePooling2D()(x)
+x = tf.keras.layers.Dense(1024, activation='relu')(x)
+x = tf.keras.layers.Dropout(0.2)(x)
+x = tf.keras.layers.Dense(512, activation='relu')(x)
+outputs = tf.keras.layers.Dense(num_classes)(x)
+model = tf.keras.Model(base_model.inputs, outputs)
+
+# Set up the learning process
+model.compile(optimizer='adam',
+              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
+              metrics=['accuracy'])
+
+# 4. Train
+epochs = 10
+# save best model
+checkpoint = tf.keras.callbacks.ModelCheckpoint('flowers/best.h5', monitor='val_loss',
+                                                save_best_only=True, mode='auto')
+callback_list = [checkpoint]
+
+history = model.fit(train_data_gen, epochs=epochs, validation_data=val_data_gen, callbacks=callback_list)
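For a server-side sanity check that mirrors the preprocessing in flowers.html (224x224 resize, pixels scaled to [-1, 1], channels reversed), the following is a hedged sketch: sample.jpg is a hypothetical input file, and the class order follows flow_from_directory's alphabetical directory ordering.

    import numpy as np
    import tensorflow as tf

    FLOWER_CLASS = ['Daisy', 'Dandelion', 'Rose', 'Sunflower', 'Tulip']

    model = tf.keras.models.load_model('flowers/best.h5')
    img = tf.keras.preprocessing.image.load_img('sample.jpg', target_size=(224, 224))  # hypothetical image
    x = (np.asarray(img, dtype=np.float32) - 127.5) / 127.5
    x = x[:, :, ::-1]  # mirror the template's .reverse(2) channel flip
    logits = model.predict(x[np.newaxis])  # the Dense head outputs logits (from_logits=True)
    print(FLOWER_CLASS[int(np.argmax(logits[0]))])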
main.py
ADDED
@@ -0,0 +1,4 @@
+from app import app
+
+if __name__ == "__main__":
+    app.run()
requirements.txt
ADDED
@@ -0,0 +1,3 @@
+tensorflow
+tensorflowjs
+flask