# -*- coding: utf-8 -*-
"""
Created on Tue Jul 13 16:58:28 2021
"""
import time
import streamlit as st
import tensorflow as tf
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.image import img_to_array
import numpy as np
import pandas as pd
from absl import app, flags, logging
from absl.flags import FLAGS
import cv2
from models import (YoloV3, YoloV3Tiny)
from dataset import transform_images
from utils import draw_outputs
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.patches import Rectangle
from geotiff import GeoTiff
from PIL import Image
import os
import sys
import requests
image = load_img('trees.jpg')
image = img_to_array(image).astype('float')/255
st.image(image)
c1, c2, c3 = st.columns([0.2, 0.6, 0.2])
file_str = c2.text_input(label='URL to GeoTIFF file', value='')
if file_str:
    # download the GeoTIFF from the given URL to a local file
    filepath = 'example.tif'
    file = filepath
    # file = wget.download(file_str, out=filepath)
    # file = wget.download(file_str)
    r = requests.get(file_str, allow_redirects=True)
    with open(filepath, 'wb') as f:
        f.write(r.content)
    geo_tiff = GeoTiff(filepath)
    # the original crs code
    # geo_tiff.crs_code
    # the current crs code
    # geo_tiff.as_crs
    # the shape of the tiff
    # geo_tiff.tif_shape
    # the bounding box in the as_crs CRS
    # geo_tiff.tif_bBox
    # the bounding box as WGS 84
    # geo_tiff.tif_bBox_wgs_84
    # the bounding box in the as_crs converted coordinates
    # geo_tiff.tif_bBox_converted
    # raster size in pixels: i = width (columns), j = height (rows)
    i = geo_tiff.tif_shape[1]
    j = geo_tiff.tif_shape[0]
    # in the as_crs coords
    # geo_tiff.get_coords(i, j)
    # in WGS 84 coords
    print('Coordinates')
    print(geo_tiff.get_wgs_84_coords(i, j))
    print(geo_tiff.get_wgs_84_coords(0, 0))
    # degrees per pixel in x- and y-direction
    deg_pixel_x = (geo_tiff.get_wgs_84_coords(i, j)[0]
                   - geo_tiff.get_wgs_84_coords(0, 0)[0]) / i
    deg_pixel_y = (geo_tiff.get_wgs_84_coords(i, j)[1]
                   - geo_tiff.get_wgs_84_coords(0, 0)[1]) / (-j)
    # WGS 84 coordinates of the lower-left corner (origin of the tiling below)
    start_x = geo_tiff.get_wgs_84_coords(0, 0)[0]
    start_y = geo_tiff.get_wgs_84_coords(i, j)[1]
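    # With these values, the tiling loop further down locates tile (m, n) in WGS 84 as
    #   lon = start_x + m * size * deg_pixel_x
    #   lat = start_y + n * size * deg_pixel_y
    # i.e. as offsets from the lower-left corner. This presumes a north-up GeoTIFF
    # without rotation or skew (an assumption, not checked here).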
    # print(start_x, start_y)
    # print(deg_pixel_x, deg_pixel_y)
    # print('_'*50 + ' End ' + '_'*50)
    # size = 416
    # area_box = [(start_x+int(i/size/2)*deg_pixel_x*size, start_y+int(j/size/2)*deg_pixel_y*size),
    #             (start_x+int(i/size/2)*deg_pixel_x*size+size*deg_pixel_x,
    #              start_y+int(j/size/2)*deg_pixel_y*size+size*deg_pixel_y)]
    # array = geo_tiff.read_box(area_box.copy())
    # show a downscaled overview of the orthophoto
    size = (416, 416)
    Image.MAX_IMAGE_PIXELS = 10000000000
    with Image.open(file) as im:
        im.thumbnail(size)
        global_image = c2.image(im)
        # global_image = c2.image(array/255)
    threshold = c2.text_input(
        label='Detection threshold: Reduce to detect more trees, increase to remove duplicates', value='0.3')
    button = c2.button('Start detecting defect trees')
    my_bar = c2.progress(0)
    if button:
        size = 416
        # parse argv and register the YOLOv3 flags (absl) with their defaults
        FLAGS(sys.argv)
        flags.DEFINE_string('classes', 'trees_simple.names', 'path to classes file')
        flags.DEFINE_string('weights', 'checkpoints/trees_all.tf',
                            'path to weights file')
        flags.DEFINE_boolean('tiny', False, 'yolov3 or yolov3-tiny')
        flags.DEFINE_integer('size', 416, 'resize images to')
        flags.DEFINE_string('video', './data/video.mp4',
                            'path to video file or number for webcam')
        flags.DEFINE_string('output', './data/video2.mp4', 'path to output video')
        flags.DEFINE_string('output_format', 'XVID',
                            'codec used in VideoWriter when saving video to file')
        flags.DEFINE_integer('num_classes', 5, 'number of classes in the model')
        flags.DEFINE_float('yolo_iou_threshold', 0.5, 'iou threshold')
        flags.DEFINE_float('yolo_score_threshold', float(threshold), 'score threshold')
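        # Note (assumption): Streamlit re-runs this script in the same process on every
        # interaction, and absl raises DuplicateFlagError if a flag name is defined twice.
        # If that becomes a problem, one possible guard is to wrap the DEFINE_* calls in
        #   if 'classes' not in FLAGS: ...
        # (untested sketch, not part of the original app).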
        # let TensorFlow grow GPU memory instead of allocating it all at once
        physical_devices = tf.config.experimental.list_physical_devices('GPU')
        for physical_device in physical_devices:
            tf.config.experimental.set_memory_growth(physical_device, True)
        # build the detector and load the trained weights
        if FLAGS.tiny:
            yolo = YoloV3Tiny(classes=FLAGS.num_classes)
        else:
            yolo = YoloV3(classes=FLAGS.num_classes)
        yolo.load_weights(FLAGS.weights)
        logging.info('weights loaded')
        class_names = [c.strip() for c in open(FLAGS.classes).readlines()]
        logging.info('classes loaded')
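        # The model follows a yolov3-tf2-style implementation (models.YoloV3,
        # transform_images, draw_outputs). In that style, yolo.predict returns
        # (boxes, scores, classes, nums) per image, with boxes as normalized
        # (x1, y1, x2, y2) in [0, 1] and nums the count of valid detections; this is
        # assumed below when converting boxes to coordinates, but not verified here.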
        times = []
        # leftover from the original video-detection script; vid/out are not used below
        try:
            vid = cv2.VideoCapture(int(FLAGS.video))
        except ValueError:
            vid = cv2.VideoCapture(FLAGS.video)
        out = None
        # per-tile results
        images = []
        bboxes_x_found = []
        bboxes_y_found = []
        classes_found = []
        scores_found = []
        # matplotlib figure used to mark the processed tile on the overview image
        fig = plt.figure()
        canvas = FigureCanvasAgg(fig)
        ax = fig.add_subplot()
        ax.axis('off')
        imgg = st.image([], width=300)
        z2 = pd.DataFrame(np.ones((0, 4)),
                          columns=['Class', 'Certainty', 'Longitude', 'Latitude'])
        datafr = c2.dataframe(data=z2)
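        # imgg and datafr are placeholder elements; calling .image() / .dataframe() on
        # them inside the loop updates the displayed content in place (the usual
        # Streamlit element-mutation pattern).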
        # walk over the orthophoto in 416x416 pixel tiles
        for m in range(int(i/size)):
            my_bar.progress(int((m+1)/int(i/size)*100))
            for n in range(int(j/size)):
                # WGS 84 bounding box of the current tile
                area_box = [(start_x+m*deg_pixel_x*size, start_y+n*deg_pixel_y*size),
                            (start_x+m*deg_pixel_x*size+size*deg_pixel_x,
                             start_y+n*deg_pixel_y*size+size*deg_pixel_y)]
                array = geo_tiff.read_box(area_box.copy())
                img = array
                # img_in = np.array([img[:, :, :3], img[:, :, :3], img[:, :, :3]])
                img_in = tf.expand_dims(img[:, :, :3], 0)
                img_in = transform_images(img_in, FLAGS.size)
                t1 = time.time()
                boxes, scores, classes, nums = yolo.predict(img_in, verbose=False)
                # print('image min max:', img.min(), img.max(), img.shape)
                # images.append(img.astype('float')/255)
                # imgg.image(images, width=230)
                # at least one detection in this tile
                if nums[0] > 0:
                    # mark the tile on the overview image (display currently disabled)
                    ax.cla()
                    ax.imshow(im)
                    rect = Rectangle((m, n), 416, 416, linewidth=2,
                                     edgecolor='r', facecolor='none')
                    ax.add_patch(rect)
                    ax.draw(canvas.get_renderer())
                    im = np.array(canvas.buffer_rgba())
                    # global_image.image(im)
                    t2 = time.time()
                    times.append(t2-t1)
                    times = times[-20:]
                    # draw boxes and labels on the tile and append it to the gallery
                    img = draw_outputs(img, (boxes, scores, classes, nums), class_names)
                    img = cv2.putText(img, "Time: {:.2f}ms".format(sum(times)/len(times)*1000), (0, 30),
                                      cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
                    images.append(img/255)
                    imgg.image(images, width=230)
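                    # convert each detection to longitude/latitude: the boxes are assumed
                    # to be normalized (x1, y1, x2, y2) within the tile, so the top-left
                    # corner is scaled by the tile extent and shifted by the tile origin
                    # (assumption based on the yolov3-tf2 output format noted above).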
                    for ind in range(int(nums[0])):
                        classes_found.append(class_names[int(classes[0][ind])])
                        scores_found.append(np.array(scores[0][ind]))
                        bboxes_x_found.append(
                            np.array(boxes[0][ind][0]*deg_pixel_x*size+start_x+m*deg_pixel_x*size))
                        bboxes_y_found.append(
                            np.array(boxes[0][ind][1]*deg_pixel_y*size+start_y+n*deg_pixel_y*size))
                # plt.imshow(img)
                # plt.show()
        # collect all detections into a table and offer it as a CSV download
        if len(classes_found) != 0:
            classes_found_np = np.array(classes_found).reshape(-1, 1).astype('str')
            bboxes_x_found_np = np.array(bboxes_x_found).reshape(-1, 1)
            bboxes_y_found_np = np.array(bboxes_y_found).reshape(-1, 1)
            scores_found_np = np.array(scores_found).reshape(-1, 1).astype('float64')
            found = np.concatenate((classes_found_np, scores_found_np,
                                    bboxes_x_found_np, bboxes_y_found_np), axis=1)
            # np.savetxt(r'C:\Users\alfa\Desktop\Python\Baum Projekt Labels\found trees.txt',
            #            found, fmt=['%s', '%.0f', '%.7f', '%.7f'])
            z2 = found
            z2 = pd.DataFrame(
                z2, columns=['Class', 'Certainty', 'Longitude', 'Latitude'])
            datafr.dataframe(data=z2)
            z3 = z2.to_csv()
            st.download_button('Download *.csv file', z3)