Spaces:
Running
Running
shujianong
commited on
Commit
•
1d84f16
1
Parent(s):
ccff8eb
Upload app.py
Browse files
app.py
ADDED
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# -*- coding: utf-8 -*-
|
2 |
+
"""gradio-ui-pokemon-card-authenticator.ipynb
|
3 |
+
|
4 |
+
Automatically generated by Colaboratory.
|
5 |
+
|
6 |
+
Original file is located at
|
7 |
+
https://colab.research.google.com/drive/1PfMek8tf9Ztk5QCiorsFU3fMiUNQNnGn
|
8 |
+
|
9 |
+
# Introduction #
|
10 |
+
|
11 |
+
Thanks to Kaggle mini-courses on computer vision for getting me started on this.
|
12 |
+
My first trained CNN to classify the card images heavily inspired by Francesco Marazzi's CNN (https://www.kaggle.com/fmarazzi/baseline-keras-cnn-roc-fast-10min-0-925-lb) for the Histopathologic Cancer Detection competition (https://www.kaggle.com/c/histopathologic-cancer-detection).
|
13 |
+
|
14 |
+
My objective here is to build my first image classifier using Keras. Rare Pokemon cards are highly sought after by collectors, with some even reaching re-sale prices of hundreds of thousands of dollars. It would be important for collectors to tell a genuine card from a fake card to avoid getting duped. Experienced collectors are able to tell the difference between a genuine and a fake card by looking at the features of the back of the card alone, but this might not be so obvious for newcomers. The image classifier described below will aim to classify, with high accuracy (>95%), Pokemon cards as genuine or fake based on the back visuals of the card alone. In future work, the model can be made more robust by including more counterfeit variations, folded, torn, new and old cards in the training, validation and test sets.
|
15 |
+
|
16 |
+
# Preprocess Image
|
17 |
+
User should upload a landscape image with the Pokemon card image positioned upright in the centre of the photo
|
18 |
+
"""
|
19 |
+
|
20 |
+
import cv2
|
21 |
+
import matplotlib.pyplot as plt
|
22 |
+
import matplotlib.patches as patches
|
23 |
+
from PIL import Image
|
24 |
+
import tensorflow as tf
|
25 |
+
import numpy as np
|
26 |
+
from tensorflow.keras.models import load_model
|
27 |
+
|
28 |
+
def readImage(path):
    """Read the image at *path* and return it as an RGB numpy array.

    Raises:
        FileNotFoundError: if OpenCV cannot read the file (cv2.imread
            signals failure by returning None rather than raising).
    """
    bgr_img = cv2.imread(path)
    if bgr_img is None:
        raise FileNotFoundError(f"Could not read image: {path}")
    # OpenCV loads images in BGR channel order by default; convert to RGB
    # for visualization and for the model, which expects RGB input.
    return cv2.cvtColor(bgr_img, cv2.COLOR_BGR2RGB)
|
35 |
+
|
36 |
+
def crop_center(img):
    """Crop the largest centered square from *img*.

    The original implementation assumed a landscape image (width >= height)
    and produced a broken negative-offset slice for portrait inputs; this
    version handles both orientations while behaving identically to the
    original for landscape images.

    Args:
        img: array of shape (height, width, channels).

    Returns:
        A centered square view of *img* with side min(height, width).
    """
    h, w = img.shape[:2]
    side = min(h, w)
    y0 = (h - side) // 2
    x0 = (w - side) // 2
    return img[y0:y0 + side, x0:x0 + side]
|
41 |
+
|
42 |
+
def resizeImage(img, height=256):
    """Resize *img* so its height equals *height*, preserving aspect ratio.

    Args:
        img: array of shape (h, w, channels).
        height: target height in pixels (default 256, matching the model's
            expected input size).

    Returns:
        The resized image; INTER_AREA is the recommended OpenCV
        interpolation for downscaling.
    """
    scale = height / img.shape[0]
    dim = (int(img.shape[1] * scale), height)
    return cv2.resize(img, dim, interpolation=cv2.INTER_AREA)
|
47 |
+
|
48 |
+
model = load_model("/content/drive/MyDrive/Colab Notebooks/pkm_card.h5")
|
49 |
+
|
50 |
+
"""# Define function"""
|
51 |
+
|
52 |
+
def pkm_predict(image):
    """Classify a Pokemon card back image as genuine or counterfeit.

    Args:
        image: RGB array (from the Gradio image input) with the card
            positioned upright in the centre of a landscape photo.

    Returns:
        A human-readable verdict string.

    Raises:
        ValueError: if the cropped/resized image has zero variance, in
            which case per-image standardization would divide by zero.
    """
    image = crop_center(image)
    image = resizeImage(image)
    std = image.std()
    if std == 0:
        # A constant image (e.g. all-black upload) cannot be standardized.
        raise ValueError("Image has zero variance; cannot standardize.")
    # Per-image standardization, matching the preprocessing used in training.
    K_test = (image - image.mean()) / std
    K_test = np.reshape(K_test, (1, 256, 256, 3))
    # Single sigmoid output; presumably P(genuine) — threshold at 0.5 as in
    # the original (score >= 0.5 was mapped to class 1 / "genuine").
    score = float(model.predict(K_test)[0][0])
    if score >= 0.5:
        return "This card is likely genuine."
    else:
        return "This card is likely counterfeit."
|
68 |
+
|
69 |
+
"""# Install Gradio #"""
|
70 |
+
|
71 |
+
!pip install -q gradio
|
72 |
+
|
73 |
+
import gradio as gr
|
74 |
+
image = gr.inputs.Image()
|
75 |
+
|
76 |
+
iface= gr.Interface(
|
77 |
+
fn=pkm_predict,
|
78 |
+
inputs=image,
|
79 |
+
outputs="text",
|
80 |
+
)
|
81 |
+
|
82 |
+
iface.launch()
|