# Copyright 2021 Asuhariet Ygvar
# 
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# 
# http://www.apache.org/licenses/LICENSE-2.0
# 
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.

import onnxruntime
import numpy as np
from PIL import Image
import gradio as gr
import torch
import os

# Fetch the NeuralHash seed matrix and the converted ONNX model
os.system('wget https://www.dropbox.com/s/ggf6ok63u7hywhc/neuralhash_128x96_seed1.dat')
os.system('wget https://www.dropbox.com/s/1jug4wtevz1rol0/model.onnx')
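# Note: shelling out to wget assumes a Unix-like host with wget installed.
# A more portable sketch would reuse torch.hub.download_url_to_file, which
# this script already uses for the example images below (assuming the URL
# serves the raw file):
#
#   torch.hub.download_url_to_file(
#       'https://www.dropbox.com/s/1jug4wtevz1rol0/model.onnx', 'model.onnx')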


# Download example image pairs for the demo
torch.hub.download_url_to_file('https://cdn.pixabay.com/photo/2017/09/11/15/58/sunset-2739472_1280.jpg', 'sunset.jpg')
torch.hub.download_url_to_file('https://i.imgur.com/W8aXbd2.png', 'rotate.png')

torch.hub.download_url_to_file('https://user-images.githubusercontent.com/1328/129860794-e7eb0132-d929-4c9d-b92e-4e4faba9e849.png', 'dog.png')
torch.hub.download_url_to_file('https://user-images.githubusercontent.com/1328/129860810-f414259a-3253-43e3-9e8e-a0ef78372233.png', 'same.png')

torch.hub.download_url_to_file('https://cdn.pixabay.com/photo/2021/08/23/17/53/cat-6568422_1280.jpg', 'cat1.jpg')
torch.hub.download_url_to_file('https://i.imgur.com/fMoVhSz.png', 'cat2.png')

torch.hub.download_url_to_file('https://cdn.pixabay.com/photo/2021/08/10/09/41/lesser-sand-plover-6535531_1280.jpg', 'bird1.jpg')
torch.hub.download_url_to_file('https://i.imgur.com/jDgKAC7.png', 'bird2.png')

# Load ONNX model
session = onnxruntime.InferenceSession('model.onnx')

# Load the output hash matrix, skipping the 128-byte header at the start of
# the .dat file; the remainder is a 96x128 float32 projection matrix
seed1 = open('neuralhash_128x96_seed1.dat', 'rb').read()[128:]
seed1 = np.frombuffer(seed1, dtype=np.float32)
seed1 = seed1.reshape([96, 128])
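# Sanity check (sketch): 96 projection rows yield a 96-bit hash,
# i.e. 24 hex characters in the output below
assert seed1.shape == (96, 128)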

pre_text = "<p style='display: flex; flex-grow: 1; align-items: center; justify-content: center; padding: 2rem 1rem; font-size: 1.5rem; line-height: 2rem; font-weight: 400;'>{}</p>"

# Compute the NeuralHash of one image and return it as a hex string
def compute_hash(img):
  # Preprocess: resize to 360x360 RGB and scale pixels to [-1, 1]
  image = Image.open(img.name).convert('RGB')
  image = image.resize([360, 360])
  arr = np.array(image).astype(np.float32) / 255.0
  arr = arr * 2.0 - 1.0
  arr = arr.transpose(2, 0, 1).reshape([1, 3, 360, 360])

  # Run the ONNX model to get the 128-dim feature vector
  inputs = {session.get_inputs()[0].name: arr}
  outs = session.run(None, inputs)

  # Project the features through the seed matrix; the sign of each of the
  # 96 components gives one bit of the hash
  hash_output = seed1.dot(outs[0].flatten())
  hash_bits = ''.join(['1' if it >= 0 else '0' for it in hash_output])
  return '{:0{}x}'.format(int(hash_bits, 2), len(hash_bits) // 4)

# Hash both images and report whether they collide
def inference(img, img2):
  hash_hex = compute_hash(img)
  hash_hex2 = compute_hash(img2)
  verdict = "Same Hash" if hash_hex == hash_hex2 else "Different Hash"
  return pre_text.format(verdict), pre_text.format(hash_hex), pre_text.format(hash_hex2)
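# The demo reports only exact equality, but perceptual hashes of near-duplicate
# images typically differ in just a few bits. A sketch of a softer comparison
# (hypothetical helper, not wired into the interface):
def hamming_distance(hex1, hex2):
  # XOR the two 96-bit hashes and count the differing bits
  return bin(int(hex1, 16) ^ int(hex2, 16)).count('1')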
   
title = "AppleNeuralHash"
description = "Gradio demo for Apple NeuralHash, a perceptual image hashing method based on neural networks that tolerates resizing and compression. To use it, upload two images, or click one of the examples to load a pair. Read more at the links below."
article = "<p style='text-align: center'><a href='https://www.apple.com/child-safety/pdf/CSAM_Detection_Technical_Summary.pdf'>CSAM Detection Technical Summary</a> | <a href='https://github.com/AsuharietYgvar/AppleNeuralHash2ONNX'>GitHub Repo</a> | <a href='https://github.com/AsuharietYgvar/AppleNeuralHash2ONNX/issues/1'>Working collision example images from the GitHub issue</a></p>"
examples = [['sunset.jpg','rotate.png'],['dog.png','same.png'],['cat1.jpg','cat2.png'],['bird1.jpg','bird2.png']]

gr.Interface(
    inference,
    [gr.inputs.Image(type="file", label="First Image"), gr.inputs.Image(type="file", label="Second Image")],
    [gr.outputs.HTML(label="Comparison"), gr.outputs.HTML(label="First Hash"), gr.outputs.HTML(label="Second Hash")],
    title=title,
    description=description,
    article=article,
    examples=examples,
    allow_flagging=False
    ).launch(debug=True)