#!/usr/bin/env python3

import os
import glob
import os.path
import shelve # -> database
import argparse
import imagehash

from PIL import Image


# CLI: -d dataset directory to index, -s shelve DB path (required),
# -q query image to look up.
parser = argparse.ArgumentParser()
_arg_specs = (
    (('-d', '--dataset'), {'help': "/path/to/dataset/of/images"}),
    (('-s', '--shelve'), {'required': True,
                          'help': '/path/to/output/shelve/database'}),
    (('-q', '--query'), {'help': '/path/to/the/query/image'}),
)
for _flags, _kwargs in _arg_specs:
    parser.add_argument(*_flags, **_kwargs)
opts = parser.parse_args()



# file
def is_image(f):
    """Return True if *f* looks like an image file, judged by extension.

    NOTE: this inspects the filename only, not the file header, so a
    mis-named file will be mis-classified.
    """
    # os.path.splitext returns a (root, ext) tuple; we need the extension.
    # (The original called .lower() on the tuple itself, which raised
    # AttributeError on every call.)
    ext = os.path.splitext(f)[1].lower()
    __images__ = ('.png', '.jpg', '.jpeg', '.bmp', '.gif', '.tif')
    return ext in __images__


def fingerprint(path):
    """Return the perceptual difference-hash (dhash) of the image at *path*.

    Alternatives would be imagehash.average_hash or imagehash.phash;
    dhash is a reasonable default for near-duplicate detection.
    """
    # Use a context manager so the underlying file handle is closed
    # promptly instead of leaking until garbage collection.
    with Image.open(path) as img:
        return imagehash.dhash(img)


def build():
    """Index every file under opts.dataset into the shelve database.

    Each file's dhash (stringified) becomes a key; the value is the list
    of basenames sharing that hash, so exact duplicates collect under a
    single key.  Exits via parser.error() if --dataset was not given.
    """
    if not opts.dataset:
        parser.print_help()
        # parser.error() prints the message and exits with status 2.
        # (The original raised argparse.ArgumentError with a plain str,
        # which itself crashes inside argparse's error formatting.)
        parser.error("tell me the /path/to/images")
    with shelve.open(opts.shelve, writeback=True) as db:
        for root, dirs, files in os.walk(opts.dataset):
            # NOTE(review): non-image files will make Image.open raise;
            # consider filtering with is_image() here.
            for name in files:
                # In Python 3 map() is lazy, so the original
                # `map(func, files)` never ran; iterate explicitly.
                h = str(fingerprint(os.path.join(root, name)))
                # Keyed by hash: collisions group duplicates together.
                # Basenames (not full paths) are what gets stored.
                db[h] = db.get(h, []) + [os.path.basename(name)]
    print("[+] Done.")


# Fuzzy-matching ideas (current lookup is exact-match only):
# use Hamming distance or edit distance, e.g.:
#   import operator; sum(map(operator.ne, s1, s2))
#   import distance; distance.hamming(s1, s2)  # ~3x slower than the map version
# Possible two-stage comparison:
#   1. compare h.hash.sum as a cheap pre-filter
#   2. compare the full distance
#   *. sum(operator.ne(*map(lambda x: x.hash.flatten(), (s1, s2))))
def search(similarity=1.0):
    """Look up the query image's fingerprint in the shelve database.

    Returns the list of dataset basenames whose hash matches opts.query
    exactly (empty list if no match).  *similarity* is currently unused;
    kept for a future fuzzy (Hamming-distance) lookup.
    """
    # The original omitted `as db`, so `db` was an undefined name.
    with shelve.open(opts.shelve) as db:
        # .get avoids a KeyError when the query hash was never indexed.
        names = db.get(str(fingerprint(opts.query)), [])
        print('[+] %d images found.' % len(names))
    return names