import os
import face_recognition
import hashlib

import sqlite3

import file_util as fu

import time_helper

import numpy as np
import math

# File extensions classified as images (stored with ctg=1 in os_storage_files).
valid_ext_arr = ['jpg', 'jpeg', 'png', 'psd', 'gif', 'pef', 'bmp']

# Module-wide sqlite handles; populated by init_sqlite() before any DB helper runs.
conn = None
cursor = None
SIZE = 10  # rows per page for all paginated DB queries
model_data_base_dir = './model_data/'  # cache directory for .npy face encodings


def dict_factory(cursor, row):
    """sqlite3 row factory: map a result row to a {column_name: value} dict.

    Installed as conn.row_factory in init_sqlite() so fetchone()/fetchall()
    return dicts instead of plain tuples.
    """
    # cursor.description yields one 7-tuple per column; [0] is the name.
    return {col[0]: row[idx] for idx, col in enumerate(cursor.description)}


def init_sqlite():
    """Open app.db and publish the connection/cursor as module globals."""
    global conn, cursor
    conn = sqlite3.connect('app.db')
    # Return rows as dicts keyed by column name rather than positional tuples.
    conn.row_factory = dict_factory
    cursor = conn.cursor()


def get_eg_img():
    """Load the reference ("example") face image and return its encoding.

    Fix: the original computed the encoding into a local and never returned
    it, so the function's result was unusable.

    Returns:
        The 128-d face encoding (numpy array) of the first face found in
        the reference image.

    Raises:
        IndexError: if no face is detected in the reference image.
    """
    eg_img = face_recognition.load_image_file("D:\\Workspace\\my\\scripts\\python\\file_tidy\\eg\\aobama.jpg")
    # face_encodings returns one encoding per detected face; use the first.
    return face_recognition.face_encodings(eg_img)[0]


def count_img_dict_from_db():
    """Return the total number of rows in img_dict, or 0 on any DB error."""
    total = 0
    try:
        cursor.execute('select count(*) num from img_dict')
        record = cursor.fetchone()
        total = record.get('num', 0)
    except Exception as e:
        # Best-effort: log the problem and fall back to an empty count.
        print(e)
    return total


def query_img_dict_from_db(page):
    """Fetch one page (SIZE rows) of img_dict.

    Args:
        page: 1-based page number.

    Returns:
        List of row dicts (via dict_factory); [] on any DB error.
    """
    rows = []
    try:
        offset = (page - 1) * SIZE
        # Parameterized query instead of str.format: avoids SQL injection
        # and lets sqlite reuse the prepared statement.
        sql = 'select * from img_dict limit ?,?'
        cursor.execute(sql, (offset, SIZE))
        rows = cursor.fetchall()
    except Exception as e:
        print(e)
    return rows


def query_files(page):
    """Fetch one page (SIZE rows) of image files (ctg=1) from os_storage_files.

    Args:
        page: 1-based page number.

    Returns:
        List of row dicts (via dict_factory); [] on any DB error.
    """
    rows = []
    try:
        offset = (page - 1) * SIZE
        # Parameterized query instead of str.format: avoids SQL injection
        # and lets sqlite reuse the prepared statement.
        sql = 'select * from os_storage_files where ctg=1 limit ?,?'
        cursor.execute(sql, (offset, SIZE))
        rows = cursor.fetchall()
    except Exception as e:
        print(e)
    return rows


def count_files():
    """Return how many image-category rows (ctg=1) exist in os_storage_files.

    Falls back to 0 when the query fails.
    """
    total = 0
    try:
        cursor.execute('select count(*) num from os_storage_files where ctg=1')
        first = cursor.fetchone()
        total = first.get('num', 0)
    except Exception as e:
        print(e)
    return total


def save_file_info(data):
    """Insert one file record into os_storage_files.

    Args:
        data: tuple (size, dir, filename, ext, sign, num, ctime, ctg).

    Errors are printed and otherwise ignored (best-effort insert).
    """
    insert_sql = ('insert into os_storage_files '
                  '(size,dir,filename,ext,sign,num,ctime,ctg) values (?,?,?,?,?,?,?,?)')
    try:
        cursor.execute(insert_sql, data)
        conn.commit()
    except Exception as e:
        print(e)


def update_file_face_num(file_id, face_num):
    """Persist the detected-face count for one os_storage_files row.

    Errors are printed and otherwise ignored (best-effort update).
    """
    try:
        cursor.execute('update os_storage_files set face_num=? where id=?',
                       [face_num, file_id])
        conn.commit()
    except Exception as e:
        print(e)


def save_dup_file_info(data):
    """Record a duplicate file and bump the original's duplicate counter.

    Args:
        data: tuple (size, dir, filename, sign); data[3] is the content hash
            shared with the already-stored original row.

    Fix: the original swallowed every exception silently (``except: pass``),
    hiding real DB errors; errors are now printed, matching the error
    handling of the other DB helpers in this module.
    """
    try:
        sql = 'insert into os_storage_duplicate_files (size,dir,filename,sign) values (?,?,?,?)'
        cursor.execute(sql, data)
        conn.commit()
        # Keep a duplicate count on the canonical row sharing this hash.
        sql = 'update os_storage_files set num=num+1 where sign=?'
        cursor.execute(sql, [data[3]])
        conn.commit()
    except Exception as e:
        print(e)


def save_img_recognize_results(data):
    """Record that a dictionary face was recognized in a stored image file.

    Args:
        data: sequence [img_dict_id, img_file_id].

    Fix: the original swallowed every exception silently (``except: pass``),
    hiding real DB errors; errors are now printed, matching the error
    handling of the other DB helpers in this module.
    """
    try:
        sql = 'insert into img_recognize_results (img_dict_id,img_file_id) values (?,?)'
        cursor.execute(sql, data)
        conn.commit()
    except Exception as e:
        print(e)


def get_file(sign):
    """Look up a stored file by content hash.

    Args:
        sign: MD5 hex digest of the file contents.

    Returns:
        Dict with id/dir/filename keys, or {} when the hash is unknown or
        the query fails.
    """
    result = {}
    try:
        cursor.execute('select id,dir,filename from os_storage_files where sign=? limit 1',
                       [sign])
        fetched = cursor.fetchone()
        if fetched is not None:
            result = fetched
    except Exception as e:
        print(e)
    return result


def tidy_dir_files(target_dir):
    """Recursively scan target_dir, hashing each file and recording it in sqlite.

    New files are inserted into os_storage_files (ctg=1 when the extension is
    a known image type); files whose hash already exists under a different
    path are recorded as duplicates via save_dup_file_info.

    NOTE(review): the returned list is always empty — nothing is ever
    appended to ``files`` and the recursion only extends it with more empty
    lists. The empty return is preserved for caller compatibility.

    Args:
        target_dir: directory to scan.

    Returns:
        [] (see note above).
    """
    files = []
    progress = 0  # per-directory progress counter (resets in each recursion)
    for entry in os.listdir(target_dir):  # iterate entries, not range(len(...))
        path = os.path.join(target_dir, entry)
        if os.path.isdir(path):
            files.extend(tidy_dir_files(path))
        elif os.path.isfile(path):
            progress += 1
            print(progress)
            ext = fu.get_file_extension(path)
            size = os.path.getsize(path)
            # Files over 512 MiB are hashed in chunks to bound memory use.
            if size > 536870912:
                sign = get_big_file_md5(path)
            else:
                sign = get_file_md5(path)
            basedir, filename = os.path.split(path)
            db_file = get_file(sign)
            db_file_id = db_file.get('id', 0)
            if db_file_id > 0:
                # Hash already known: same content at a different path is a
                # duplicate; the identical path means nothing to do.
                if db_file.get('dir') != basedir or db_file.get('filename') != filename:
                    save_dup_file_info((size, basedir, filename, sign))
                else:
                    print('文件重复')
            else:
                ctg = 1 if ext in valid_ext_arr else 0
                ctime = time_helper.ts_to_time_str(os.path.getctime(path), '%Y-%m-%d %H:%M:%S')
                save_file_info((size, basedir, filename, ext, sign, 0, ctime, ctg))
    return files


def loop_exec():
    """Compare every image under the test directory against the reference face.

    Fix: the original referenced ``eg_img_encoding``, which only ever
    existed as a local inside get_eg_img(), so this function raised
    NameError. The reference encoding is now computed here.

    NOTE(review): ``get_all_files`` is not defined in this file — presumably
    a directory-walk helper returning [{'path': ...}, ...]; confirm where it
    comes from before relying on this function.
    """
    eg_img = face_recognition.load_image_file("D:\\Workspace\\my\\scripts\\python\\file_tidy\\eg\\aobama.jpg")
    eg_img_encoding = face_recognition.face_encodings(eg_img)[0]
    dir_all_files = get_all_files('D:\\Workspace\\my\\scripts\\python\\file_tidy\\x')
    arr = []
    for file_obj in dir_all_files:
        file_path = file_obj.get('path')
        unknown_img = face_recognition.load_image_file(file_path)
        # [0]: only the first face in each unknown image is compared.
        unknown_img_encoding = face_recognition.face_encodings(unknown_img)[0]
        results = face_recognition.compare_faces([eg_img_encoding], unknown_img_encoding)
        arr.append({'path': file_path, 'result': results[0]})
    print(arr)


# MD5 of a whole file read in one pass (suitable for small files only).
def get_file_md5(filepath):
    """Return the lowercase hex MD5 of filepath, or None if it is not a file."""
    if not os.path.isfile(filepath):
        return None
    with open(filepath, 'rb') as fh:
        digest = hashlib.md5(fh.read())
    return str(digest.hexdigest())


# MD5 of a file hashed incrementally in chunks (suitable for large files).
def get_big_file_md5(filepath):
    """Return the lowercase hex MD5 of filepath, hashing in 8 KiB chunks.

    Fixes:
    - File handle now closed even if reading raises (``with``); the
      original leaked it on error.
    - Returns None for a non-file path, matching get_file_md5 and
      get_file_sha1; the original raised UnboundLocalError instead.
    - Header comment wrongly said "small files" (copy-paste error).
    """
    if not os.path.isfile(filepath):
        return None
    md5_obj = hashlib.md5()
    with open(filepath, 'rb') as f:
        while True:
            chunk = f.read(8096)
            if not chunk:
                break
            md5_obj.update(chunk)
    # hexdigest() is already lowercase; .lower() kept from the original.
    return str(md5_obj.hexdigest()).lower()


# SHA-1 of a whole file read in one pass (suitable for small files only).
def get_file_sha1(filepath):
    """Return the uppercase hex SHA-1 of filepath, or None if it is not a file."""
    if not os.path.isfile(filepath):
        return None
    with open(filepath, 'rb') as src:
        # Renamed local (was `hash`, shadowing the builtin).
        digest = hashlib.sha1(src.read()).hexdigest()
    return str(digest).upper()


# Cache the face encodings of the dictionary ("known") images.
def get_dict_img_data():
    """Return (encodings, ids) for every img_dict row, caching to .npy files.

    Each dictionary image's first face encoding is computed once and saved
    under model_data/<id>.npy; subsequent runs load straight from disk.

    Returns:
        Tuple (known_arr, known_id_arr): parallel lists of encodings and
        their img_dict ids.
    """
    known_arr = []
    known_id_arr = []
    total_page = math.ceil(count_img_dict_from_db() / SIZE)
    for page in range(1, total_page + 1):
        for row in query_img_dict_from_db(page):
            dict_id = row.get('id')
            cache_path = model_data_base_dir + '{}.npy'.format(dict_id)
            if os.path.exists(cache_path):
                encoding = np.load(cache_path)
            else:
                img = face_recognition.load_image_file(row.get('path'))
                # [0]: only the first detected face is used as the identity.
                encoding = face_recognition.face_encodings(img)[0]
                np.save(model_data_base_dir + '{}'.format(dict_id), encoding,
                        allow_pickle=True, fix_imports=True)
            known_id_arr.append(dict_id)
            known_arr.append(encoding)
    return known_arr, known_id_arr


def compare_faces(known_arr, unknown_img_path):
    """Compare every face in unknown_img_path against the known encodings.

    Args:
        known_arr: list of known face encodings.
        unknown_img_path: path of the image to inspect.

    Returns:
        One boolean match-list (aligned with known_arr) per face detected
        in the unknown image; [] when no face is found.
    """
    img = face_recognition.load_image_file(unknown_img_path)
    encodings = face_recognition.face_encodings(img)
    return [face_recognition.compare_faces(known_arr, enc) for enc in encodings]


def batch_mark_img():
    """Match every stored image file against the dictionary faces.

    Pages through os_storage_files (ctg=1), runs face comparison per file,
    stores the detected-face count the first time, and records each
    dictionary match in img_recognize_results.
    """
    known_arr, known_id_arr = get_dict_img_data()
    total_page = math.ceil(count_files() / SIZE)
    for page in range(1, total_page + 1):
        for db_file in query_files(page):
            file_id = db_file.get('id')
            fp = os.path.join(db_file.get('dir'), db_file.get('filename'))
            results_arr = compare_faces(known_arr, fp)
            # Only fill in face_num while it is still 0 in the DB, and only
            # when at least one face was actually found.
            if db_file.get('face_num') == 0 and len(results_arr) > 0:
                update_file_face_num(file_id, len(results_arr))
            for results in results_arr:
                for i, matched in enumerate(results):
                    if matched:
                        save_img_recognize_results([known_id_arr[i], file_id])


# --- script entry point: open the DB, run the batch face-matching pass. ---
init_sqlite()

try:
    # Fix: close the connection even if batch_mark_img() raises; the
    # original only reached conn.close() on the success path.
    batch_mark_img()
finally:
    try:
        if conn is not None:
            conn.close()
    except Exception as e:
        print(e)
