import sys
import os
from datetime import datetime

from sklearn.metrics import classification_report

sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
import argparse
import json
import re
from pprint import pprint
import cv2
import numpy as np
import tensorflow as tf
import logging
from tensorflow.keras.optimizers import Adadelta, Adam

##############################################
from src.image_classification import data_processor, model_generator, data_analysis

use_label_file = False  # True: load label names from a file; the file should contain the label names used, one per line
label_file = 'labels.txt'
base_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
base_dir = '/'.join(base_dir.split('\\'))  # normalise Windows backslashes so paths can be joined with '/'
test_dir = base_dir + '/data/Testing'
train_dir = base_dir + '/data/Training'
saved_files = base_dir + '/src/image_classification/output_files'  # root folder for saved output files; files live under output_files/<model name>
image_size = (100, 100)  # (width, height) of the images used
input_shape = image_size + (3,)  # expected input shape of the trained model; 100 x 100 RGB images give (width, height, 3)


##############################################


def use_model(args, dir_name):
    """
    Run the saved model on every image in ``args.input_dir`` and write the
    predictions to ``args.output_file`` as JSON.

    Relies on the module-level globals ``saved_files``, ``base_dir`` and
    ``labels`` (index -> label key) being initialised by the caller.

    :param args: parsed command-line namespace (input_dir, output_file,
                 soft_predict, threshold, vote)
    :param dir_name: name of the folder (under ``saved_files``) containing
                     the trained model
    :return: None; the prediction dict is printed and written to disk
    """
    input_dir = args.input_dir
    output_file = args.output_file
    result = dict()
    model_out_dir = saved_files + '/' + dir_name
    if not os.path.exists(model_out_dir):
        print("No saved model found")
        sys.exit(1)  # bug fix: was exit(0) — a missing model is a failure, not success
    model = tf.keras.models.load_model(model_out_dir + "/model.h5", custom_objects={'tf': tf})
    # Bug fix: filter to image files *before* sizing the batch array. The
    # original allocated one uninitialised row per directory entry and only
    # filled rows for real images, so any non-image file caused garbage rows
    # to be run through predict().
    x_test_names = [n for n in os.listdir(input_dir) if n.endswith(('.jpg', '.png'))]
    # Bug fix: np.int was removed in NumPy 1.20+; the builtin int is exactly
    # what it aliased. np.zeros avoids uninitialised memory.
    x_test = np.zeros(shape=(len(x_test_names), 100, 100, 3), dtype=int)
    for i, file_name in enumerate(x_test_names):
        picture_path = input_dir + '/' + file_name
        if re.match('.+\\.png', picture_path.lower()):
            # Convert PNGs to JPG first (presumably to drop the alpha channel
            # so the image is a plain 3-channel array — see png_to_jpg).
            picture_path = data_processor.png_to_jpg(picture_path)
        image = cv2.imread(picture_path)
        image = cv2.resize(image, (100, 100))
        x_test[i] = np.asarray(image)
    y_pred = model.predict(x_test)
    # Bug fix: this path was formatted with the global ``name`` instead of the
    # ``dir_name`` parameter, silently ignoring the argument.
    with open(base_dir + '/src/image_classification/output_files/{}/key_val.json'.format(dir_name), mode='r',
              encoding='utf-8') as f:
        keys = json.loads(f.read())
    n_digits = 3  # round reported probabilities to 3 decimal places
    if args.soft_predict:
        threshold = args.threshold  # keep every class whose probability reaches this value
        for i in range(len(x_test_names)):
            probs = y_pred[i]
            max_i = int(np.argmax(probs))
            # The top class is always reported, even when below the threshold.
            result_i = [[keys[labels[max_i]], round(float(np.max(probs)), n_digits)]]
            for j in range(len(probs)):
                if probs[j] >= threshold and j != max_i:
                    result_i.append([keys[labels[j]], round(float(probs[j]), n_digits)])
            result_i.sort(key=lambda x: x[1], reverse=True)
            result[x_test_names[i]] = result_i
        if args.vote and result:  # guard: empty input dir would divide by zero
            # Average each class's probability over all images so the batch
            # votes on a single shared category.
            vote_result = {}
            rate = 1 / len(result)
            for picture in result:
                for r in result[picture]:
                    if vote_result.get(r[0]) is None:
                        vote_result[r[0]] = r[1] * rate
                    else:
                        vote_result[r[0]] += r[1] * rate
            temp = [[k, v] for k, v in vote_result.items() if v >= threshold]
            temp.sort(key=lambda x: x[1], reverse=True)
            result['vote'] = temp
    else:
        # Hard prediction: only the single most likely class per image.
        for i in range(len(x_test_names)):
            result[x_test_names[i]] = [keys[labels[int(np.argmax(y_pred[i]))]],
                                       round(float(np.max(y_pred[i])), n_digits)]
    show_pattern = '\033[1;36m{:-^80}\033[0m'
    print(show_pattern.format('预测结果：'))
    pprint(result)
    with open(output_file, mode='w', encoding='utf-8') as file:
        file.write(json.dumps(result, ensure_ascii=False, indent=2))
    print(show_pattern.format(''))


def _str2bool(value):
    """argparse converter that parses 'true'/'false'-style strings.

    ``type=bool`` is broken for argparse: ``bool('False')`` is True because
    any non-empty string is truthy, so boolean options could never be turned
    off from the command line.
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ('true', 't', 'yes', 'y', '1'):
        return True
    if lowered in ('false', 'f', 'no', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('expected a boolean value, got {!r}'.format(value))


def parse():
    """
    Parse command-line arguments. Use this command line to predict new pictures.

    -input_dir: folder of pictures to predict
    -output_file: prediction result file, type .json, where key is the picture
                  name and value is the predicted category

    :return: parsed argument namespace
    """
    parser = argparse.ArgumentParser(description="use trained model to predict new pictures")
    parser.add_argument('-input_dir', type=str, default='None', help='path of input file')
    parser.add_argument('-output_file', type=str, default='None',
                        help='path of output file, type must be .json')
    # Bug fix: was type=bool, which made '-soft_predict False' parse as True.
    parser.add_argument('-soft_predict', type=_str2bool, default=True,
                        help='soft_predict:\nTrue:Output the most likely classes instead of one')
    parser.add_argument('-threshold', type=float, default=0.1,
                        help='Thresholds for those possible classes used to filter the output')
    parser.add_argument('-vote', type=_str2bool, default=False,
                        help='Vote for the category represented by these pictures (these pictures should belong to the same category). It can only be used when the variable [soft_predict] is True')
    args = parser.parse_args()
    return args


def evaluate():
    """
    Evaluate the trained model on the held-out test set.

    Writes a classification report, a loose classification report and a
    confusion-matrix plot into a fresh timestamped folder under the
    output_files directory. Uses the module-level globals ``name``,
    ``labels``, ``input_shape``, ``image_size``, ``base_dir``,
    ``train_dir`` and ``test_dir``.

    :return: None
    """
    threshold = 0.1  # probability cut-off used by the loose report
    verbose = 1
    batch_size = 41
    learning_rate = 1e-1

    out_root = base_dir + '/src/image_classification/output_files'
    out_name = "{}-评估".format(name) + datetime.now().strftime("-%Y-%m-%d-%H-%M-%S")
    os.makedirs(out_root + '/{}'.format(out_name))

    model = model_generator.network_resnet(input_shape=input_shape, num_classes=300)
    model.load_weights(out_root + '/{}/model.h5'.format(name))
    model.compile(optimizer=Adadelta(learning_rate=learning_rate),
                  loss="sparse_categorical_crossentropy", metrics=["accuracy"])

    _, _, test_gen = data_processor.build_data_generators(train_dir, test_dir, labels=labels,
                                                          image_size=image_size,
                                                          batch_size=batch_size)
    test_gen.reset()  # rewind the generator to its first batch before predicting
    y_pred = model.predict(test_gen, steps=(test_gen.n // batch_size) + 1, verbose=verbose)
    y_true = test_gen.classes[test_gen.index_array]
    y_pred_classes = y_pred.argmax(axis=-1)

    report = classification_report(y_true, y_pred_classes, target_names=labels)
    with open(out_root + '/{}/classification_report.txt'.format(out_name), "w") as report_file:
        report_file.write("%s" % report)

    loose_report = data_analysis.loose_classification_report(y_true, y_pred, target_names=labels,
                                                             threshold=threshold)
    loose_path = out_root + '/loose_classification_report<threshold={}>.txt'.format(threshold)
    loose_path = out_root + '/{}/loose_classification_report<threshold={}>.txt'.format(out_name, threshold)
    with open(loose_path, "w") as report_file:
        report_file.write("%s" % loose_report)

    data_analysis.plot_confusion_matrix(y_true, y_pred_classes, labels,
                                        out_path=out_root + '/{}'.format(out_name))


if __name__ == '__main__':
    # Folder name (under output_files) of the trained model to use.
    name = 'cnn-自己拍摄的数据-11种-normalization-2022-03-09-17-00-01'

    # Resolve the label list: either from a user-supplied label file, or from
    # the all_labels.txt saved alongside the trained model.
    if use_label_file:
        with open(label_file, "r") as f:
            labels = [line.strip() for line in f]
    else:
        with open(base_dir + '/src/image_classification/output_files/{}/all_labels.txt'.format(name), mode='r',
                  encoding='utf-8') as f:
            labels = [line.strip() for line in f]
    num_classes = len(labels)

    # Predict on new images.
    args = parse()
    use_model(args, name)

    # # Evaluate the model on the test set.
    # evaluate()
