

import cv2
import numpy as np

import torch
# import torchvision


from my_py_toolkit.file.file_toolkit import *
from PIL import Image
from tqdm import tqdm




def saveimg(tensor, path='./test.jpg'):
    """Write a CHW image tensor to *path* as a JPEG.

    The tensor is transposed to HWC, scaled by 255 (values are assumed to
    already be in [0, 1] — no clipping is applied; TODO confirm with callers),
    converted BGR->RGB and saved via PIL.
    """
    arr = np.asarray(tensor).transpose(1, 2, 0)  # CHW -> HWC
    arr = np.uint8(arr * 255)
    arr = cv2.cvtColor(arr, cv2.COLOR_BGR2RGB)
    Image.fromarray(arr).save(path)
    
    
def get_metric_all_detect(prop, p_true):
    """Precision at the score threshold where every positive is detected.

    Finds the lowest predicted score among positive samples (label == 1),
    then computes precision over all samples scoring at least that value —
    i.e. the precision achieved when the threshold is lowered until every
    positive sample is caught (precision at 100% recall).

    Args:
        prop (np.ndarray): predicted scores, one per sample.
        p_true (np.ndarray): ground-truth 0/1 labels aligned with `prop`.

    Returns:
        tuple: (precision at full recall, minimum positive-sample score).
        If there are no positive labels, returns (0.0, nan) instead of
        raising ValueError from `.min()` on an empty selection.
    """
    positive = p_true == 1
    if not np.any(positive):
        # No positives in this batch: full-recall precision is undefined.
        return 0.0, float('nan')
    min_score = prop[positive].min()
    selected = p_true[prop >= min_score]
    precision = selected.sum() / selected.shape[0]
    return precision, min_score

def save_predict_img(imgs, pre_y, true_y, save_dir, steps, predict_num):
    """Dump every image in the batch under save_dir/steps/, naming each file
    by its true label ('fake'/'real'), a running counter and the raw
    prediction.  Returns the counter advanced by the number of images saved.
    """
    pre_y = np.asarray(pre_y)
    true_y = np.asarray(true_y)
    for i, img in enumerate(imgs):
        tag = 'fake' if true_y[i] == 1 else 'real'
        path = f'{save_dir}/{steps}/{tag}_{predict_num}_{pre_y[i]}_.jpg'
        make_path_legal(path)
        saveimg(img, path)
        predict_num += 1
    return predict_num

    
def save_error_img(imgs, pre_y, true_y, save_dir, steps, error_num):
    """Save every image the model misclassified to save_dir/steps/.

    A prediction is 'fake' when column 1 of pre_y exceeds 0.5; images whose
    predicted label differs from true_y are written out, named by the true
    label and a running counter.  Returns the advanced counter.
    """
    pre_y = np.asarray(pre_y)
    true_y = np.asarray(true_y)
    # Column 1 is the fake score, thresholded at 0.5 — TODO confirm layout.
    label_pre = pre_y[:, 1] > 0.5
    wrong = np.where((label_pre - true_y) != 0)[0]
    for i in wrong:
        tag = 'fake' if true_y[i] == 1 else 'real'
        path = f'{save_dir}/{steps}/{tag}_{error_num}_{pre_y[i]}_.jpg'
        make_path_legal(path)
        saveimg(imgs[i], path)
        error_num += 1
    return error_num

def get_metrics(result):
    """Compute precision, recall and F1 for class 1.

    `result` is a pair [predicted_labels, true_labels] of equal-length 0/1
    lists.  F1 is computed directly as 2*TP / (pred_pos + true_pos), which
    equals 2PR/(P+R).  Zero denominators yield 0 instead of dividing.
    """
    preds, truths = result
    tp = sum(1 for p, t in zip(preds, truths) if p == 1 and t == 1)
    pred_pos = sum(preds)
    true_pos = sum(truths)
    precision = tp / pred_pos if pred_pos else 0
    recall = tp / true_pos if true_pos else 0
    f1 = 2 * tp / (pred_pos + true_pos) if (pred_pos + true_pos) else 0
    return precision, recall, f1
    

def evaluate(model, dataloder, args, writer, steps):
    """Evaluate *model* on *dataloder* and log precision/recall/F1.

    The model is called with per-image target dicts ({'boxes', 'labels'});
    predictions are taken as the argmax over the last axis of output[0].

    Side effects: puts the model in eval mode and writes the scalars
    'precision', 'recall' and 'f1' to *writer* at global step *steps*.
    """
    print(f'start evaluate ' + '-' * 80)
    model.eval()
    result = [[], []]  # [predicted labels, true labels]
    with torch.no_grad():
        val_bar = tqdm(range(len(dataloder)))
        for imgs, boxes, labels in dataloder:
            imgs, boxes, labels = imgs.to(args.device), boxes.to(args.device), labels.to(args.device)
            # Bug fix: use the actual batch size instead of args.batch_size —
            # the final batch of a loader is often smaller, and indexing
            # boxes[i] past it raised IndexError.  Matches evaluate_box.
            output = model(imgs, [{'boxes': boxes[i][None], 'labels': labels[i][None]}
                                  for i in range(imgs.shape[0])])

            result[0].extend(output[0].max(-1).indices.tolist())
            result[1].extend(labels.tolist())
            val_bar.update(1)
        val_bar.close()
    p, r, f1 = get_metrics(result)
    writer.add_scalar('precision', p, steps)
    writer.add_scalar('recall', r, steps)
    writer.add_scalar('f1', f1, steps)
    print(f'eval res:{result}')

def handle_labels_pre(labels):
    """Collapse each per-image label collection to a binary image-level label:
    1 when the labels sum to something positive, else 0."""
    return [1 if sum(item) > 0 else 0 for item in labels]

def evaluate_box(model, dataloder, args, writer, steps):
    """Evaluate a box-predicting model and log image-level P/R/F1.

    Each per-image output is reduced to a 0/1 label via handle_labels_pre
    (positive when any predicted box label is non-zero); precision, recall
    and F1 are then written to *writer* at step *steps*.
    """
    print(f'start evaluate ' + '-' * 80)
    model.eval()
    result = [[], []]  # [predicted labels, true labels]
    with torch.no_grad():
        progress = tqdm(range(len(dataloder)))
        for imgs, boxes, labels in dataloder:
            imgs = imgs.to(args.device)
            boxes = boxes.to(args.device)
            labels = labels.to(args.device)
            targets = [{'boxes': boxes[i][None], 'labels': labels[i][None]}
                       for i in range(imgs.shape[0])]
            output = model(imgs, targets)

            result[0].extend(handle_labels_pre([box['labels'] for box in output]))
            result[1].extend(labels.tolist())
            progress.update(1)
        progress.close()
    p, r, f1 = get_metrics(result)
    writer.add_scalar('precision', p, steps)
    writer.add_scalar('recall', r, steps)
    writer.add_scalar('f1', f1, steps)
    print(f'eval res:{result}')

def handle_output(outputs, thres=0.5):
    """Binarize detector outputs: 1 when any box score reaches *thres*.

    Args:
        outputs: iterable of per-image dicts containing a 'scores'
            tensor/array of box confidences.
        thres: minimum score for an image to be flagged positive.

    Returns:
        list[int]: one 0/1 flag per item in *outputs*.
    """
    res = []
    for item in outputs:
        # Only the scores matter for the image-level decision; the unused
        # read of item['boxes'] was removed.
        scores = item['scores'].tolist()
        res.append(1 if any(v >= thres for v in scores) else 0)
    return res
        

def handle_output_pr(output, labels):
    """Flatten per-image box scores, pairing each score with its image label.

    Returns (scores, label_scores): parallel lists where every box score from
    image i is paired with labels[i], suitable for a PR curve.
    """
    scores, label_scores = [], []
    for idx, item in enumerate(output):
        per_box = item['scores'].tolist()
        scores += per_box
        label_scores.extend([labels[idx]] * len(per_box))
    return scores, label_scores
        
def get_metric_all_detect(prop, p_true):
    """Precision over all samples whose score reaches the lowest
    positive-sample score — i.e. precision at 100% recall.

    NOTE(review): this is a duplicate of the get_metric_all_detect defined
    earlier in this file; being the later definition, it is the one in
    effect at import time.

    Args:
        prop (np.ndarray): predicted scores, one per sample.
        p_true (np.ndarray): ground-truth 0/1 labels aligned with `prop`.

    Returns:
        tuple: (precision, minimum score among positive samples).
    """
    min_score = prop[np.where(p_true == 1)].min()
    selected = p_true[np.where(prop >= min_score)]
    return selected.sum() / selected.shape[0], min_score
        
def evaluate_pr(model, dataloder, args, writer, steps, valid_steps=-1):
    """Run the classifier over *dataloder*, logging PR curves and the
    all-detect precision metrics.

    Side effects: writes 'test_fake'/'test_real' PR curves and four scalars
    to *writer*, optionally dumps error/predicted images to disk, and saves
    the (label, score) pairs to ./output/out_eval_<steps>.json.

    Args:
        valid_steps: stop early once this many outputs were collected
            (<= 0 means evaluate the full loader).
    """
    print(f'start evaluate ' + '-' * 80)
    model.eval()
    scores, scores_label, model_out = [], [], []
    error_nums, predict_num = 0, 0
    with torch.no_grad():
        progress = tqdm(range(len(dataloder)))
        for imgs, labels in dataloder:
            if valid_steps > 0 and len(model_out) > valid_steps:
                break
            imgs = imgs.to(args.device)
            labels = labels.to(args.device)
            output = model(imgs)
            # Column 1 is taken as the "fake" probability — TODO confirm.
            scores += output[:, 1].tolist()
            scores_label += labels.tolist()
            model_out += output.tolist()
            progress.update(1)

            if args.save_error:
                error_nums = save_error_img(imgs.to('cpu'), output.to('cpu'),
                                            labels.to('cpu'), args.error_dir,
                                            steps, error_nums)
            if args.save_predict:
                predict_num = save_predict_img(imgs.to('cpu'), output.to('cpu'),
                                               labels.to('cpu'), args.predict_dir,
                                               steps, predict_num)
        progress.close()
    scores = np.asarray(scores)
    scores_label = np.asarray(scores_label)
    writer.add_pr_curve('test_fake', scores_label, scores, steps)
    writer.add_pr_curve('test_real', 1 - scores_label, 1 - scores, steps)

    fake_precison, fake_min_score = get_metric_all_detect(scores, scores_label)
    real_precison, real_max_score = get_metric_all_detect(1 - scores, 1 - scores_label)
    real_max_score = 1 - real_max_score
    # NOTE(review): 'fake_precion' looks like a typo of 'fake_precision';
    # kept byte-for-byte so existing dashboards keep their history.
    writer.add_scalar('fake_precion', fake_precison, steps)
    writer.add_scalar('fake_min_score', fake_min_score, steps)
    writer.add_scalar('real_precision', real_precison, steps)
    writer.add_scalar('real_max_score', real_max_score, steps)
    print(f'fake_precision: {fake_precison}, fake min score: {fake_min_score}')
    print(f'real_precision: {real_precison}, real max score: {real_max_score}')

    make_path_legal(f'./output/out_eval_{steps}.json')
    writejson([(l.item(), s.item()) for l, s in zip(scores_label, scores)],
              f'./output/out_eval_{steps}.json')

def evaluate_fakebox_pr(model, dataloder, args, writer, steps, valid_steps=-1):
    """Evaluate a fake-box detector and log PR curves over box scores.

    Every predicted box score is paired with its image label via
    handle_output_pr and logged as 'test_fake'/'test_real' PR curves; the
    raw model outputs are dumped to ./output/out_eval_<steps>.json.

    Args:
        valid_steps: stop early once this many outputs were collected
            (<= 0 means evaluate the full loader).
    """
    print(f'start evaluate ' + '-' * 80)
    model.eval()
    scores = []
    scores_label = []
    model_out = []
    with torch.no_grad():
        val_bar = tqdm(range(len(dataloder)))
        for imgs, boxes, labels in dataloder:
            if valid_steps > 0 and len(model_out) > valid_steps:
                break
            imgs, boxes, labels = imgs.to(args.device), boxes.to(args.device), labels.to(args.device)
            output = model(imgs, [{'boxes': boxes[i][None], 'labels': labels[i][None]}
                                  for i in range(imgs.shape[0])])

            cur_scores, cur_score_labels = handle_output_pr(output, labels)
            scores.extend(cur_scores)
            scores_label.extend(cur_score_labels)
            model_out.extend(output)
            val_bar.update(1)
        val_bar.close()
    scores, scores_label = np.asarray(scores), np.asarray(scores_label)
    writer.add_pr_curve('test_fake', scores_label, scores, steps)
    writer.add_pr_curve('test_real', 1 - scores_label, 1 - scores, steps)

    # Convert tensors to plain lists so the dump is JSON-serializable.
    for item in model_out:
        for k, v in item.items():
            item[k] = v.tolist()
    # Bug fix: these path strings were missing the f-prefix, so every run
    # wrote the literal file 'out_eval_{steps}.json' instead of a
    # per-step file (compare evaluate_pr, which formats correctly).
    make_path_legal(f'./output/out_eval_{steps}.json')
    writejson(model_out, f'./output/out_eval_{steps}.json')

def evaluate_fakebox(model, dataloder, args, writer, steps):
    """Evaluate a fake-box detector at image level and log P/R/F1.

    Each image is flagged positive when any predicted box score reaches 0.5
    (handle_output); precision, recall and F1 against the image labels are
    written to *writer*, and raw outputs are dumped to
    ./output/out_eval_<steps>.json.
    """
    print(f'start evaluate ' + '-' * 80)
    model.eval()
    result = [[], []]  # [predicted labels, true labels]
    model_out = []
    with torch.no_grad():
        val_bar = tqdm(range(len(dataloder)))
        for imgs, boxes, labels in dataloder:
            imgs, boxes, labels = imgs.to(args.device), boxes.to(args.device), labels.to(args.device)
            output = model(imgs, [{'boxes': boxes[i][None], 'labels': labels[i][None]}
                                  for i in range(imgs.shape[0])])

            result[0].extend(handle_output(output))
            result[1].extend(labels.tolist())
            model_out.extend(output)
            val_bar.update(1)
        val_bar.close()
    p, r, f1 = get_metrics(result)
    writer.add_scalar('precision', p, steps)
    writer.add_scalar('recall', r, steps)
    writer.add_scalar('f1', f1, steps)
    print(f'eval res:{result}')

    # Convert tensors to plain lists so the dump is JSON-serializable.
    for item in model_out:
        for k, v in item.items():
            item[k] = v.tolist()
    # Bug fix: these path strings were missing the f-prefix, so every run
    # wrote the literal file 'out_eval_{steps}.json' instead of a
    # per-step file (compare evaluate_pr, which formats correctly).
    make_path_legal(f'./output/out_eval_{steps}.json')
    writejson(model_out, f'./output/out_eval_{steps}.json')