

from ast import arg
from cProfile import label
import copy

import cv2
import numpy as np

import torch
import torch.nn.functional as F
import torchvision

from argparse import ArgumentParser, Namespace
from torch import nn
from torchvision.ops import MultiScaleRoIAlign
from typing import List, Tuple, Dict

from my_py_toolkit.file.file_toolkit import *
from torch.optim import Adam
from torch.utils.data import DataLoader, Dataset, TensorDataset
from torchvision.models.detection._utils import overwrite_eps
from torchvision._internally_replaced_utils import load_state_dict_from_url

from torchvision.models.detection.anchor_utils import AnchorGenerator
from torchvision.models.detection.generalized_rcnn import GeneralizedRCNN
from torchvision.models.detection.rpn import RPNHead, RegionProposalNetwork
# from torchvision.models.detection.roi_heads import RoIHeads
from torchvision.models.detection.transform import GeneralizedRCNNTransform
from torchvision.models.detection.backbone_utils import resnet_fpn_backbone, _validate_trainable_layers, mobilenet_backbone
from torchvision.models.detection import _utils as det_utils
from torchvision.models.detection.roi_heads import fastrcnn_loss, maskrcnn_loss, maskrcnn_inference, keypointrcnn_loss, keypointrcnn_inference
from torchvision.ops import boxes as box_ops
from tqdm import tqdm
from lib.args import get_args
from lib.model import PSDetector, ClsEff
from lib.utils import *


def get_metrics(result):
    """Compute binary precision, recall and F1 from paired predictions.

    ``result`` is a two-element sequence: ``result[0]`` holds the 0/1
    predictions and ``result[1]`` the matching 0/1 ground-truth labels.
    Any ratio with a zero denominator is reported as 0.
    """
    preds, truths = result
    true_pos = sum(1 for pred, truth in zip(preds, truths) if pred == truth == 1)
    pred_pos = sum(preds)   # TP + FP: everything predicted positive
    real_pos = sum(truths)  # TP + FN: everything actually positive
    precision = true_pos / pred_pos if pred_pos else 0
    recall = true_pos / real_pos if real_pos else 0
    # F1 = 2TP / (2TP + FP + FN) = 2TP / ((TP+FP) + (TP+FN))
    f1 = 2 * true_pos / (pred_pos + real_pos) if (pred_pos + real_pos) else 0
    return precision, recall, f1
    

def evaluate(model, dataloder, args, writer, steps):
    """Evaluate the classifier over *dataloder* and log P/R/F1 to *writer*.

    The model is called with per-image target dicts; the argmax over the
    model's class scores is compared against the true labels.

    Args:
        model: model returning per-class scores as output[0].
        dataloder: DataLoader yielding (imgs, boxes, labels) batches.
        args: namespace with at least 'device'.
        writer: TensorBoard-style SummaryWriter.
        steps: global step used as the x-axis for the logged scalars.
    """
    print(f'start evaluate ' + '-' * 80)
    model.eval()
    result = [[], []]  # [predictions, ground-truth labels]
    with torch.no_grad():
        val_bar = tqdm(range(len(dataloder)))
        for imgs, boxes, labels in dataloder:
            imgs, boxes, labels = imgs.to(args.device), boxes.to(args.device), labels.to(args.device)
            # BUGFIX: iterate over the actual batch size (imgs.shape[0])
            # rather than args.batch_size -- the final batch of an epoch may
            # be smaller, and the old code raised IndexError on it. This also
            # matches the sibling evaluate_* functions.
            output = model(imgs, [{'boxes': boxes[i][None], 'labels': labels[i][None]}
                        for i in range(imgs.shape[0])])

            result[0].extend(output[0].max(-1).indices.tolist())
            result[1].extend(labels.tolist())
            val_bar.update(1)
        val_bar.close()
    p, r, f1 = get_metrics(result)
    writer.add_scalar('precision', p, steps)
    writer.add_scalar('recall', r, steps)
    writer.add_scalar('f1', f1, steps)
    print(f'eval res:{result}')

def handle_labels_pre(labels):
    """Collapse per-sample label collections to one binary flag each.

    A sample maps to 1 when the sum of its label values is positive,
    otherwise 0. Returns a list of ints, one per element of *labels*.
    """
    return [1 if sum(entry) > 0 else 0 for entry in labels]

def evaluate_box(model, dataloder, args, writer, steps):
    """Evaluate the box model: collapse its per-image labels to binary
    flags, compare against ground truth, and log P/R/F1 to *writer*."""
    print(f'start evaluate ' + '-' * 80)
    model.eval()
    preds, golds = [], []
    with torch.no_grad():
        progress = tqdm(range(len(dataloder)))
        for imgs, boxes, labels in dataloder:
            imgs = imgs.to(args.device)
            boxes = boxes.to(args.device)
            labels = labels.to(args.device)
            targets = [{'boxes': boxes[i][None], 'labels': labels[i][None]}
                       for i in range(imgs.shape[0])]
            output = model(imgs, targets)

            preds.extend(handle_labels_pre([box['labels'] for box in output]))
            golds.extend(labels.tolist())
            progress.update(1)
        progress.close()
    result = [preds, golds]
    p, r, f1 = get_metrics(result)
    writer.add_scalar('precision', p, steps)
    writer.add_scalar('recall', r, steps)
    writer.add_scalar('f1', f1, steps)
    print(f'eval res:{result}')

def handle_output(outputs, thres=0.5):
    """Convert detector outputs to one binary flag per image.

    Args:
        outputs: iterable of per-image dicts with a 'scores' tensor.
        thres: score threshold; an image is flagged 1 when any of its
            detection scores reaches the threshold.

    Returns:
        list[int]: one 0/1 flag per element of *outputs*.
    """
    res = []
    for item in outputs:
        # Unused 'boxes' lookup removed -- only the scores matter here.
        scores = item['scores'].tolist()
        # any() over a generator: short-circuits without building a list.
        res.append(1 if any(v >= thres for v in scores) else 0)
    return res
        

def handle_output_pr(output, labels):
    """Flatten per-image detection scores into one list, pairing every
    score with the image-level label it came from.

    Returns:
        (scores, label_scores): two parallel flat lists of equal length.
    """
    scores, label_scores = [], []
    for idx, item in enumerate(output):
        per_img = item['scores'].tolist()
        scores += per_img
        label_scores.extend([labels[idx]] * len(per_img))
    return scores, label_scores
        
        
def evaluate_pr(model, dataloder, args, writer, steps, valid_steps=-1):
    """Evaluate the classifier, log PR curves, and dump (label, score) pairs.

    Args:
        model: classifier whose output column 1 is the positive-class score.
        dataloder: DataLoader yielding (imgs, boxes, labels) batches.
        args: namespace with at least 'device'.
        writer: TensorBoard-style SummaryWriter.
        steps: global step for logging.
        valid_steps: if > 0, stop after roughly this many samples (quick
            partial validation); -1 runs the full loader.
    """
    print(f'start evaluate ' + '-' * 80)
    model.eval()
    scores = []
    scores_label = []
    model_out = []
    with torch.no_grad():
        val_bar = tqdm(range(len(dataloder)))
        for imgs, boxes, labels in dataloder:
            # Early exit for a quick partial validation pass.
            if valid_steps > 0 and len(model_out) > valid_steps:
                break
            imgs, boxes, labels = imgs.to(args.device), boxes.to(args.device), labels.to(args.device)
            output = model(imgs)

            scores.extend(output[:, 1].tolist())  # positive-class column
            scores_label.extend(labels.tolist())
            model_out.extend(output.tolist())
            val_bar.update(1)
        val_bar.close()
    # Keep plain-Python copies for the JSON dump before the numpy conversion.
    score_list, label_list = scores, scores_label
    scores, scores_label = np.asarray(scores), np.asarray(scores_label)
    writer.add_pr_curve('test_fake', scores_label, scores, steps)
    writer.add_pr_curve('test_real', 1 - scores_label, 1 - scores, steps)

    make_path_legal(f'./output/out_eval_{steps}.json')
    # BUGFIX: dump the full accumulated labels (scores_label), not 'labels',
    # which at this point only held the final batch's tensor and silently
    # misaligned with the complete score list.
    writejson([(l, s) for l, s in zip(label_list, score_list)], f'./output/out_eval_{steps}.json')

def evaluate_fakebox_pr(model, dataloder, args, writer, steps, valid_steps=-1):
    """Evaluate the box model, log PR curves, and dump raw outputs to JSON.

    Args:
        model: detection model returning per-image dicts with 'scores'.
        dataloder: DataLoader yielding (imgs, boxes, labels) batches.
        args: namespace with at least 'device'.
        writer: TensorBoard-style SummaryWriter.
        steps: global step for logging and the output filename.
        valid_steps: if > 0, stop after roughly this many outputs; -1 runs
            the full loader.
    """
    print(f'start evaluate ' + '-' * 80)
    model.eval()
    scores = []
    scores_label = []
    model_out = []
    with torch.no_grad():
        val_bar = tqdm(range(len(dataloder)))
        for imgs, boxes, labels in dataloder:
            # Early exit for a quick partial validation pass.
            if valid_steps > 0 and len(model_out) > valid_steps:
                break
            imgs, boxes, labels = imgs.to(args.device), boxes.to(args.device), labels.to(args.device)
            output = model(imgs, [{'boxes': boxes[i][None], 'labels': labels[i][None]}
                        for i in range(imgs.shape[0])])

            cur_scores, cur_score_labels = handle_output_pr(output, labels)
            scores.extend(cur_scores)
            scores_label.extend(cur_score_labels)
            model_out.extend(output)
            val_bar.update(1)
        val_bar.close()
    scores, scores_label = np.asarray(scores), np.asarray(scores_label)
    writer.add_pr_curve('test_fake', scores_label, scores, steps)
    writer.add_pr_curve('test_real', 1 - scores_label, 1 - scores, steps)

    # Tensors are not JSON-serializable; convert them in place first.
    for item in model_out:
        for k, v in item.items():
            item[k] = v.tolist()
    # BUGFIX: these were plain strings missing the 'f' prefix, so every run
    # wrote to the literal path './output/out_eval_{steps}.json'.
    make_path_legal(f'./output/out_eval_{steps}.json')
    writejson(model_out, f'./output/out_eval_{steps}.json')

def evaluate_fakebox(model, dataloder, args, writer, steps):
    """Evaluate the box model with a fixed 0.5 score threshold, log P/R/F1,
    and dump the raw model outputs to JSON.

    Args:
        model: detection model returning per-image dicts with 'scores'.
        dataloder: DataLoader yielding (imgs, boxes, labels) batches.
        args: namespace with at least 'device'.
        writer: TensorBoard-style SummaryWriter.
        steps: global step for logging and the output filename.
    """
    print(f'start evaluate ' + '-' * 80)
    model.eval()
    result = [[], []]  # [predictions, ground-truth labels]
    model_out = []
    with torch.no_grad():
        val_bar = tqdm(range(len(dataloder)))
        for imgs, boxes, labels in dataloder:
            imgs, boxes, labels = imgs.to(args.device), boxes.to(args.device), labels.to(args.device)
            output = model(imgs, [{'boxes': boxes[i][None], 'labels': labels[i][None]}
                        for i in range(imgs.shape[0])])

            result[0].extend(handle_output(output))
            result[1].extend(labels.tolist())
            model_out.extend(output)
            val_bar.update(1)
        val_bar.close()
    p, r, f1 = get_metrics(result)
    writer.add_scalar('precision', p, steps)
    writer.add_scalar('recall', r, steps)
    writer.add_scalar('f1', f1, steps)
    print(f'eval res:{result}')

    # Tensors are not JSON-serializable; convert them in place first.
    for item in model_out:
        for k, v in item.items():
            item[k] = v.tolist()
    # BUGFIX: these were plain strings missing the 'f' prefix, so every run
    # wrote to the literal path './output/out_eval_{steps}.json'.
    make_path_legal(f'./output/out_eval_{steps}.json')
    writejson(model_out, f'./output/out_eval_{steps}.json')
    
    
def main():
    """Train the ClsEff classifier, periodically evaluating and checkpointing.

    Runs 10 epochs over the training loader, calls evaluate_pr every
    args.steps_for_eval optimizer steps, and saves a state_dict per epoch.
    """
    args = get_args()

    model = ClsEff(2).to(args.device)

    # Optionally resume from a previously saved checkpoint.
    if args.resume:
        model.load_state_dict(torch.load(f'./model/model_{args.resume_epoch}.pth'))

    params = [p for p in model.parameters() if p.requires_grad]
    opt = Adam(params, lr=args.lr)

    # Build train/test dataloaders from the configured directories.
    dataloader = get_dataloader(args.batch_size, args.data_dir, args.train_dir, 'train')
    test_dataloader = get_dataloader(args.batch_size, args.data_dir, args.test_dir, 'test')

    writer = get_writer(args)
    steps_global = 0
    for epoch in range(10):  # NOTE(review): epoch count is hard-coded; consider an args field
        model.train()
        train_bar = tqdm(range(len(dataloader)))
        for i, (imgs, boxes, labels) in enumerate(dataloader):
            opt.zero_grad()
            imgs, boxes, labels = imgs.to(args.device), boxes.to(args.device), labels.to(args.device)
            # The model returns the training loss directly when given labels.
            # (Removed a dead 'loss = 0' assignment and commented-out code.)
            loss = model(imgs, labels)
            loss.backward()
            opt.step()
            train_bar.set_description(f'epoch {epoch} loss {loss.item()}')
            train_bar.update(1)
            steps_global += 1
            if steps_global % args.steps_for_eval == 0:
                evaluate_pr(model, test_dataloader, args, writer, steps_global, args.steps_eval)
                model.train()  # evaluate_pr leaves the model in eval mode

        torch.save(model.state_dict(), f'./model/model_{epoch}.pth')
        train_bar.close()

def get_dataloader(batch_size, data_dir, names=None, mode='train'):
    """Build a shuffled DataLoader of (image, box, label) tensors.

    Args:
        batch_size: batch size for the returned DataLoader.
        data_dir: root directory containing the image folders and the
            '{mode}_boxes.json' annotation file.
        names: subdirectory names under *data_dir* to scan for images.
        mode: annotation file selector ('train' or 'test').

    Returns:
        DataLoader yielding (imgs, boxes, labels) batches; images are
        256x256 float32 CHW tensors scaled to [0, 1].
    """
    # BUGFIX: 'names=[]' was a mutable default argument, which Python shares
    # across calls; use None as the sentinel instead.
    names = names if names is not None else []
    images = []
    boxes = []
    labels = []
    paths = []
    for name in names:
        paths.extend(get_file_paths(f'{data_dir}/{name}'))
    boxes_info = readjson(f'{data_dir}/{mode}_boxes.json')
    for p in paths:
        img = cv2.imread(p) / 255
        img = cv2.resize(img, (256, 256))
        img = torch.tensor(img).permute(2, 0, 1).to(torch.float32)
        # [0, 1, 0, 1] is the sentinel "no annotation" value -> negative sample.
        b_info = boxes_info.get(get_file_name(p), [0, 1, 0, 1])
        label = 1 if b_info != [0, 1, 0, 1] else 0
        # Reorder annotation values [a, b, c, d] -> [a, c, b, d] without
        # mutating the shared annotation list in place (the old slice
        # assignment rewrote the entry inside boxes_info). Presumably this
        # maps (x1, x2, y1, y2) -> (x1, y1, x2, y2) -- confirm with the
        # annotation format.
        box = torch.tensor([b_info[0], b_info[2], b_info[1], b_info[3]])
        images.append(img)
        boxes.append(box)
        labels.append(torch.tensor(label))
    dataloader = DataLoader(
        TensorDataset(torch.stack(images), torch.stack(boxes), torch.stack(labels)),
        batch_size=batch_size, shuffle=True)
    return dataloader

# Script entry point: run training only when executed directly.
if __name__ == "__main__":
    main()