# -*- coding: utf-8 -*-
import argparse
import os
import torch
from torchvision import transforms
from torch.utils.data import DataLoader, Dataset
from PIL import Image
import xml.etree.ElementTree as ET
from flyai.data_helper import DataHelper
from flyai.framework import FlyAI
from path import MODEL_PATH, DATA_PATH
import pandas as pd
from net import get_model,get_model_denmodel
import matplotlib.pyplot as plt
import cv2
from flyai.utils.log_helper import train_log
from detectron2.structures import BoxMode

# Some basic setup:
# Setup detectron2 logger
import detectron2
from detectron2.utils.logger import setup_logger
setup_logger()

# import some common libraries
import numpy as np
import os, json, cv2, random

# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog

'''
此项目为FlyAI2.0新版本框架，数据读取，评估方式与之前不同
2.0框架不再限制数据如何读取
样例代码仅供参考学习，可以自己修改实现逻辑。
模版项目下载支持 PyTorch、Tensorflow、Keras、MXNET、scikit-learn等机器学习框架
第一次使用请看项目中的：FlyAI2.0竞赛框架使用说明.html
使用FlyAI提供的预训练模型可查看：https://www.flyai.com/models
学习资料可查看文档中心：https://doc.flyai.com/
常见问题：https://doc.flyai.com/question.html
遇到问题不要着急，添加小姐姐微信，扫描项目里面的：FlyAI小助手二维码-小姐姐在线解答您的问题.png
'''
# Make sure the model output directory exists before anything tries to save into it.
if not os.path.exists(MODEL_PATH):
    os.makedirs(MODEL_PATH)

# Project hyper-parameters; may be removed if unused.
# NOTE(review): EPOCHS/BATCH are parsed but never read below — detectron2's
# cfg.SOLVER settings in Main.train() control training instead; confirm intended.
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--EPOCHS", default=2, type=int, help="train epochs")
parser.add_argument("-b", "--BATCH", default=1, type=int, help="batch size")
args = parser.parse_args()

# The pretrained weights must be downloaded via this helper, then loaded.

#from flyai.utils import remote_helper
#weight_path = remote_helper.get_remote_data('https://www.flyai.com/m/faster_rcnn_X_101_32x8d_FPN_3x.pkl')
# The pretrained weights must be downloaded via this helper, then loaded.

from flyai.utils import remote_helper
weight_path = remote_helper.get_remote_data('https://www.flyai.com/m/faster_rcnn_R_101_FPN_3x.pkl')
print(weight_path)

# Select GPU if available, otherwise fall back to CPU.
# NOTE(review): 'device' is not referenced anywhere below in this file —
# detectron2 handles device placement itself; confirm this is dead code.
if torch.cuda.is_available():
    device = torch.device('cuda')
else:
    device = torch.device('cpu')

def get_xml(xml_path):
    """Parse a Pascal-VOC style annotation file and return its bounding boxes.

    :param xml_path: path to an XML file whose <object> elements each contain
        a <bndbox> with integer <xmin>/<ymin>/<xmax>/<ymax> children.
    :return: list of [xmin, ymin, xmax, ymax] boxes in absolute pixel
        coordinates; boxes narrower or shorter than 10 px are dropped
        (treated as annotation noise).
    """
    root = ET.parse(xml_path).getroot()
    boxes = []
    # 'obj' rather than 'object' so the builtin is not shadowed.
    for obj in root.findall('object'):
        # Hoist the <bndbox> lookup: the original re-ran find('bndbox') four times.
        bndbox = obj.find('bndbox')
        xmin = int(bndbox.find('xmin').text)
        ymin = int(bndbox.find('ymin').text)
        xmax = int(bndbox.find('xmax').text)
        ymax = int(bndbox.find('ymax').text)
        # Skip degenerate/tiny boxes (< 10 px in either dimension).
        if ymax - ymin < 10 or xmax - xmin < 10:
            continue
        boxes.append([xmin, ymin, xmax, ymax])
    return boxes


class Main(FlyAI):
    '''
    Project entry class. It must inherit from FlyAI, otherwise the online
    run will report an error (platform requirement).
    '''
    def __init__(self):
        # Data must be downloaded before train.csv can be read below.
        self.download_data()
        csv_path = os.path.join(DATA_PATH, 'TBDetection', 'train.csv')
        df = pd.read_csv(csv_path)
        # The CSV provides per-sample image and annotation-file paths.
        self.img_file_list = list(df['image_path'].values)
        self.xml_file_list = list(df['xml_path'].values)
        # Positional 90/10 train/validation split (no shuffling).
        trainNum = int(len(self.img_file_list) * 0.9)
        self.val_image_list = self.img_file_list[trainNum:]
        self.val_xml_image_list = self.xml_file_list[trainNum:]


    def download_data(self):
        # Download the training data for the given dataset id via FlyAI.
        data_helper = DataHelper()
        data_helper.download_from_ids("TBDetection")
        print('download data done...')

    def deal_with_data(self,root_path,flag="train"):
        '''
        Build the dataset in detectron2's "standard dataset dict" format.

        :param root_path: root directory containing the TBDetection folder.
        :param flag: 'train' uses the full file list; anything else uses the
                     validation tail created in __init__.
        :return: list of record dicts (file_name, image_id, height, width,
                 annotations) consumable by DatasetCatalog.
        '''
        dataset_dicts = []
        # NOTE(review): for flag == 'train' the FULL list is used, so the
        # validation tail overlaps the training set — confirm this is intended.
        img_file_list = self.img_file_list if flag == 'train' else self.val_image_list
        xml_file_list = self.xml_file_list if flag == 'train' else self.val_xml_image_list
        
        for i,(img_path,xml_path) in enumerate(zip(img_file_list,xml_file_list)):
            record = {}
            file_name = os.path.join(root_path,'TBDetection/'+ img_path)
            xml_name = os.path.join(root_path,"TBDetection/" + xml_path)
            # Image is decoded only to obtain its dimensions; despite the
            # name, 'weight' holds the image width.
            height,weight = cv2.imread(file_name).shape[:2]

            record["file_name"] = file_name
            record["image_id"] = i
            record["height"] = height
            record["width"] = weight

            annotations = []
            boxs = get_xml(xml_name)
            for box in boxs:
                box_val = {}
                # Boxes are absolute [xmin, ymin, xmax, ymax] pixel coordinates.
                box_val['bbox'] = box
                box_val['bbox_mode'] = BoxMode.XYXY_ABS
                # Single-class detection task: every box gets category 0.
                box_val['category_id'] = 0
                box_val['iscrowd'] = 0
                annotations.append(box_val)

            record["annotations"] = annotations
            dataset_dicts.append(record)

        return dataset_dicts

    def cv2_imshow(self,a):
        """A replacement for cv2.imshow() for use in Jupyter notebooks.
        
         Args:
           a : np.ndarray. shape (N, M) or (N, M, 1) is an NxM grayscale image. shape
             (N, M, 3) is an NxM BGR color image. shape (N, M, 4) is an NxM BGRA color
             image.
        """
        a = a.clip(0, 255).astype('uint8')
        # cv2 stores colors as BGR; convert to RGB
        if a.ndim == 3:
          if a.shape[2] == 4:
            a = cv2.cvtColor(a, cv2.COLOR_BGRA2RGBA)
          else:
            a = cv2.cvtColor(a, cv2.COLOR_BGR2RGB)

        # Blocks until a key is pressed in the display window.
        cv2.imshow("display",a)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    def image_show(self):
        """Debug helper: visualize 3 random ground-truth samples with detectron2's Visualizer."""

        # NOTE(review): DatasetCatalog.register raises on duplicate names, so
        # calling image_show() AND train() in the same process will fail on
        # the second registration — confirm only one is called per run.
        for d in ["train", "val"]:
            DatasetCatalog.register("TBDetection_" + d, lambda d=d: self.deal_with_data(DATA_PATH,d))
            MetadataCatalog.get("TBDetection_" + d).set(thing_classes=["TBDetection"])

        TBDetection_metadata = MetadataCatalog.get("TBDetection_train")

        dataset_dicts = self.deal_with_data(DATA_PATH,"train")

        for d in random.sample(dataset_dicts, 3):
            img = cv2.imread(d["file_name"])
            # Visualizer expects RGB; [:, :, ::-1] flips cv2's BGR ordering.
            visualizer = Visualizer(img[:, :, ::-1], metadata=TBDetection_metadata, scale=0.5)
            out = visualizer.draw_dataset_dict(d)
            self.cv2_imshow(out.get_image()[:, :, ::-1])

    def train(self):
        '''
        Train the model. This method must be implemented (platform requirement).

        Registers the train/val datasets, fine-tunes a Faster R-CNN R-101-FPN
        model from the downloaded weights, then runs COCO-style evaluation on
        the validation split.
        :return: None
        '''
        for d in ["train", "val"]:
            DatasetCatalog.register("TBDetection_" + d, lambda d=d: self.deal_with_data(DATA_PATH,d))
            MetadataCatalog.get("TBDetection_" + d).set(thing_classes=["TBDetection"])

        
        from detectron2.engine import DefaultTrainer
        from detectron2.checkpoint import DetectionCheckpointer
        cfg = get_cfg()
        #cfg.merge_from_file(model_zoo.get_config_file("COCO-Detection/retinanet_R_50_FPN_1x.yaml"))
        #cfg.merge_from_file(model_zoo.get_config_file("COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml"))
        # Config must match the architecture of the weights downloaded at module load.
        cfg.merge_from_file(model_zoo.get_config_file("COCO-Detection/faster_rcnn_R_101_FPN_3x.yaml"))
        cfg.DATASETS.TRAIN = ("TBDetection_train",)
        cfg.DATASETS.TEST = ("TBDetection_val", )
        cfg.DATALOADER.NUM_WORKERS = 2
        #cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Detection/retinanet_R_50_FPN_1x.yaml")  # Let training initialize from model zoo
        #cfg.MODEL.WEIGHTS = './retinanet_R_50_FPN_1x.pkl'  # Let training initialize from model zoo
        #cfg.MODEL.WEIGHTS = './faster_rcnn_X_101_32x8d_FPN_3x.pkl'  # Let training initialize from model zoo
        #cfg.MODEL.WEIGHTS = './faster_rcnn_R_101_FPN_3x.pkl'  # Let training initialize from model zoo
        # Initialize from the weights fetched by remote_helper at module import.
        cfg.MODEL.WEIGHTS = weight_path
        cfg.SOLVER.IMS_PER_BATCH = 1
        cfg.SOLVER.BASE_LR = 0.00025  # pick a good LR
        cfg.SOLVER.MAX_ITER = 7000    # 300 iterations seems good enough for this toy dataset; you may need to train longer for a practical dataset
        cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128   # faster, and good enough for this toy dataset (default: 512)
        cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1  # only has one class (ballon)
        # Checkpoints and logs go to the FlyAI-mandated model directory.
        cfg.OUTPUT_DIR = MODEL_PATH

        os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
        trainer = DefaultTrainer(cfg) 
        trainer.resume_or_load(resume=False)
        trainer.train()

        from detectron2.evaluation import COCOEvaluator, inference_on_dataset
        from detectron2.data import build_detection_test_loader
        # NOTE(review): evaluation artifacts go to './output/' while the model
        # saves to MODEL_PATH — confirm the split destination is intentional.
        evaluator = COCOEvaluator("TBDetection_val", cfg, False, output_dir="./output/")
        val_loader = build_detection_test_loader(cfg, "TBDetection_val")
        print(inference_on_dataset(trainer.model, val_loader, evaluator))
        # another equivalent way is to use trainer.test

if __name__ == '__main__':
    # Script entry point: constructing Main() already downloads and splits
    # the data, so a single call chain runs the whole pipeline.
    Main().train()