# -*- coding: utf-8 -*-
from PIL import Image
import xml.etree.ElementTree as ET
from flyai.data_helper import DataHelper
from flyai.framework import FlyAI
from path import MODEL_PATH, DATA_PATH
import pandas as pd
from net import get_model
import matplotlib.pyplot as plt
import cv2
from flyai.utils.log_helper import train_log
import numpy as np

import datetime
import os
import argparse
import traceback

import torch
import yaml
from torch import nn
from torch.utils.data import DataLoader,Dataset
from torchvision import transforms
from efficientdet.dataset import CocoDataset, Resizer, Normalizer, Augmenter, collater

from tensorboardX import SummaryWriter
import numpy as np
from tqdm.autonotebook import tqdm
from backbone import EfficientDetBackbone
from efficientdet.loss import FocalLoss
from utils.sync_batchnorm import patch_replication_callback
from utils.utils import replace_w_sync_bn, CustomDataParallel, get_last_weights, init_weights
from net import get_model_efficientdet
#from backbone import EfficientDetBackbone
'''
此项目为FlyAI2.0新版本框架，数据读取，评估方式与之前不同
2.0框架不再限制数据如何读取
样例代码仅供参考学习，可以自己修改实现逻辑。
模版项目下载支持 PyTorch、Tensorflow、Keras、MXNET、scikit-learn等机器学习框架
第一次使用请看项目中的：FlyAI2.0竞赛框架使用说明.html
使用FlyAI提供的预训练模型可查看：https://www.flyai.com/models
学习资料可查看文档中心：https://doc.flyai.com/
常见问题：https://doc.flyai.com/question.html
遇到问题不要着急，添加小姐姐微信，扫描项目里面的：FlyAI小助手二维码-小姐姐在线解答您的问题.png
'''
# Ensure the checkpoint output directory exists before training writes to it.
if not os.path.exists(MODEL_PATH):
    os.makedirs(MODEL_PATH)

class Params:
    """Read-only attribute view over a yaml project configuration file.

    Any key present in the yaml mapping is exposed as an attribute; unknown
    keys resolve to ``None`` instead of raising ``AttributeError``, so callers
    can probe optional settings directly (e.g. ``params.num_gpus``).
    """

    def __init__(self, project_file):
        # Use a context manager so the config file handle is closed promptly
        # (the original left it open until garbage collection).
        with open(project_file) as f:
            self.params = yaml.safe_load(f.read())

    def __getattr__(self, item):
        # Only invoked when normal attribute lookup fails, i.e. for config keys.
        return self.params.get(item, None)

# Pretrained weights MUST be fetched through FlyAI's remote helper before
# loading; the returned local path becomes the default for --load_weights.
from flyai.utils import remote_helper
weight_path = remote_helper.get_remote_data('https://www.flyai.com/m/efficientdet-d3.pth')

def get_args(argv=None):
    """Build and parse the command-line options for EfficientDet training.

    Args:
        argv: optional list of argument strings; defaults to ``sys.argv[1:]``.
            Passing an explicit list makes the function testable without
            touching the real command line (backward compatible: existing
            callers use ``get_args()`` unchanged).

    Returns:
        ``argparse.Namespace`` with all training options.
    """
    parser = argparse.ArgumentParser('Yet Another EfficientDet Pytorch: SOTA object detection network - Zylo117')
    parser.add_argument("-e", "--EPOCHS", default=30, type=int, help="train epochs")
    parser.add_argument('-p', '--project', type=str, default='Tuberculosis', help='project file that contains parameters')
    parser.add_argument('-c', '--compound_coef', type=int, default=3, help='coefficients of efficientdet')
    parser.add_argument('-n', '--num_workers', type=int, default=1, help='num_workers of dataloader')
    parser.add_argument('-b', '--BATCH', type=int, default=3, help='The number of images per batch among all devices')
    parser.add_argument('--head_only', type=boolean_string, default=False,
                        help='whether finetunes only the regressor and the classifier, '
                             'useful in early stage convergence or small/easy dataset')
    parser.add_argument('--lr', type=float, default=1e-4)
    # Typo fix: the value recognised by train() is 'adamw'; the old help text
    # suggested the misspelling 'admaw', which would silently fall back to SGD.
    parser.add_argument('--optim', type=str, default='sgd', help='select optimizer for training, '
                                                                   'suggest using \'adamw\' until the'
                                                                   ' very final stage then switch to \'sgd\'')
    parser.add_argument('--val_interval', type=int, default=5, help='Number of epoches between valing phases')
    parser.add_argument('--save_interval', type=int, default=300, help='Number of steps between saving')
    parser.add_argument('--es_min_delta', type=float, default=1e-2,
                        help='Early stopping\'s parameter: minimum change loss to qualify as an improvement')
    parser.add_argument('--es_patience', type=int, default=0,
                        help='Early stopping\'s parameter: number of epochs with no improvement after which training will be stopped. Set to 0 to disable this technique.')
    parser.add_argument('--data_path', type=str, default='datasets/', help='the root folder of dataset')
    parser.add_argument('--log_path', type=str, default='logs/')
    parser.add_argument('-w', '--load_weights', type=str, default=weight_path,
                        help='whether to load weights from a checkpoint, set None to initialize, set \'last\' to load last checkpoint')
    parser.add_argument('--saved_path', type=str, default=MODEL_PATH)
    parser.add_argument('--debug', type=boolean_string, default=False, help='whether visualize the predicted boxes of training, '
                                                                  'the output images will be in test/')

    args = parser.parse_args(argv)
    return args


def boolean_string(s):
    """Convert the literal strings 'True'/'False' to booleans.

    Used as an argparse ``type=`` callback; any other string raises
    ``ValueError`` so argparse reports a usage error.
    """
    mapping = {'False': False, 'True': True}
    if s not in mapping:
        raise ValueError('Not a valid boolean string')
    return mapping[s]

def get_xml(xml_path):
    """Parse a Pascal-VOC style annotation file and return its bounding boxes.

    Boxes narrower or shorter than 20 pixels are discarded (too small to be
    useful annotations for training).

    Args:
        xml_path: path to the xml annotation file.

    Returns:
        list of ``[xmin, ymin, xmax, ymax]`` integer boxes.
    """
    root = ET.parse(xml_path).getroot()
    boxes = []
    # Renamed the loop variable from ``object`` -- the original shadowed the builtin.
    for obj in root.findall('object'):
        # object_name = obj.find('label').text
        bndbox = obj.find('bndbox')  # hoisted: was looked up four times per box
        xmin = int(bndbox.find('xmin').text)
        ymin = int(bndbox.find('ymin').text)
        xmax = int(bndbox.find('xmax').text)
        ymax = int(bndbox.find('ymax').text)
        # Skip boxes smaller than 20px in either dimension.
        if xmax - xmin < 20 or ymax - ymin < 20:
            continue
        boxes.append([xmin, ymin, xmax, ymax])
    return boxes

class ModelWithLoss(nn.Module):
    """Bundle a detector with its focal loss so the loss is computed inside
    ``forward`` (keeps gpu0 memory low when wrapped in data-parallel).
    """

    def __init__(self, model, debug=False):
        super().__init__()
        self.model = model
        self.debug = debug
        self.criterion = FocalLoss()

    def forward(self, imgs, annotations, obj_list=None):
        # The backbone returns (features, regression, classification, anchors);
        # features are unused here.
        _, regression, classification, anchors = self.model(imgs)
        # In debug mode the criterion also receives the raw images and class
        # names (presumably for visualisation inside the loss -- see FocalLoss).
        extra = {'imgs': imgs, 'obj_list': obj_list} if self.debug else {}
        cls_loss, reg_loss = self.criterion(classification, regression, anchors,
                                            annotations, **extra)
        return cls_loss, reg_loss

class MyDataset(Dataset):
    """Detection dataset pairing image files with Pascal-VOC xml annotations.

    ``root`` is prepended to every relative path from the two parallel lists.
    Samples are dicts ``{'img': HxWx3 float32 RGB in [0,1], 'annot': (N,5)
    float array of [xmin, ymin, xmax, ymax, class_id]}``; class_id is always 0
    (single-class task).
    """

    def __init__(self, root, img_file_list, xml_file_list, transform=None):
        self.root = root
        self.transform = transform
        self.img_file_list = img_file_list
        self.xml_file_list = xml_file_list

    def __len__(self):
        return len(self.img_file_list)

    def __getitem__(self, idx):
        sample = {'img': self.load_image(idx),
                  'annot': self.load_annotations(idx)}
        return self.transform(sample) if self.transform else sample

    def load_image(self, image_index):
        # OpenCV reads BGR; convert to RGB and scale to [0, 1] float32.
        path = os.path.join(self.root, self.img_file_list[image_index])
        bgr = cv2.imread(path)
        rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
        return rgb.astype(np.float32) / 255.

    def load_annotations(self, image_index):
        # One row per box: [xmin, ymin, xmax, ymax, class_id]; class id is
        # always 0. reshape(-1, 5) yields the required (0, 5) shape when the
        # xml contains no (valid) boxes.
        boxes = get_xml(os.path.join(self.root, self.xml_file_list[image_index]))
        rows = [box + [0] for box in boxes]
        return np.asarray(rows, dtype=np.float64).reshape(-1, 5)

# Load project hyper-parameters from the yaml config; hide all GPUs when the
# config requests zero of them.
params = Params(f'projects/Tuberculosis.yml')
if params.num_gpus == 0:
    os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
# Fixed seed for reproducibility.
# NOTE(review): only one of the two seeds is set per branch; usually
# torch.manual_seed is also called on CUDA machines -- confirm intentional.
if torch.cuda.is_available():
    torch.cuda.manual_seed(42)
else:
    torch.manual_seed(42)

class Main(FlyAI):
    '''
    FlyAI competition entry point. The class must inherit FlyAI, otherwise
    the online runner reports an error.
    '''

    def download_data(self):
        # Download the training data registered under the given dataset id.
        data_helper = DataHelper()
        data_helper.download_from_ids("TBDetection")
        print('download data done...')

    def deal_with_data(self):
        '''
        Prepare the data: read train.csv, derive the validation file lists,
        and build the train/val DataLoaders consumed by train().
        :return:
        '''
        csv_path = os.path.join(DATA_PATH, 'TBDetection', 'train.csv')
        df = pd.read_csv(csv_path)
        img_file_list = list(df['image_path'].values)
        xml_file_list = list(df['xml_path'].values)
        train_image_num = int(0.8 * len(img_file_list))
        
        # Last 20% of the files are used for validation.
        # NOTE(review): the train loader below is built from the FULL file
        # list, so these validation samples also appear in training --
        # confirm whether this overlap is intentional.
        val_img_file_list  = img_file_list[train_image_num:]
        val_xml_file_list = xml_file_list[train_image_num:]
        
        # Network input resolutions for EfficientDet compound coefficients d0..d7.
        input_sizes = [512, 640, 768, 896, 1024, 1280, 1280, 1536]
        transform_train = transforms.Compose([
            Normalizer(mean=params.mean,std=params.std),
            Augmenter(),
            Resizer(input_sizes[opt.compound_coef])
            ])
        
        transform_val = transforms.Compose([
            Normalizer(mean=params.mean,std=params.std),
            Resizer(input_sizes[opt.compound_coef])
            ])

        training_params = {'batch_size': opt.BATCH,
                           'shuffle': True,
                           'drop_last': True,
                           'collate_fn': collater,
                           'num_workers': opt.num_workers}

        val_params = {'batch_size': opt.BATCH,
                      'shuffle': False,
                      'drop_last': True,
                      'collate_fn': collater,
                      'num_workers': opt.num_workers}  
        train_data = MyDataset(os.path.join(DATA_PATH, 'TBDetection'), img_file_list, xml_file_list, transform=transform_train)
        self.train_loader = DataLoader(dataset=train_data, **training_params)
        val_data = MyDataset(os.path.join(DATA_PATH, 'TBDetection'), val_img_file_list, val_xml_file_list, transform=transform_val)
        self.val_loader = DataLoader(dataset=val_data, **val_params)


    def image_show(self):
        # Debug helper: draw the annotation boxes of one training batch and
        # write the images to ./save/ (also pops an OpenCV window).
        # NOTE(review): ``iter(...).next()`` is Python-2 syntax; on Python 3
        # this raises AttributeError and should be ``next(iter(...))``. Only
        # reachable from the commented-out call in __main__.
        sample = iter(self.train_loader).next()
        for i,(annot,img) in enumerate(zip(sample.get('annot'),sample.get('img'))):
            image = img.permute(1,2,0)
            bboxs = annot
            image = image.numpy()
            bboxs = bboxs.numpy()
            img = image.copy()
            cv2.imshow("img",img)
            for bbox in bboxs: 
                Xmin, Ymin, Xmax, Ymax,_= bbox
                cv2.rectangle(img,(Xmin,Ymin),(Xmax,Ymax),color=(255,0,0),thickness=2)
            cv2.imwrite("./save/{}.jpg".format(str(i)),img*255)

    def train(self,opt):
        '''
        Full training loop: builds EfficientDet, optionally resumes from a
        checkpoint, trains with periodic validation / checkpointing / early
        stopping, and logs to tensorboard.
        '''
        # Checkpoints and tensorboard logs go under per-project directories.
        opt.saved_path = opt.saved_path + f'/{params.project_name}/'
        opt.log_path = opt.log_path + f'/{params.project_name}/tensorboard/'
        os.makedirs(opt.log_path, exist_ok=True)
        os.makedirs(opt.saved_path, exist_ok=True)
        
        training_generator = self.train_loader
        val_generator = self.val_loader
        
        # anchors_ratios / anchors_scales are python-literal strings in the
        # yaml config, hence the eval() -- config is trusted local input.
        model = EfficientDetBackbone(num_classes=len(params.obj_list), compound_coef=opt.compound_coef,
                                     ratios=eval(params.anchors_ratios), scales=eval(params.anchors_scales))
        #model = get_model_efficientdet(num_classes=2,is_training=True)
        # load last weights
        if opt.load_weights is not None:
            if opt.load_weights.endswith('.pth'):
                weights_path = opt.load_weights
            else:
                weights_path = get_last_weights(opt.saved_path)
            # Resume the step counter from checkpoint names shaped like
            # 'efficientdet-d<c>_<epoch>_<step>.pth'; pretrained files without
            # that suffix fall back to step 0.
            try:
                last_step = int(os.path.basename(weights_path).split('_')[-1].split('.')[0])
            except:
                last_step = 0
            try:
                ret = model.load_state_dict(torch.load(weights_path), strict=False)
            except RuntimeError as e:
                print(f'[Warning] Ignoring {e}')
                print(
                    '[Warning] Don\'t panic if you see this, this might be because you load a pretrained weights with different number of classes. The rest of the weights should be loaded already.')
            print(f'[Info] loaded weights: {os.path.basename(weights_path)}, resuming checkpoint from step: {last_step}')
        else:
            last_step = 0
            print('[Info] initializing weights...')
            init_weights(model)
        # freeze backbone if train head_only
        if opt.head_only:
            def freeze_backbone(m):
                # Freeze every module whose class name marks it as part of the
                # backbone or the feature pyramid.
                classname = m.__class__.__name__
                for ntl in ['EfficientNet', 'BiFPN']:
                    if ntl in classname:
                        for param in m.parameters():
                            param.requires_grad = False
            model.apply(freeze_backbone)
            print('[Info] freezed backbone')
        # https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
        # apply sync_bn when using multiple gpu and batch_size per gpu is lower than 4
        #  useful when gpu memory is limited.
        # because when bn is disable, the training will be very unstable or slow to converge,
        # apply sync_bn can solve it,
        # by packing all mini-batch across all gpus as one batch and normalize, then send it back to all gpus.
        # but it would also slow down the training by a little bit.
        if params.num_gpus > 1 and opt.BATCH // params.num_gpus < 4:
            model.apply(replace_w_sync_bn)
            use_sync_bn = True
        else:
            use_sync_bn = False
        writer = SummaryWriter(opt.log_path + f'/{datetime.datetime.now().strftime("%Y%m%d-%H%M%S")}/')
        # warp the model with loss function, to reduce the memory usage on gpu0 and speedup
        model = ModelWithLoss(model, debug=opt.debug)
        if params.num_gpus > 0:
            model = model.cuda()
            if params.num_gpus > 1:
                model = CustomDataParallel(model, params.num_gpus)
                if use_sync_bn:
                    patch_replication_callback(model)
        if opt.optim == 'adamw':
            optimizer = torch.optim.AdamW(model.parameters(), opt.lr)
        else:
            optimizer = torch.optim.SGD(model.parameters(), opt.lr, momentum=0.9, nesterov=True)
        # Decay the LR when the mean epoch loss plateaus.
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, verbose=True)
        epoch = 0
        best_loss = 1e5
        best_epoch = 0
        step = max(0, last_step)
        model.train()
        num_iter_per_epoch = len(training_generator)
        try:
            for epoch in range(opt.EPOCHS):
                # Skip whole epochs already covered by a resumed checkpoint.
                last_epoch = step // num_iter_per_epoch
                if epoch < last_epoch:
                    continue
    
                epoch_loss = []
                progress_bar = tqdm(training_generator)
                for iter, data in enumerate(progress_bar):
                    # Within the resume epoch, skip batches already trained on.
                    if iter < step - last_epoch * num_iter_per_epoch:
                        progress_bar.update()
                        continue
                    try:
                        imgs = data['img']
                        annot = data['annot']
                        if params.num_gpus == 1:
                            # if only one gpu, just send it to cuda:0
                            # elif multiple gpus, send it to multiple gpus in CustomDataParallel, not here
                            imgs = imgs.cuda()
                            annot = annot.cuda()

                        optimizer.zero_grad()
                        cls_loss, reg_loss = model(imgs, annot, obj_list=params.obj_list)
                        cls_loss = cls_loss.mean()
                        reg_loss = reg_loss.mean()
                        loss = cls_loss + reg_loss
                        # Skip batches whose loss is zero or non-finite.
                        if loss == 0 or not torch.isfinite(loss):
                            continue
            
                        loss.backward()
                        # torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1)
                        optimizer.step()
                        epoch_loss.append(float(loss))

                        print(
                            'Step: {}. Epoch: {}/{}. Iteration: {}/{}. Cls loss: {:.5f}. Reg loss: {:.5f}. Total loss: {:.5f}'.format(
                                step, epoch, opt.EPOCHS, iter + 1, num_iter_per_epoch, cls_loss.item(),
                                reg_loss.item(), loss.item()))
                        #progress_bar.set_description(
                        #    'Step: {}. Epoch: {}/{}. Iteration: {}/{}. Cls loss: {:.5f}. Reg loss: {:.5f}. Total loss: {:.5f}'.format(
                        #        step, epoch, opt.EPOCHS, iter + 1, num_iter_per_epoch, cls_loss.item(),
                        #        reg_loss.item(), loss.item()))
                        writer.add_scalars('Loss', {'train': loss}, step)
                        writer.add_scalars('Regression_loss', {'train': reg_loss}, step)
                        writer.add_scalars('Classfication_loss', {'train': cls_loss}, step)
                        # log learning_rate
                        current_lr = optimizer.param_groups[0]['lr']
                        writer.add_scalar('learning_rate', current_lr, step)
                        step += 1 
                        if step % opt.save_interval == 0 and step > 0:
                            save_checkpoint(model, f'efficientdet-d{opt.compound_coef}_{epoch}_{step}.pth')
                            print('checkpoint...')
                    except Exception as e:
                        # Best-effort per-batch recovery: on CUDA OOM release the
                        # cache, otherwise just log and move to the next batch.
                        print('[Error]', traceback.format_exc())
                        if 'CUDA out of memory' in str(e):
                            print("清除内存")
                            if hasattr(torch.cuda,'empty_cache'):
                                torch.cuda.empty_cache()
                        else:
                            print(e)
                        continue
                scheduler.step(np.mean(epoch_loss))
                # Periodic validation pass.
                if epoch % opt.val_interval == 0:
                    model.eval()
                    loss_regression_ls = []
                    loss_classification_ls = []
                    for iter, data in enumerate(val_generator):
                        with torch.no_grad():
                            imgs = data['img']
                            annot = data['annot']
                            if params.num_gpus == 1:
                                imgs = imgs.cuda()
                                annot = annot.cuda()
                            cls_loss, reg_loss = model(imgs, annot, obj_list=params.obj_list)
                            cls_loss = cls_loss.mean()
                            reg_loss = reg_loss.mean()
                            loss = cls_loss + reg_loss
                            if loss == 0 or not torch.isfinite(loss):
                                continue
                
                            loss_classification_ls.append(cls_loss.item())
                            loss_regression_ls.append(reg_loss.item())
                    cls_loss = np.mean(loss_classification_ls)
                    reg_loss = np.mean(loss_regression_ls)
                    loss = cls_loss + reg_loss
                    print(
                        'Val. Epoch: {}/{}. Classification loss: {:1.5f}. Regression loss: {:1.5f}. Total loss: {:1.5f}'.format(
                            epoch, opt.EPOCHS, cls_loss, reg_loss, loss))
                    writer.add_scalars('Loss', {'val': loss}, step)
                    writer.add_scalars('Regression_loss', {'val': reg_loss}, step)
                    writer.add_scalars('Classfication_loss', {'val': cls_loss}, step)
                    # Keep the best model so far, requiring a minimum improvement
                    # margin of es_min_delta.
                    if loss + opt.es_min_delta < best_loss:
                        best_loss = loss
                        best_epoch = epoch
                        save_checkpoint(model, f'efficientdet-d{opt.compound_coef}_{epoch}_{step}.pth')

                    model.train()
                    # Early stopping
                    # (chained comparison: only active when es_patience > 0)
                    if epoch - best_epoch > opt.es_patience > 0:
                        print('[Info] Stop training at epoch {}. The lowest loss achieved is {}'.format(epoch, best_loss))
                        break
        except KeyboardInterrupt:
            # Save progress on manual interrupt before shutting down.
            save_checkpoint(model, f'efficientdet-d{opt.compound_coef}_{epoch}_{step}.pth')
            writer.close()
        writer.close()

def save_checkpoint(model, name, saved_path=None):
    """Persist the underlying detector's weights to disk.

    Args:
        model: a ``ModelWithLoss`` instance, optionally wrapped in
            ``CustomDataParallel``.
        name: checkpoint file name.
        saved_path: target directory. Defaults to the module-level
            ``opt.saved_path`` for backward compatibility with existing call
            sites (note: ``opt`` only exists when run as ``__main__``).
    """
    if saved_path is None:
        saved_path = opt.saved_path
    # Unwrap the data-parallel and loss wrappers so the file contains plain
    # backbone weights.
    if isinstance(model, CustomDataParallel):
        net = model.module.model
    else:
        net = model.model
    torch.save(net.state_dict(), os.path.join(saved_path, name))

if __name__ == '__main__':
    # Parse CLI options, download the dataset, build the loaders, then train.
    opt = get_args()
    main = Main()
    main.download_data()
    main.deal_with_data()
    #main.image_show()
    main.train(opt)