import glob
import json
import os
import random
import sys

import cv2
import numpy as np
import torch
from easydict import EasyDict

sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/FOTS/rroi_align/build/lib.linux-x86_64-3.8/")
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/../")


from torch.nn import CTCLoss
from torch.utils.data import Dataset, DataLoader

from FOTS.data_loader.datautils import normalize_iamge
from FOTS.model.loss import get_acc_from_logits_and_gt

from FOTS.model.raw_crnn import RAWCRNN
from FOTS.utils.util import str_label_converter
from sys_mgr import MetricLogger, BaseMetricLogger
import torch.nn as nn

def initialize_weights(model):
    """Initialize model weights in place.

    Conv2d and Linear weights get Kaiming-uniform init with zero bias;
    BatchNorm2d gets unit weight and zero bias.

    Args:
        model: any ``nn.Module``; every Conv2d, BatchNorm2d and Linear
            submodule (recursively) is re-initialized.
    """
    for m in model.modules():
        if isinstance(m, nn.Conv2d):
            nn.init.kaiming_uniform_(m.weight)

            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.Linear):
            nn.init.kaiming_uniform_(m.weight)
            # BUGFIX: a Linear built with bias=False has m.bias is None; the
            # original called constant_ on it unconditionally and crashed.
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)

class lp_plate_recog_set_with_color(Dataset):
    """License-plate crops whose filename encodes plate text and plate color.

    Files are expected as ``<plate>_<color>.jpg`` under each root path.
    Each sample is (CHW float CUDA image, plate string, label ids padded to
    length 10 with id 68, label length, color class-id tensor).
    """

    def __init__(self,
                 root_pathes=None,
                 # historical default roots; see fallback below
                 train=True,
                 ):
        # Avoid a mutable default argument while keeping the old default roots.
        if root_pathes is None:
            root_pathes = ["/home/data/777", "/home/data/778", "/home/data/781"]
        self.root_pathes = root_pathes
        self.train = train

        self.files = []
        for root_path in self.root_pathes:
            self.files.extend(glob.glob(root_path + "/*.jpg"))

        # BUGFIX: the original shuffled with the unseeded global RNG, so a
        # train=True instance and a train=False instance saw different
        # orderings and their 90/10 splits overlapped (train/val leakage).
        # Sort first (glob order is filesystem-dependent), then shuffle with a
        # fixed seed so every instance agrees on the split.
        self.files.sort()
        random.Random(0).shuffle(self.files)

        self.lps = []
        self.colors = []
        self.color_IDs = []
        for file in self.files:
            name = os.path.basename(file)
            lp = name.split("_")[0]
            color = name.split("_")[1].split(".jpg")[0]
            self.lps.append(lp)
            self.colors.append(color)
            self.color_IDs.append(LpDetSetWithColor.color2clsID(color))

        # 90/10 train/validation split over the (deterministic) shuffled order.
        cut = int(0.9 * len(self.files))
        part = slice(0, cut) if train else slice(cut, None)
        self.files = self.files[part]
        self.lps = self.lps[part]
        self.colors = self.colors[part]
        self.color_IDs = self.color_IDs[part]

        print("LOADED DONE len:{}".format(len(self.files)))

    def __getitem__(self, idx):
        """Load one crop, resize to 120x40, normalize, and return the sample."""
        im = cv2.imread(self.files[idx])
        im = cv2.resize(im, (120, 40))
        # Augmentation is applied only when this is a training split.
        im_tensor = normalize_iamge(im, aug=self.train)
        im_tensor = torch.from_numpy(im_tensor).cuda().float()
        im_tensor = im_tensor.permute(2, 0, 1)  # HWC -> CHW
        lp = self.lps[idx]
        colorID = self.color_IDs[idx]
        transcripts = str_label_converter.encode(lp)
        # Pad the label id sequence to a fixed length of 10 with pad id 68.
        while len(transcripts[0]) < 10:
            transcripts[0].append(68)
        return im_tensor, lp, np.array(transcripts[0]), transcripts[1], torch.tensor(colorID).cuda()

    def __len__(self):
        return len(self.lps)


class lp_plate_recog_set(Dataset):
    """License-plate crops whose filename suffix ``..._<plate>.png`` is the label.

    Each sample is (CHW float CUDA image, plate string, label ids padded to
    length 10 with id 68, label length, dummy color id 0).
    """

    def __init__(self, root_path="/home/leo/Downloads/data_set/lp_reg/train"):
        self.root_path = root_path
        self.files = glob.glob(root_path + "/*.png")
        # Augmentation is enabled only for paths that look like a train split.
        self.train = "train" in root_path
        self.lps = [
            os.path.basename(path).split("_")[-1].split(".png")[0]
            for path in self.files
        ]

    def __getitem__(self, idx):
        """Load one crop, resize to 120x40, normalize, and return the sample."""
        bgr = cv2.resize(cv2.imread(self.files[idx]), (120, 40))
        tensor = torch.from_numpy(normalize_iamge(bgr, aug=self.train)).cuda().float()
        tensor = tensor.permute(2, 0, 1)  # HWC -> CHW
        plate = self.lps[idx]
        encoded = str_label_converter.encode(plate)
        # Pad the label id sequence to a fixed length of 10 with pad id 68.
        encoded[0].extend([68] * (10 - len(encoded[0])))
        return tensor, plate, np.array(encoded[0]), encoded[1], torch.tensor(0).cuda()

    def __len__(self):
        return len(self.lps)


class LpDetSetWithColor(Dataset):
    """Detection dataset yielding full images with car boxes, plate points,
    OCR strings and plate colors (or stride-8/16/32 target maps in train mode).

    NOTE(review): ``MetaLpContainer``, ``default_loader``, ``visual_loaded``
    and ``labels2output_map`` are not imported anywhere in this file —
    instantiating/indexing this class as-is raises NameError; confirm the
    missing imports against the rest of the project.
    """

    def __init__(self, root_path="/home/data/782/"):
        # NOTE(review): root_path is accepted but ignored — the container root
        # is hard-coded below; confirm this is intentional.
        # self.container = MetaLpContainer(root="/home/data/782/")
        self.container = MetaLpContainer(root="/home/data/single/")
        random.shuffle(self.container.all_expanded_car_strus)
        # size_all is currently unused because the split logic below is
        # commented out; every instance behaves as a training set.
        size_all = len(self.container.all_expanded_car_strus)

        self.train = True
        # self.train = "train" in root_path
        # if "train" in root_path:
        #     self.container.all_expanded_car_strus = self.container.all_expanded_car_strus[0:int(0.9 * size_all)]
        # elif "val" in root_path:
        #     self.container.all_expanded_car_strus = self.container.all_expanded_car_strus[int(0.9 * size_all):]
        # else:
        #     raise ("MODE ERROR DATA SET")

        print("Init DataSet DONE size set: {}".format(len(self.container.all_expanded_car_strus)))

    @staticmethod
    def color2clsID(color):
        """Map a plate color name to its class id; returns None for unknown colors."""
        return {
            "green": 0,
            "blue": 1,
            "yellow": 2,
            "white": 3,
            "black": 4,
        }.get(color)

    @staticmethod
    def clsID2color(ID):
        """Inverse of color2clsID: map a class id (int-castable) back to its name."""
        return {
            0: "green",
            1: "blue",
            2: "yellow",
            3: "white",
            4: "black",
        }.get(int(ID))

    def __getitem__(self, idx):
        # Fetch one picture worth of annotations from the container; the exact
        # structure of these values is defined by MetaLpContainer (not visible
        # in this file).
        full_bgr_im, car_boxes, relative_lp_ptses, ocrs, lp_colors = \
            self.container.get_a_pic_cars(idx)
        im_t, car_boxes = default_loader(full_bgr_im,
                                         car_boxes)
        # NOTE(review): visual_loaded appears to be a debug visualization call
        # left enabled in the data path — confirm whether it should run here.
        visual_loaded(im_t, car_boxes, relative_lp_ptses, ocrs, lp_colors)

        if self.train:
            # Build detection target maps at three feature strides.
            Y_feat_map0 = torch.from_numpy(labels2output_map(car_boxes, relative_lp_ptses, stride=8))
            Y_feat_map1 = torch.from_numpy(labels2output_map(car_boxes, relative_lp_ptses, stride=16))
            Y_feat_map2 = torch.from_numpy(labels2output_map(car_boxes, relative_lp_ptses, stride=32))

            if torch.cuda.is_available():
                Y_feat_map0 = Y_feat_map0.cuda()
                Y_feat_map1 = Y_feat_map1.cuda()
                Y_feat_map2 = Y_feat_map2.cuda()
            return im_t, Y_feat_map0, Y_feat_map1, Y_feat_map2
        else:
            return im_t, car_boxes, relative_lp_ptses, ocrs, lp_colors

    def __len__(self):
        # NOTE(review): __getitem__ indexes all_expanded_car_strus-backed data
        # via get_a_pic_cars, but the length comes from all_pic_strus — verify
        # these two collections are the same size, otherwise indexing can fail.
        return len(self.container.all_pic_strus)



def eval_model_crnn(model, eval_loader, epoch):
    """Run one full evaluation pass and return (sequence acc, color acc).

    Puts the model into eval mode for the pass and restores train mode before
    returning.

    Args:
        model: CRNN returning (sequence logits of shape (T, B, C), color logits).
        eval_loader: yields (im_tensor, lps, transcripts, lengths, colorIDs)
            batches, matching the datasets in this file.
        epoch: epoch index forwarded to the metric logger.
    """
    model.eval()

    eval_metric_loger = BaseMetricLogger()

    # BUGFIX: the forward pass was outside torch.no_grad(), so evaluation
    # built autograd graphs for every batch (wasted memory and compute).
    # The whole loop now runs without gradient tracking.
    with torch.no_grad():
        for cnt, (im_tensor, lps, transcripts, lengths, colorIDs) in enumerate(eval_loader):
            out, out_logits = model(im_tensor)  # T B C

            gt = [transcripts, lengths[0].int()]
            # Every prediction is treated as having the full sequence length T.
            pred = [out, torch.full_like(lengths[0], out.shape[0]).int()]
            acc = get_acc_from_logits_and_gt(gt, pred)

            # Fraction of the batch whose predicted color class matches.
            acc_color = float((out_logits.argmax(dim=1) == colorIDs).sum() / colorIDs.shape[0])
            eval_metric_loger.update(acc, 0.0, acc_color, epoch)

    model.train()
    return eval_metric_loger.get_acc(epoch), eval_metric_loger.get_acc_color(epoch)


if __name__ == '__main__':
    # config = json.load(open("pretrain.json"))
    # config = EasyDict(config)
    # model = Recognizer(69, config)
    # RAWCRNN(40, 3, 69, 256): presumably (input height, channels, n_classes,
    # hidden size) — confirm against FOTS.model.raw_crnn.
    model = RAWCRNN(40, 3, 69, 256)
    initialize_weights(model)

    whole_epochs = 100
    lr = 1e-4
    model.cuda()
    model.train()

    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    # Cosine decay of the learning rate down to 0 over the full run.
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=whole_epochs, eta_min=0)

    ctc_loss = CTCLoss()
    obj_color = nn.CrossEntropyLoss()

    train_set = lp_plate_recog_set(root_path="/home/leo/Downloads/data_set/lp_reg/train")
    # train_set = lp_plate_recog_set_with_color(train=True)
    train_loader = DataLoader(train_set, batch_size=8, shuffle=True)

    eval_set = lp_plate_recog_set(root_path="/home/leo/Downloads/data_set/lp_reg/val")
    # eval_set = lp_plate_recog_set_with_color(train=False)
    eval_loader = DataLoader(eval_set, batch_size=8, shuffle=True)

    metric_loger = BaseMetricLogger()
    best_loss = 99999
    for epoch in range(whole_epochs):
        for cnt, (im_tensor, lps, transcripts, lengths, colorIDs) in enumerate(train_loader):
            out, out_logits = model(im_tensor)  # T B C

            # Accuracy bookkeeping only — no gradients needed here.  Every
            # prediction is treated as having the full sequence length T.
            with torch.no_grad():
                gt = [transcripts, lengths[0].int()]
                pred = [out, torch.fill_(torch.ones_like(lengths[0]), out.shape[0]).int()]
                acc = get_acc_from_logits_and_gt(gt, pred)

            # NOTE(review): pred_tensor is computed but never used afterwards —
            # looks like leftover debug code; confirm before removing.
            pred_tensor = torch.softmax(pred[0], dim=-1).argmax(dim=-1).t().detach().cpu()
            # CTCLoss(log_probs, targets, input_lengths, target_lengths):
            # input lengths are all T (= out.shape[0]); target lengths come
            # from the label encoder.
            loss_ctc = ctc_loss(torch.log_softmax(out, dim=-1), transcripts,
                                # lengths[0].int(),
                                torch.fill_(torch.ones_like(lengths[0]), out.shape[0]).int(),
                                lengths[0].int()
                                )
            loss_color = obj_color(out_logits, colorIDs)

            # Joint objective: plate transcription (CTC) + plate color (CE),
            # equally weighted.
            loss = loss_color + loss_ctc
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Fraction of the batch whose predicted color class matches.
            acc_color = float((out_logits.argmax(dim=1) == colorIDs).sum() / colorIDs.shape[0])
            metric_loger.update(acc, float(loss), acc_color, epoch)
            print("Epoch:{} [{}/{}] loss:{} acc:{} acc_color:{}"
                  .format(epoch, cnt, len(train_loader), float(loss), acc, acc_color))

            sys.stdout.flush()

        eval_acc, eval_acc_color = eval_model_crnn(model, eval_loader, epoch)
        epoch_loss = metric_loger.get_loss(epoch)
        print("Epoch:{} Loss_Sum: {} train acc:{:.3f} eval acc: {:.3f} "
              "train accc:{:.3f} eval accc: {:.3f} lr:{}".format(
            epoch,
            epoch_loss,
            metric_loger.get_acc(epoch),
            eval_acc,
            metric_loger.get_acc_color(epoch),
            eval_acc_color,
            optimizer.param_groups[0]["lr"]
        ))
        # Checkpoint only when the epoch loss improved AND eval accuracy is
        # already decent (> 0.8), to avoid saving early noisy models.
        if epoch_loss < best_loss:
            best_loss = epoch_loss
            if eval_acc > 0.8:
                torch.save(model.state_dict(),
                           "/project/train/models/CRNN{}_l{:.8f}_tc{:.3f}_vc{:.3f}_tcc{:.3f}_vcc{:.3f}"
                           .format(epoch, epoch_loss,
                                   metric_loger.get_acc(epoch),
                                   eval_acc,
                                   metric_loger.get_acc_color(epoch),
                                   eval_acc_color,
                                   ))
        sys.stdout.flush()
        scheduler.step()