import os

import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.optim as optim
from torch.nn.parallel.distributed import DistributedDataParallel as DDP
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm

from config import get_parser
from datalist import LPRDataLoader, CHARS
from model import LPRNet

'''
DDP mode: intended specifically for single-machine multi-GPU training; it is fast.
'''


# Convert a list of samples into batch tensors (this step could also be
# folded directly into the dataset's __getitem__).
def collate_fn(batch):
    """Collate (img, label, length) samples into a CTC-ready batch.

    Args:
        batch: iterable of (img ndarray, label sequence, label length).

    Returns:
        Tuple of (stacked image float tensor of shape (N, ...),
        flat int64 label tensor holding all labels concatenated,
        list of per-sample label lengths).
    """
    imgs = []
    labels = []
    lengths = []
    for img, label, length in batch:
        imgs.append(torch.from_numpy(img))
        labels.extend(label)
        lengths.append(length)
    # np.int was removed in NumPy 1.24; int64 gives a LongTensor, which is
    # what nn.CTCLoss expects for targets.
    labels = np.asarray(labels).flatten().astype(np.int64)

    return (torch.stack(imgs, 0), torch.from_numpy(labels), lengths)


# Module-level accuracy tracker; overwritten with the latest test accuracy in
# train.test() (the "only keep the best" comparison there is commented out).
best_acc = 0


class train():
    """End-to-end LPRNet training driver for single-machine multi-GPU DDP.

    Instantiating this class immediately initialises the process group,
    builds the datasets/dataloaders/model/optimizer and runs the full
    train/test loop. Launch with torchrun / torch.distributed.launch so
    that ``args.local_rank`` is populated per process.
    """

    def __init__(self):
        self.args = get_parser()
        print(f"-----------{self.args.project_name}-------------")

        # --- Enable DDP mode ---
        local_rank = self.args.local_rank
        print(local_rank)
        torch.cuda.set_device(local_rank)
        # nccl is the fastest, recommended backend for GPU training
        dist.init_process_group(backend="nccl")

        use_cuda = self.args.use_cuda and torch.cuda.is_available()
        # Seed the CPU RNG unconditionally (the original only seeded it on the
        # CPU-only path, leaving CPU-side ops unseeded under CUDA), then the
        # GPU RNGs as well when CUDA is in use.
        torch.manual_seed(self.args.seed)
        if use_cuda:
            torch.cuda.manual_seed(self.args.seed)
            torch.cuda.manual_seed_all(self.args.seed)

        if use_cuda:
            self.device = torch.device('cuda', local_rank)
        else:
            self.device = torch.device("cpu")

        loader_kwargs = {'num_workers': 0, 'pin_memory': True} if use_cuda else {}

        # --- Build the DataLoaders ---
        print("Create DataLoader")
        self.lines = self.output_lines(self.args.base_dir)
        # 70/30 train/validation split over the collected image paths.
        self.num_val = int(len(self.lines) * 0.3)
        self.num_train = len(self.lines) - self.num_val

        self.train_dataset = LPRDataLoader(self.lines[:self.num_train], self.args.img_size, self.args.lpr_max_len)
        # was lines[num_train + 1:], which silently dropped one sample
        self.test_dataset = LPRDataLoader(self.lines[self.num_train:], self.args.img_size, self.args.lpr_max_len)

        self.train_sampler = DistributedSampler(self.train_dataset)
        self.test_sampler = DistributedSampler(self.test_dataset)

        self.train_dataloader = DataLoader(self.train_dataset, batch_size=self.args.train_batch_size, **loader_kwargs,
                                           collate_fn=collate_fn, sampler=self.train_sampler)
        self.test_dataloader = DataLoader(self.test_dataset, batch_size=self.args.test_batch_size, **loader_kwargs,
                                          collate_fn=collate_fn, sampler=self.test_sampler)

        # --- Build the model ---
        print("Create Model")
        self.model = LPRNet(lpr_max_len=self.args.lpr_max_len, phase=self.args.phase_train, class_num=len(CHARS),
                            dropout_rate=self.args.dropout_rate).to(self.device)

        # --- Optionally load pretrained weights ---
        # Rank 0 only: DDP broadcasts rank-0 parameters to the other ranks
        # when the model is wrapped below, so loading once is sufficient.
        if self.args.pretrained_model and dist.get_rank() == 0:
            # map_location keeps the checkpoint off whichever GPU it was
            # saved from and places it on this rank's device instead.
            data_dict = torch.load(self.args.pretrained_model, map_location=self.device)["model_state_dict"]
            # Strip the "module." prefix that DataParallel/DDP adds on save.
            new_data_dict = {k[7:]: v for k, v in data_dict.items()}
            self.model.load_state_dict(new_data_dict, strict=True)
            print("load pretrained model successful!")
        else:
            print("initialize net weights from scratch!")

        # --- Multi-GPU training ---
        if torch.cuda.device_count() > 1:
            print("Let's use ", torch.cuda.device_count(), " GPUs")
            self.model = DDP(self.model, device_ids=[local_rank], output_device=local_rank)

        # --- Loss, optimizer and learning-rate schedule ---
        print("Establish the loss, optimizer and learning_rate function")
        # Blank symbol for CTC is the last class index.
        self.criterion = nn.CTCLoss(blank=len(CHARS) - 1, reduction='mean')  # reduction: 'none' | 'mean' | 'sum'
        self.optimizer = optim.RMSprop(self.model.parameters(), lr=self.args.learning_rate, alpha=0.9, eps=1e-08,
                                       momentum=self.args.momentum, weight_decay=self.args.weight_decay)
        self.scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(self.optimizer, T_max=5, eta_min=1e-6)

        # --- Run the training loop (evaluate after every epoch) ---
        print("start training")
        for epoch in range(1, self.args.epoches + 1):
            self.train(epoch)
            self.test(epoch)
        torch.cuda.empty_cache()
        print("model finish training")

    def train(self, epoch):
        """Run one CTC training epoch; steps the LR scheduler at the end."""
        # Re-seed the sampler so every epoch sees a different shard ordering.
        self.train_dataloader.sampler.set_epoch(epoch)
        self.model.train()
        average_loss = []

        pbar = tqdm(self.train_dataloader, desc=f'Train Epoch{epoch}/{self.args.epoches}')
        for data, target, length in pbar:
            data, target = data.to(self.device), target.to(self.device)

            # nn.CTCLoss needs per-sample input (time) and target lengths.
            self.input_length, self.target_length = self.sparse_tuple_for_ctc(self.args.T_length, length)

            self.optimizer.zero_grad()
            output = self.model(data)
            # (N, C, T) -> (T, N, C) log-probabilities, as nn.CTCLoss expects.
            log_probs = output.permute(2, 0, 1).log_softmax(2)
            loss = self.criterion(log_probs, target, input_lengths=self.input_length,
                                  target_lengths=self.target_length)
            # Skip degenerate batches: isfinite also catches -inf and NaN,
            # which the original `== np.inf` test missed.
            if not torch.isfinite(loss):
                continue
            loss.backward()
            average_loss.append(loss.item())
            self.optimizer.step()
            pbar.set_description(
                f'Train Epoch:{epoch}/{self.args.epoches} train_loss:{round(np.mean(average_loss), 2)} learning_rate:{self.optimizer.state_dict()["param_groups"][0]["lr"]}')
        self.scheduler.step()

    def test(self, epoch):
        """Evaluate with greedy CTC decoding and save a rank-0 checkpoint."""
        global best_acc
        self.test_dataloader.sampler.set_epoch(epoch)
        Tp = 0    # exact-match predictions
        Tn_1 = 0  # wrong predicted length
        Tn_2 = 0  # right length but wrong characters
        # model.eval() disables dropout and uses running batch-norm statistics.
        self.model.eval()

        pbar = tqdm(self.test_dataloader, desc=f'Test Epoch:{epoch}/{self.args.epoches}', mininterval=0.3)
        for images, labels, lengths in pbar:
            # Split the flat label tensor back into per-image targets.
            # Kept as a plain list: np.array over ragged rows raises on
            # NumPy >= 1.24.
            start = 0
            targets = []
            for length in lengths:
                targets.append(labels[start:start + length].numpy())
                start += length

            # Inference only: no autograd bookkeeping.
            with torch.no_grad():
                images = images.to(self.device)
                prebs = self.model(images)

            # Greedy (best-path) CTC decode: argmax per timestep, then
            # collapse repeats and drop blanks (blank index == len(CHARS)-1).
            prebs = prebs.cpu().numpy()
            preb_labels = []
            for i in range(prebs.shape[0]):
                preb = prebs[i, :, :]
                preb_label = [np.argmax(preb[:, j], axis=0) for j in range(preb.shape[1])]
                no_repeat_blank_label = []
                pre_c = preb_label[0]
                if pre_c != len(CHARS) - 1:
                    no_repeat_blank_label.append(pre_c)
                for c in preb_label:  # drop repeated labels and blank labels
                    if (pre_c == c) or (c == len(CHARS) - 1):
                        if c == len(CHARS) - 1:
                            pre_c = c
                        continue
                    no_repeat_blank_label.append(c)
                    pre_c = c
                preb_labels.append(no_repeat_blank_label)

            for i, label in enumerate(preb_labels):
                if len(label) != len(targets[i]):
                    Tn_1 += 1
                    continue
                if (np.asarray(targets[i]) == np.asarray(label)).all():
                    Tp += 1
                else:
                    Tn_2 += 1

            total = Tp + Tn_1 + Tn_2
            pbar.set_description(
                f'Test Epoch:{epoch}/{self.args.epoches} Test Accuracy {Tp * 1.0 / total} [{Tp}:{Tn_1}:{Tn_2}:{total}]')

        # NOTE(review): the "only keep the best" comparison was commented out
        # in the original, so every epoch's accuracy is recorded and saved;
        # behavior preserved, with a guard for an empty test shard.
        total = Tp + Tn_1 + Tn_2
        best_acc = Tp * 1.0 / total if total > 0 else 0.0
        self.save_model(epoch, best_acc)

    def output_lines(self, base_dir):
        """Collect every image path under base_dir (one level of subdirs),
        plus the hard-coded auxiliary dataset under /car_brand_new1/.

        NOTE(review): the auxiliary path should probably come from args.
        """
        output = []
        for sub in os.listdir(base_dir):
            for image in os.listdir(base_dir + "/" + sub):
                output.append(base_dir + '/' + sub + '/' + image)

        for sub in os.listdir('/car_brand_new1/'):
            for image in os.listdir('/car_brand_new1/' + sub):
                output.append('/car_brand_new1/' + sub + '/' + image)

        return output

    def weights_init(self, m):
        """Initialise conv weights (Kaiming), BN weights to 1, biases to 0.01.

        Fixes two defects in the original: the missing ``self`` parameter
        (the method was uncallable on an instance), and
        ``nn.init.xavier_uniform(1)``, which raises because the initializer
        expects a tensor — BN weights are now simply set to 1.
        """
        for key in m.state_dict():
            if key.split('.')[-1] == 'weight':
                if 'conv' in key:
                    nn.init.kaiming_normal_(m.state_dict()[key], mode='fan_out')
                if 'bn' in key:
                    m.state_dict()[key][...] = 1.0
            elif key.split('.')[-1] == 'bias':
                m.state_dict()[key][...] = 0.01

    def sparse_tuple_for_ctc(self, T_length, lengths):
        """Build (input_lengths, target_lengths) tuples for nn.CTCLoss.

        Every sample shares the same network output length ``T_length``;
        target lengths are the per-sample label lengths.
        """
        input_lengths = tuple(T_length for _ in lengths)
        target_lengths = tuple(lengths)
        return input_lengths, target_lengths

    def save_model(self, epoch, acc):
        """Save a checkpoint tagged with epoch and accuracy (rank 0 only)."""
        path = "/weights_bigdata4"
        # exist_ok avoids the cross-rank race of check-then-mkdir.
        os.makedirs(path, exist_ok=True)

        # Only the master process writes the checkpoint.
        if dist.get_rank() == 0:
            torch.save({
                'epoch': epoch,
                'model_state_dict': self.model.state_dict(),
                "optimizer_state_dict": self.optimizer.state_dict(),
                "accuracy": acc
            # round(acc * 100, 2) instead of round(acc, 4) * 100, which left
            # float artifacts (e.g. 95.12000000000001) in the filename.
            }, path + '/' + 'Epoch_' + str(epoch) + "_" + 'Acc_' + str(round(acc * 100, 2)) + "%_car_plate_model.pth")
            print("save model successful")


if __name__ == "__main__":
    # Bind to a distinct name: `train = train()` shadowed the class itself.
    trainer = train()
