import sys
import os
import time
import tqdm
# os.environ['CUDA_VISIBLE_DEVICES']='0'
import torch
from config import opt
from torch.utils.data import Dataset,DataLoader
from torchvision import transforms as T
import cv2
from PIL import Image
import numpy as np
from loguru import logger
from models import crnn
import torch.optim as optim
from torchvision import transforms


# Select GPU 0 when CUDA is available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

def readfile(filename):
	"""Parse a whitespace-separated label file into a dict.

	Each line has the form ``<image_name> <tok1> <tok2> ...``; the first
	field becomes the key and the remaining fields the value list.

	:param filename: path to the label file.
	:return: dict mapping image name -> list of label tokens (strings).
	"""
	labels = {}
	with open(filename, 'r') as f:
		# Stream line by line instead of materializing readlines(),
		# and build the dict in a single pass.
		for line in f:
			parts = line.strip().split(' ')
			labels[parts[0]] = parts[1:]
	return labels

class resizeNormalize(object):
	"""Resize a PIL image to a fixed size, convert to a tensor,
	and normalize pixel values from [0, 1] to [-1, 1]."""

	def __init__(self, size, interpolation=Image.BILINEAR):
		# Target (width, height) and the PIL resampling filter to use.
		self.size = size
		self.interpolation = interpolation
		self.toTensor = transforms.ToTensor()

	def __call__(self, img):
		resized = img.resize(self.size, self.interpolation)
		tensor = self.toTensor(resized)
		# ToTensor yields values in [0, 1]; shift and scale in place
		# so the result lies in [-1, 1].
		return tensor.sub_(0.5).div_(0.5)

class Chineseocr(Dataset):
	"""Dataset of grayscale text-line images with integer label sequences.

	Each sample is resized to a fixed height (aspect ratio preserved),
	normalized to [-1, 1], and paired with the first 6 label tokens from
	the label file, converted to an IntTensor.
	"""

	# Every image is rescaled to this height; width follows aspect ratio.
	IMAGE_HEIGHT = 64

	def __init__(self, imageroot, labelroot):
		"""
		:param imageroot: directory containing the image files.
		:param labelroot: path to the label file parsed by readfile().
		"""
		self.image_dict = readfile(labelroot)
		self.image_root = imageroot
		# Stable ordering of filenames for index-based access
		# (iterating a dict yields its keys; no need for .items()).
		self.image_name = list(self.image_dict)

	def __getitem__(self, index):
		name = self.image_name[index]
		img_path = os.path.join(self.image_root, name)
		keys = self.image_dict.get(name)
		# Labels are assumed to be exactly 6 integer class ids -- TODO confirm
		label = [int(x) for x in keys[:6]]
		image = Image.open(img_path).convert('L')  # force single channel
		w, h = image.size
		size_h = self.IMAGE_HEIGHT
		# Scale width to keep the original aspect ratio at the fixed height.
		size_w = int(w * (size_h / float(h)))
		transform = resizeNormalize((size_w, size_h))
		image = transform(image)
		return image, torch.IntTensor(label), img_path

	def __len__(self):
		return len(self.image_name)

# Training set: images under opt.image_path, labels from opt.train_data_root.
train_data = Chineseocr(
	imageroot = opt.image_path,
	labelroot = opt.train_data_root
)

# NOTE(review): samples have variable widths after aspect-preserving resize;
# a batch_size > 1 would need a custom collate_fn -- confirm opt.batch_size.
train_loader = DataLoader(
	train_data,
	batch_size = opt.batch_size,
	shuffle = True,
	num_workers = opt.num_workers
)

# Validation set shares the image directory with training; only the
# label file differs.
val_data = Chineseocr(
    imageroot = opt.image_path,
    labelroot = opt.validation_data_root
)
val_loader = DataLoader(
	val_data,
	batch_size = opt.batch_size,
	shuffle = True,
	num_workers = opt.num_workers
)

def decode(preds, blank=0):
	"""Greedy CTC decoding: collapse consecutive repeats, then drop blanks.

	:param preds: sequence of per-timestep class indices (list or 1-D tensor).
	:param blank: index of the CTC blank symbol (default 0, matching the
		original hard-coded value, so existing callers are unaffected).
	:return: collapsed label sequence as a list of ints.
	"""
	decoded = []
	prev = None
	for p in preds:
		# Keep a symbol only if it is not blank and differs from the
		# previous timestep (CTC merge rule).
		if p != blank and p != prev:
			decoded.append(int(p))
		prev = p
	return decoded

def val(net, loss_func, max_iter=1):
	"""Run a short validation pass; log and return the average CTC loss.

	NOTE(review): currently evaluates on a single fixed sample taken from
	train_data (debug hack); the real val_loader path is commented out.

	:param net: the CRNN model to evaluate.
	:param loss_func: CTC loss callable.
	:param max_iter: maximum number of validation batches.
	:return: average loss over the evaluated batches (float).
	"""
	print('start val')
	net.eval()
	totalloss = 0.0
	k = 0
	correct_num = 0
	total_num = 0
	single_image, single_label, single_path = train_data[0]
	single_image = single_image.unsqueeze(0)  # add batch dimension
	single_label = single_label.unsqueeze(0)
	# val_iter = iter(val_loader)
	val_iter = iter([(single_image, single_label)])
	max_iter = min(max_iter, len(val_loader))
	# No gradients are needed during evaluation; avoids building the
	# autograd graph and wasting memory.
	with torch.no_grad():
		for i in range(max_iter):
			k = k + 1
			(data, label) = next(val_iter)
			labels = torch.IntTensor([]).to(device)
			# BUG FIX: is_available is a function; without "()" the bound
			# method object is always truthy.
			if torch.cuda.is_available() and opt.use_gpu:
				data = data.to(device)
				label = label.to(device)

			# Flatten the (batch, seq) label matrix into the 1-D
			# concatenated target vector that CTCLoss expects.
			for j in range(label.size(0)):
				labels = torch.cat((labels, label[j]), 0)

			output = net(data).to(device)
			# Per-sample input lengths (time steps) and target lengths.
			output_size = torch.IntTensor([output.size(0)] * int(output.size(1))).to(device)
			label_size = torch.IntTensor([label.size(1)] * int(label.size(0))).to(device)
			loss = loss_func(output, labels, output_size, label_size) / label.size(0)
			totalloss += float(loss)
			# Greedy decode: argmax over classes, time-major -> flat sequence.
			pred_label = output.max(2)[1]
			pred_label = pred_label.transpose(1, 0).contiguous().view(-1)
			pred = decode(pred_label)
			total_num += len(pred)
			for x, y in zip(pred, labels):
				if int(x) == int(y):
					correct_num += 1
	accuracy = correct_num / float(total_num) * 100 if total_num > 0 else 0.00
	test_loss = totalloss / k
	print(single_path)
	logger.info("{}\tLoss: {}\taccuary: {:.6f}".format(time.strftime("[%Y-%m-%d-%H_%M_%S]", time.localtime(time.time())),
					test_loss, accuracy))
	# BUG FIX: return the averaged loss; previously the raw last-batch loss
	# tensor was returned while the computed test_loss went unused.
	return test_loss

if __name__ == '__main__':
    # Build the character set: the first line of chars.txt is skipped and
    # '卍' is appended as a filler; n_class is the alphabet size.
    # BUG FIX: open the file with a context manager so it is closed.
    with open('chars.txt', 'r', encoding='utf-8') as f:
        char_set = f.readlines()
    char_set = ''.join([ch.strip('\n') for ch in char_set[1:]] + ['卍'])
    n_class = len(char_set)

    model = crnn.CRNN(opt.img_h, 1, n_class, 256)
    # BUG FIX: is_available is a function; without "()" the bound method
    # object is always truthy.
    if torch.cuda.is_available() and opt.use_gpu:
        model.to(device)

    modelpath = opt.modelpath

    learning_rate = opt.learning_rate
    # zero_infinity guards against inf losses from too-short inputs.
    loss_func = torch.nn.CTCLoss(zero_infinity=True)
    optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=opt.weight_decay)
    # Multiplicative LR decay, stepped every opt.updateLR iterations below.
    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)

    if os.path.exists(modelpath):
        print('Load model from "%s" ...' % modelpath)
        # BUG FIX: map_location lets a GPU-saved checkpoint load on CPU.
        model.load_state_dict(torch.load(modelpath, map_location=device))
        print('Done!')

    k = 0               # global step counter
    losstotal = 0.0     # accumulated loss since the last log line
    printinterval = opt.printinterval
    valinterval = opt.valinterval
    numinprint = 0      # steps accumulated since the last log line

    # NOTE(review): training currently overfits a single fixed sample
    # (debug hack); the real train_loader path is commented out below.
    single_image, single_label, _ = train_data[0]
    single_image = single_image.unsqueeze(0)  # add batch dimension
    single_label = single_label.unsqueeze(0)
    for epoch in range(opt.max_epoch):
        # for i, (data, label) in enumerate(train_loader):
        for i, (data, label) in enumerate([(single_image, single_label)]):
            k = k + 1
            numinprint = numinprint + 1
            # BUG FIX: is_available() call, as above.
            if torch.cuda.is_available() and opt.use_gpu:
                data = data.to(device)
                label = label.to(device)
            model.train()
            # Flatten (batch, seq) labels into the 1-D concatenated target
            # vector expected by CTCLoss.
            labels = torch.IntTensor([]).to(device)
            for j in range(label.size(0)):
                labels = torch.cat((labels, label[j]), 0)

            optimizer.zero_grad()
            output = model(data).to(device)
            # Per-sample input lengths (time steps) and target lengths.
            output_size = torch.IntTensor([output.size(0)] * int(output.size(1))).to(device)
            label_size = torch.IntTensor([label.size(1)] * int(label.size(0))).to(device)
            loss = loss_func(output, labels, output_size, label_size) / label.size(0)
            losstotal += float(loss)

            if k % printinterval == 0:
                # Log the running average loss and current LR, then
                # checkpoint the model.
                logger.info("{}\tEpoch: {}\tStep: {}\tAvgLoss: {}\tLr: {}".format(time.strftime("[%Y-%m-%d-%H_%M_%S]", time.localtime(time.time())),
					epoch, i + 1, losstotal / numinprint, optimizer.param_groups[0]['lr']))
                losstotal = 0.0
                numinprint = 0
                torch.save(model.state_dict(), opt.modelpath)

            loss.backward()
            optimizer.step()

            # Decay the learning rate every opt.updateLR steps.
            if k % opt.updateLR == 0:
                scheduler.step()

            # Periodic validation.
            if k % valinterval == 0:
                val(model, loss_func)
