import sys
import os
import time
import tqdm
# os.environ['CUDA_VISIBLE_DEVICES']='0'
import torch
from config import opt
from torch.utils.data import Dataset,DataLoader
from torchvision import transforms as T
import cv2
from PIL import Image
import numpy as np
from loguru import logger
from models import crnn
import torch.optim as optim
from torchvision import transforms
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt

def align_labels(labels, T, B, pad_value=0):
	"""Pad or truncate `labels` along the time axis to exactly T columns.

	:param labels: raw label tensor; reshaped to (B, -1) internally
	:param T: target sequence length
	:param B: batch size used for the reshape
	:param pad_value: filler value (normally the CTC blank index)
	:return: aligned label tensor of shape (B, T)
	"""
	labels = labels.view(B, -1)
	width = labels.size(1)
	if width > T:
		# Too long: keep only the first T columns.
		return labels[:, :T]
	if width < T:
		# Too short: right-pad the last dimension with pad_value
		# (F.pad preserves dtype and device).
		return F.pad(labels, (0, T - width), value=pad_value)
	# Already the right length.
	return labels

class FocalCTCLoss(nn.Module):
    """CTC loss reweighted by a focal term to emphasise hard batches.

    The mean CTC loss is scaled by ``alpha * (1 - exp(-loss)) ** gamma``,
    so batches the model already predicts well contribute less gradient.
    """

    def __init__(self, alpha=0.5, gamma=2):
        super(FocalCTCLoss, self).__init__()
        self.ctc_loss = nn.CTCLoss(zero_infinity = True)
        self.alpha = alpha  # balancing factor
        self.gamma = gamma  # hard-sample exponent

    def forward(self, log_probs, targets, input_lengths, target_lengths):
        """Return the focal-weighted mean CTC loss for one batch."""
        base = self.ctc_loss(log_probs, targets, input_lengths, target_lengths)
        # exp(-loss) acts as a pseudo-probability of the target sequence.
        confidence = torch.exp(-base)
        weight = self.alpha * (1 - confidence) ** self.gamma
        return weight * base

"""	
class FocalCTCLoss(nn.Module):
	def __init__(self, alpha=0.25, gamma=2, blank=0):
		super().__init__()
		self.alpha = alpha  # 类别权重
		self.gamma = gamma  # 调节因子
		self.blank = blank  # CTC空白符索引
		self.ctc_loss = nn.CTCLoss(blank=blank, reduction='none', zero_infinity = True)  # 不进行均值或求和

	def forward(self, logits, labels, input_lengths, label_lengths):
		# 计算 Focal CTC Loss
		# :param logits: 模型输出 (B, T, C)
		# :param labels: 标签 (B, S)
		# :param input_lengths: 输入长度 (B,)
		# :param label_lengths: 标签长度 (B,)
		# :return: Focal CTC Loss
		
		T, B, C = logits.shape
		# 计算 CTC Loss
		ctc_loss = self.ctc_loss(logits, labels, input_lengths, label_lengths)  # (B,)
		
		# 计算 Focal Weight
		probs = torch.softmax(logits, dim=2)  # (B, T, C)
		# 确保 labels 的序列长度与 T 对齐（例如通过填充/截断）
		# 此处假设 labels 已对齐到 T
		# 对齐 labels 到长度 T
		labels_aligned = align_labels(labels, T, B, pad_value=self.blank)  # (B, T)
		labels_aligned = labels_aligned.permute(1, 0)
		# 收集对应标签的概率
		pt = probs.gather(2, labels_aligned.unsqueeze(-1).long()).squeeze(-1)  # (B, T)
		pt = pt.mean(dim=0)  # (B,)
		focal_weight = self.alpha * (1 - pt) ** self.gamma  # (B,)
		# print(focal_weight.size())
		# print(ctc_loss.size())
		# print(focal_weight.size())
		# 计算加权损失
		loss = ctc_loss * focal_weight  # (B,)
		return loss.mean()  # 返回均值
"""

# Run on the first GPU when available, otherwise fall back to the CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

def readfile(filename):
	"""Parse a label file into a {image_name: label_tokens} mapping.

	Each non-blank line has the form ``<image_name> <id> <id> ...``: the
	first whitespace-separated field is the image file name and the rest
	are its label token strings. Blank lines are skipped (the original
	version mapped them to a bogus '' key).

	:param filename: path to the label file
	:return: dict mapping image name -> list of label token strings
	"""
	mapping = {}
	# encoding is pinned so the parse does not depend on the OS locale
	with open(filename, 'r', encoding='utf-8') as f:
		for line in f:
			parts = line.strip().split(' ')
			if parts and parts[0]:
				mapping[parts[0]] = parts[1:]
	return mapping

# Training-time transform: tensor conversion + ImageNet-style normalization.
# (The augmentation steps below are currently disabled.)
transform = transforms.Compose([
	# Color jitter + anti-interference augmentation
	# transforms.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3),
	# transforms.RandomApply([transforms.GaussianBlur(kernel_size=(5, 5))], p=0.5),  # Gaussian blur

	# Normalization and size adaptation
	transforms.ToTensor(),
	# transforms.RandomErasing(p=0.5, scale=(0.02, 0.1)),  # random occlusion
	# transforms.Resize((32, 76), interpolation=Image.BILINEAR),  # fixed height; dynamic width needs a custom collate_fn
	transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

# Validation-time transform: deterministic, no augmentation.
val_transform = transforms.Compose([
	# Normalization and size adaptation
	transforms.ToTensor(),
	# transforms.Resize((32, 76)),  # fixed height; dynamic width needs a custom collate_fn
	transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

class resizeNormalize(object):
	"""Convert an image to a tensor scaled into [-1, 1].

	`size` and `interpolation` are stored for API compatibility, but the
	actual resize call is currently disabled in __call__.
	"""

	def __init__(self, size, interpolation=Image.BICUBIC):
		self.size = size
		self.interpolation = interpolation
		self.toTensor = transforms.ToTensor()

	def __call__(self, img):
		# img = img.resize(self.size, self.interpolation)
		tensor = self.toTensor(img)
		# Map [0, 1] -> [-1, 1] in place.
		tensor.sub_(0.5).div_(0.5)
		return tensor

class Chineseocr(Dataset):
	"""OCR dataset: grayscale images rescaled to height 32, integer labels."""

	def __init__(self, imageroot, labelroot, transform=None):
		"""
		:param imageroot: directory containing the image files
		:param labelroot: label file parsed by readfile()
		:param transform: kept for API compatibility (currently unused)
		"""
		self.image_dict = readfile(labelroot)
		self.image_root = imageroot
		self.transform = transform
		self.image_name = list(self.image_dict.keys())

	def __getitem__(self, index):
		"""Return (image_tensor, IntTensor label, label_length)."""
		name = self.image_name[index]
		img_path = os.path.join(self.image_root, name)
		orilabel = [int(tok) for tok in self.image_dict.get(name)]
		image = Image.open(img_path).convert('L')
		w, h = image.size
		# Fixed height of 32 pixels; width scales to preserve aspect ratio.
		target_h = 32
		target_w = int(w * (32 / float(h)))
		image = resizeNormalize((target_w, target_h))(image)
		label = torch.IntTensor(orilabel)
		return image, label, len(orilabel)

	def __len__(self):
		return len(self.image_name)

# ----------------------------
# 4. Custom collate_fn for variable-length labels
# ----------------------------
def collate_fn(batch):
    """Collate (image, label, label_len) samples into batch tensors.

    Images are stacked; variable-length labels are zero-padded on the
    right to the longest label in the batch.

    :param batch: list of (image tensor, IntTensor label, int length)
    :return: (stacked images, long padded labels (B, max_len), IntTensor lengths)
    """
    images, labels, label_lengths = zip(*batch)

    # All images share a shape, so a plain stack suffices.
    stacked = torch.stack(list(images), dim=0)

    # Right-pad each label row with zeros up to the batch maximum.
    max_len = max(len(lbl) for lbl in labels)
    padded = torch.zeros(len(labels), max_len).long()
    for row, lbl in enumerate(labels):
        padded[row, :len(lbl)] = lbl

    return stacked, padded, torch.IntTensor(list(label_lengths))

# Training split: images under opt.image_path, labels listed one per line
# in opt.train_data_root ("<image> <id> <id> ..." format).
train_data = Chineseocr(
	imageroot = opt.image_path,
	labelroot = opt.train_data_root,
	transform = transform
)

# Shuffled training loader; collate_fn pads variable-length labels.
train_loader = DataLoader(
	train_data,
	batch_size = opt.batch_size,
	shuffle = True,
	num_workers = opt.num_workers,
	collate_fn=collate_fn
)

# Validation split, built the same way as the training split.
val_data = Chineseocr(
	imageroot = opt.image_path,
	labelroot = opt.validation_data_root,
	transform = val_transform
)
# NOTE(review): shuffle=True on validation is unusual — presumably so the
# capped max_iter in val() sees a varying subset; confirm this is intended.
val_loader = DataLoader(
	val_data,
	batch_size = opt.batch_size,
	shuffle = True,
	num_workers = opt.num_workers,
	collate_fn=collate_fn
)

def decode(preds):
	"""Greedy CTC decoding: drop blanks (index 0) and collapse repeats.

	:param preds: sequence of predicted class indices (ints or 0-d tensors)
	:return: list of ints with blanks removed and adjacent duplicates merged
	"""
	result = []
	previous = None  # sentinel: the first element is never a "repeat"
	for p in preds:
		if p != 0 and p != previous:
			result.append(int(p))
		previous = p
	return result

def check_gradient_norm(model):
    """Print and return the global L2 norm of all parameter gradients.

    Parameters whose gradient has not been computed yet are skipped.
    (The original version computed the norm but discarded it; returning
    it lets callers threshold on the value as well.)

    :param model: an nn.Module whose .backward() has populated gradients
    :return: the total gradient L2 norm as a float
    """
    total_norm = 0.0
    for p in model.parameters():
        if p.grad is not None:
            # Accumulate squared per-parameter L2 norms.
            total_norm += p.grad.detach().norm(2).item() ** 2
    total_norm = total_norm ** 0.5
    print(f"梯度总范数: {total_norm}")
    return total_norm

def val(net, loss_func, max_iter = 64, char_set = []):
	"""Evaluate `net` on up to `max_iter` batches from val_loader.

	Logs per-batch predictions vs. ground truth, then logs and returns
	the average validation loss over the evaluated batches.

	:param net: the CRNN model to evaluate
	:param loss_func: CTC-style loss(log_probs, targets, input_lens, target_lens)
	:param max_iter: maximum number of validation batches to run
	:param char_set: index-to-character table (1-based; 0 is the CTC blank)
	:return: mean loss over the evaluated batches (0.0 if none were run)
	"""
	print('start val')
	net.eval()
	totalloss = 0.0
	k = 0
	correct_num = 0
	total_num = 0
	val_iter = iter(val_loader)
	max_iter = min(max_iter, len(val_loader))
	for i in range(max_iter):
		k = k + 1
		(data, label, labelLen) = next(val_iter)
		# BUG FIX: is_available is a function; the bare attribute was always truthy.
		if torch.cuda.is_available() and opt.use_gpu:
			data = data.to(device)
			label = label.to(device)

		# Flatten the padded (B, S) label matrix into one 1-D sequence.
		labels = torch.IntTensor([]).to(device)
		for j in range(label.size(0)):
			labels = torch.cat((labels, label[j]), 0)

		output = net(data).to(device)
		# CTC input lengths: every sample uses the full output width.
		output_size = torch.IntTensor([output.size(0)] * int(output.size(1))).to(device)
		output = F.log_softmax(output, dim=2)
		loss = loss_func(output, label, output_size, labelLen)
		totalloss += float(loss.item())
		# Greedy decode: argmax over classes, then collapse blanks/repeats.
		pred_label = output.max(2)[1]
		pred_label = pred_label.transpose(1, 0).contiguous().view(-1)
		pred = decode(pred_label)
		total_num += len(pred)

		# Keep only real symbols; 0 is the CTC blank / padding value.
		# (The original filter was misleadingly named is_even.)
		labels = [x for x in labels if x > 0]
		logger.info("预测: {}\t样本: {}".format(''.join([char_set[i - 1] for i in pred]), 
				''.join([char_set[i - 1] for i in labels])))
		# NOTE(review): positional zip comparison assumes prediction and
		# target align index-by-index — rough for CTC; confirm acceptable.
		for x, y in zip(pred, labels):
			if int(x) == int(y):
				correct_num += 1
	accuracy = correct_num / float(total_num) * 100 if total_num > 0 else 0.00
	# Guard against an empty loader (k == 0) instead of dividing by zero.
	test_loss = totalloss / k if k > 0 else 0.0
	logger.info("{}\tLoss: {}\taccuary: {:.6f}".format(time.strftime("[%Y-%m-%d-%H_%M_%S]", time.localtime(time.time())),
					test_loss, accuracy))
	# BUG FIX: return the averaged loss, not the last batch's loss tensor.
	return test_loss

if __name__ == '__main__':
	# Build the character table: skip the header line of chars.txt and
	# append a sentinel character so indices stay 1-based (0 = CTC blank).
	# BUG FIX: the file handle was previously opened and never closed.
	with open('chars.txt', 'r', encoding='utf-8') as f:
		char_set = f.readlines()
	char_set = ''.join([ch.strip('\n') for ch in char_set[1:]] + ['卍'])
	n_class = len(char_set)
	torch.set_printoptions(threshold=float('inf'))

	model = crnn.CRNN(opt.img_h, 1, n_class, 256)
	# BUG FIX: call is_available(); the bare attribute is always truthy.
	if torch.cuda.is_available() and opt.use_gpu:
		model.to(device)

	modelpath = opt.modelpath
	learning_rate = opt.learning_rate
	loss_func = torch.nn.CTCLoss(zero_infinity = True)
	optimizer = optim.AdamW(model.parameters(), lr=learning_rate, weight_decay=opt.weight_decay)

	# Learning-rate warmup for 10 epochs followed by cosine annealing.
	warmup_scheduler = torch.optim.lr_scheduler.LinearLR(optimizer, start_factor=0.1, total_iters=10)
	cosine_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=100)
	scheduler = torch.optim.lr_scheduler.SequentialLR(
		optimizer,
		schedulers=[warmup_scheduler, cosine_scheduler],
		milestones=[10]  # switch to cosine annealing once warmup ends
	)

	# Resume from a previous checkpoint when one exists.
	if os.path.exists(modelpath):
		print('Load model from "%s" ...' % modelpath)
		model.load_state_dict(torch.load(modelpath))
		print('Done!')

	k = 0  # global step counter across epochs
	printinterval = opt.printinterval
	valinterval = opt.valinterval
	# train
	for epoch in range(opt.max_epoch):
		for i, (data, label, labelLen) in enumerate(train_loader):
			k = k + 1
			# BUG FIX: is_available() was missing its parentheses here too.
			if torch.cuda.is_available() and opt.use_gpu:
				data = data.to(device)
				label = label.to(device)
			model.train()

			optimizer.zero_grad()
			output = model(data).to(device)
			# CTC input lengths: every sample uses the full output width.
			output_size = torch.IntTensor([output.size(0)] * int(output.size(1))).to(device)
			output = F.log_softmax(output, dim=2)
			loss = loss_func(output, label, output_size, labelLen)

			if k % printinterval == 0:
				# Periodic progress log + checkpoint save.
				logger.info("{}\tEpoch: {}\tStep: {}\tAvgLoss: {}\tLr: {}".format(
					time.strftime("[%Y-%m-%d-%H_%M_%S]", time.localtime(time.time())),
					epoch, i + 1, loss.item(), optimizer.param_groups[0]['lr']))
				torch.save(model.state_dict(), opt.modelpath)

			loss.backward()
			# Gradient clipping hooks would go here if training diverges,
			# e.g. torch.nn.utils.clip_grad_norm_(model.parameters(), 2.0).
			optimizer.step()

			if k % valinterval == 0:
				val(model, loss_func, 64, char_set)
		# Advance the warmup/cosine schedule once per epoch.
		scheduler.step()
