import sys
import os
import time
import tqdm
# os.environ['CUDA_VISIBLE_DEVICES']='0'
import torch
from config import opt
from torch.utils.data import Dataset,DataLoader
from torchvision import transforms as T
import cv2
from PIL import Image
import numpy as np
from loguru import logger
from models import crnn
import torch.optim as optim
from torchvision import transforms


# Run on the first GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

def readfile(filename):
	"""Parse a label file into a mapping of image name -> label tokens.

	Each line has the form ``<image_name> <tok1> <tok2> ...`` separated by
	single spaces.  Tokens are kept as strings; callers convert as needed.

	Args:
		filename: path to the label file.

	Returns:
		dict mapping the first field of each line to the list of the
		remaining fields.
	"""
	dic = {}
	# Explicit utf-8 so image names with non-ASCII characters parse
	# correctly regardless of the platform's default encoding; stream the
	# file line by line instead of materializing it twice.
	with open(filename, 'r', encoding='utf-8') as f:
		for line in f:
			parts = line.strip().split(' ')
			dic[parts[0]] = parts[1:]
	return dic

class resizeNormalize(object):
	"""Resize a PIL image and normalize it to a tensor in [-1, 1]."""

	def __init__(self, size, interpolation=Image.BILINEAR):
		# Target (width, height) and the PIL resampling filter to use.
		self.size = size
		self.interpolation = interpolation
		self.toTensor = transforms.ToTensor()

	def __call__(self, img):
		# Resize, convert to a [0, 1] tensor, then shift/scale to [-1, 1]
		# in place: (x - 0.5) / 0.5.
		resized = img.resize(self.size, self.interpolation)
		tensor = self.toTensor(resized)
		return tensor.sub_(0.5).div_(0.5)

class Chineseocr(Dataset):
	"""Dataset of grayscale text-line images with fixed-length labels.

	Labels come from ``readfile(labelroot)``; image files live under
	``imageroot``.  Each sample is a normalized tensor of height 32 (width
	scaled to keep aspect ratio) plus an IntTensor of 6 class ids.
	"""

	def __init__(self, imageroot, labelroot):
		self.image_dict = readfile(labelroot)
		self.image_root = imageroot
		# Fix an ordering of the image names for index-based access.
		self.image_name = list(self.image_dict.keys())

	def __getitem__(self, index):
		name = self.image_name[index]
		img_path = os.path.join(self.image_root, name)
		keys = self.image_dict.get(name)
		# The first six tokens of the label line are the integer class ids.
		label = torch.IntTensor([int(tok) for tok in keys[:6]])
		# Load as single-channel ('L') grayscale.
		image = Image.open(img_path).convert('L')
		w, h = image.size
		# Scale to a fixed height of 32 while preserving the aspect ratio.
		target_h = 32
		target_w = int(w * (target_h / float(h)))
		image = resizeNormalize((target_w, target_h))(image)
		return image, label

	def __len__(self):
		return len(self.image_name)

# Training set: image directory and label file paths come from the project
# config object `opt`.
train_data = Chineseocr(
	imageroot = opt.image_path,
	labelroot = opt.train_data_root
)

# Shuffled mini-batch loader for training.
train_loader = DataLoader(
	train_data,
	batch_size = opt.batch_size,
	shuffle = True,
	num_workers = opt.num_workers
)

# Validation set built the same way from a separate label file.
val_data = Chineseocr(
    imageroot = opt.image_path,
    labelroot = opt.validation_data_root
)
# NOTE(review): shuffle=True on the validation loader is unusual (harmless for
# aggregate metrics) — confirm whether deterministic order is wanted here.
val_loader = DataLoader(
	val_data,
	batch_size = opt.batch_size,
	shuffle = True,
	num_workers = opt.num_workers
)

def decode(preds):
	"""Greedy CTC collapse: drop blanks (class 0) and repeated symbols.

	Args:
		preds: sequence of per-timestep class indices (ints or values
			comparable/convertible to int).

	Returns:
		list[int]: the decoded label sequence, with blanks removed and
		consecutive duplicate symbols collapsed to one.
	"""
	pred = []
	for i, cur in enumerate(preds):
		# Keep a symbol only if it is not the blank and differs from the
		# previous timestep (standard CTC best-path decoding).
		if cur != 0 and not (i > 0 and preds[i - 1] == cur):
			pred.append(int(cur))
	return pred


if __name__ == '__main__':
	# Sanity check: scan the whole training set once and report how many
	# times each class id appears, to spot class imbalance before training.
	# (numpy is already imported at module level; the redundant local
	# `import numpy as np` was removed.)
	all_labels = []
	for data, label in train_loader:
		# label is a (batch, 6) IntTensor; flatten explicitly so the list
		# holds scalar class ids rather than row arrays.
		all_labels.extend(label.numpy().ravel())
	unique, counts = np.unique(all_labels, return_counts=True)
	print("类别分布:", dict(zip(unique, counts)))