import sys
import os
import time
import tqdm
# os.environ['CUDA_VISIBLE_DEVICES']='0'
import torch
from config import opt
from torch.utils.data import Dataset,DataLoader
from torchvision import transforms as T
import cv2
from PIL import Image
import numpy as np
from loguru import logger
from models import crnn
import torch.optim as optim
from torchvision import transforms


# Inference target device; fixed to CPU here (see the commented CUDA_VISIBLE_DEVICES above).
device = torch.device("cpu")

def readfile(filename):
	"""Read a label file and map each line's first token to its remaining tokens.

	Each line is expected to be space-separated: ``<key> <label...>``.
	Returns a dict ``{key: [label_tokens]}``.
	"""
	mapping = {}
	with open(filename, 'r') as fh:
		for raw_line in fh:
			tokens = raw_line.strip().split(' ')
			mapping[tokens[0]] = tokens[1:]
	return mapping

def decode(preds):
	"""Collapse a greedy CTC path: drop blanks (class 0) and merge repeated labels."""
	collapsed = []
	for idx, value in enumerate(preds):
		# Keep a symbol only if it is non-blank and differs from its predecessor.
		if value != 0 and (idx == 0 or value != preds[idx - 1]):
			collapsed.append(int(value))
	return collapsed
		
class resizeNormalize(object):
	"""Convert a PIL image to a tensor normalized from [0, 1] to [-1, 1].

	NOTE(review): the stored target ``size``/``interpolation`` are currently
	unused — the resize call is disabled, only tensor conversion runs.
	"""

	def __init__(self, size, interpolation=Image.BILINEAR):
		self.size = size
		self.interpolation = interpolation
		self.toTensor = transforms.ToTensor()

	def __call__(self, img):
		# Resizing is intentionally skipped; convert then normalize in place.
		tensor = self.toTensor(img)
		return tensor.sub_(0.5).div_(0.5)
	
def resize(data):
    """Scale a PIL image to height 32 (width kept proportional) and normalize it.

    Returns a tensor in [-1, 1]; moved to ``device`` when CUDA is available
    and GPU use is enabled in the config.
    """
    (w, h) = data.size
    size_h = 32
    ratio = size_h / float(h)
    size_w = int(w * ratio)
    transform = resizeNormalize((size_w, size_h))
    data = transform(data)
    # BUG FIX: `torch.cuda.is_available` was referenced without calling it —
    # a bound function is always truthy, so the guard never tested for CUDA.
    if torch.cuda.is_available() and opt.use_gpu:
        data = data.to(device)
    return data

if __name__ == '__main__':
    # Build the character set: drop the first line (reserved/blank class) and
    # append a filler char so network indices map 1-based into `char_set`.
    # FIX: use a context manager — the original left the file handle open.
    with open('chars.txt', 'r', encoding='utf-8') as f:
        char_lines = f.readlines()
    char_set = ''.join([ch.strip('\n') for ch in char_lines[1:]] + ['卍'])
    n_class = len(char_set)

    model = crnn.CRNN(opt.img_h, 1, n_class, 256)
    modelpath = "train/pytorch-crnn.pth"

    if os.path.exists(modelpath):
        print('Load model from "%s" ...' % modelpath)
        model.load_state_dict(torch.load(modelpath, map_location=device))
        print('Done!')

    model.eval()

    # Load each sample as grayscale and normalize to a fixed-height tensor.
    # (Collapsed from four copy-pasted stanzas in the original.)
    image_paths = [
        "/mnt/disk1/datasets/small/images/0000006-螺脱肥XZ.png",
        "/mnt/disk1/datasets/small/images/0000028-晒变厦P4.png",
        "/mnt/disk1/datasets/small/images/0000065-盗避AO.png",
        "/mnt/disk1/datasets/small/images/0000218-畔渤9OYZ.png",
    ]
    batch = [resize(Image.open(p).convert('L')) for p in image_paths]

    # NOTE(review): torch.stack requires identical widths after resize —
    # assumes these images share an aspect ratio; verify before adding samples.
    data = torch.stack(batch, dim=0)

    # Inference only: disable autograd bookkeeping.
    with torch.no_grad():
        output = model(data).to(device)
    # Greedy decoding: argmax class per timestep.
    pred_label = output.max(2)[1]
    print(pred_label)
    # Flatten time-major (T, B) predictions batch-by-batch before CTC collapse.
    pred_label = pred_label.transpose(1, 0).contiguous().view(-1)
    pred = decode(pred_label)
    print(pred)
    print(''.join([char_set[i - 1] for i in pred]))