import argparse
from pathlib import Path
import torch
from torch.utils.data import DataLoader
from dataset import Pix2CodeDataset
from utils import collate_fn, ids_to_tokens, generate_visualization_object, resnet_img_transformation
from model.model2 import *
from nltk.translate.bleu_score import corpus_bleu, SmoothingFunction
from tqdm import tqdm
from html_transpiler.html_transpiler import HTMLTranspiler
from test import BaiDuOcr
from PIL import Image


# ---- Command-line arguments -------------------------------------------------
parser = argparse.ArgumentParser(description='Evaluate the model')

parser.add_argument("--model_file_path", type=str,
                    help="Path to the trained model file", required=False, default=r'F:\余孝琴\pix2-code-plus-210705\model-pix2code\result\web\model2\e-d-model--epoch-19--loss-0.0032--batch-4--time-07-10-16-07.pth')
parser.add_argument("--image_path", type=str, help="ImagePath",
                    default=r'F:\余孝琴\pix2code-pytorch\data\web\all_data\C890DF73-8E77-4D64-B0D0-FC5F7C0A79AB.png')
parser.add_argument("--gui_path", type=str, default=None, help="GUIPath")
# BUG FIX: the original used action='store_true' together with default=True,
# which made the flag a no-op (the value was True whether or not "--cuda" was
# passed).  BooleanOptionalAction (Python 3.9+) keeps "--cuda" working exactly
# as before and additionally accepts "--no-cuda" to disable GPU use.
parser.add_argument("--cuda", action=argparse.BooleanOptionalAction,
                    default=True, help="Use cuda or not")
parser.add_argument("--img_crop_size", type=int, default=224)
parser.add_argument("--seed", type=int, default=2020,
                    help="The random seed for reproducing ")

args = parser.parse_args()
print("Evaluation args:", args)

# Run OCR over the input screenshot.  BaiDuOcr.execute is called purely for
# its side effects; its return value is not used anywhere in this script.
img_path = Path(args.image_path)
ocr = BaiDuOcr()
ocr.execute(img_path)

# NOTE(review): tokens_path is assigned but never read later in this file —
# it looks like a leftover from an earlier evaluation path.  Confirm before
# removing the --gui_path option entirely.
if args.gui_path is not None:
    tokens_path = Path(args.gui_path)

# Seed both CPU and CUDA RNGs so decoding is reproducible.
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)

# ---- Device setup -----------------------------------------------------------
use_cuda = args.cuda and torch.cuda.is_available()
# Inference with this model is impractically slow on CPU, so refuse to run
# without a GPU.  A raise (unlike the original assert) still fires under
# `python -O`.
if not use_cuda:
    raise RuntimeError(
        "CUDA is required; trust me, you don't want to run this model on a cpu.")
device = torch.device("cuda" if use_cuda else "cpu")

# ---- Model loading ----------------------------------------------------------
model_file_path = Path(args.model_file_path)
if not model_file_path.exists():
    raise FileNotFoundError(f"Model file not found: {model_file_path}")

# BUG FIX: the checkpoint was loaded twice in the original (two identical
# torch.load calls on the same file).  Load it once and reuse it for both the
# vocab and the weights.  map_location places the tensors on the chosen device.
checkpoint = torch.load(args.model_file_path, map_location=device)
vocab = checkpoint["vocab"]

# All layer widths are hard-coded to 256 to match the trained checkpoint.
# (The original also defined embed_size/hidden_size/num_layers constants that
# were never used; they have been dropped.)
pipeline = Pipeline(vocab_size=len(vocab), embedding_size=256,
                    queries_size=256, keys_size=256, num_hiddens=256, dropout=0.1,
                    lstm_intput_size=256, lstm_num_hiddens=256, lstm_dropout=0.1, bias=True, num_layers=1,
                    decoder_input_size=256 * 2, decoder_num_hiddens=256, decoder_dropout=0.1, decoder_num_layers=1,
                    device=device
                    )
pipeline.load_state_dict(checkpoint['model_state_dict'])
print("load model success")
pipeline.to(device)
pipeline.eval()  # inference mode: disables dropout etc.


# ---- Inference --------------------------------------------------------------
transform_imgs = resnet_img_transformation(args.img_crop_size)

# Token id that terminates greedy decoding.
end = vocab.get_id_by_token(vocab.get_end_token())

# Preprocess the screenshot: force RGB, apply the ResNet transform, add a
# batch dimension, and move the tensor to the inference device.
image = Image.open(img_path).convert('RGB')
image = transform_imgs(image).unsqueeze(0).to(device)

# no_grad avoids building the autograd graph during sampling (pure inference);
# the deprecated `.data` access from the original is no longer needed.
with torch.no_grad():
    sample_ids = pipeline.sample(image, end)
prediction = ids_to_tokens(vocab, sample_ids.cpu().numpy())

# Transpile the predicted DSL token sequence into an HTML document.
valid_dsl_mapping_file_path = "html_transpiler/web-dsl-mapping.json"
transpiler = HTMLTranspiler(valid_dsl_mapping_file_path)
html = transpiler.transpile(prediction)
# Same output file name as the original produced ("sds1996.html"), just built
# without the redundant str.format call on a constant.
with open("sds1996.html", "w", encoding="utf-8") as f:
    f.write(html)
print("finish")

# targets.append(caption.cpu().numpy())

# predictions = [ids_to_tokens(vocab, prediction) for prediction in predictions]
# targets = [ids_to_tokens(vocab, target) for target in targets]

# valid_dsl_mapping_file_path = r"html_transpiler/web-dsl-mapping.json";
# transpiler = HTMLTranspiler(valid_dsl_mapping_file_path)
# for i, prediction in enumerate(predictions):
#     html = transpiler.transpile(prediction)
#     with open("html_Test{}.html".format(i), "w", encoding="utf-8") as f:
#         f.write(html)

# bleu = corpus_bleu([[target] for target in targets], predictions,
#                    smoothing_function=SmoothingFunction().method4)
# print("BLEU score: {}".format(bleu))

# if args.viz:
#     generate_visualization_object(data_loader.dataset, predictions, targets)
#     print("generated visualisation object")
