import argparse
from pathlib import Path
import torch
from torch.utils.data import DataLoader
from dataset import Pix2CodeDataset
from utils import collate_fn, ids_to_tokens, generate_visualization_object, resnet_img_transformation
from model.model2 import *
from nltk.translate.bleu_score import corpus_bleu, SmoothingFunction
from tqdm import tqdm
from vocab import Vocab
from html_transpiler.html_transpiler import HTMLTranspiler

# Command-line interface for the evaluation script.
parser = argparse.ArgumentParser(description='Evaluate the model')

# The checkpoint to evaluate. The previous default was an absolute path on the
# original author's machine (F:\...\model2\...), which cannot resolve anywhere
# else, so the argument is now mandatory instead of silently pointing at a
# nonexistent file.
parser.add_argument("--model_file_path", type=str, required=True,
                    help="Path to the trained model file")
parser.add_argument("--data_path", type=str,
                    default=Path("data", "web", "all_data"), help="Datapath")
# NOTE(review): with action='store_true' AND default=True this flag is always
# on and cannot be disabled from the CLI. Kept as-is because the script
# refuses to run without CUDA anyway (see the __main__ body).
parser.add_argument("--cuda", action='store_true',
                    default=True, help="Use cuda or not")
parser.add_argument("--img_crop_size", type=int, default=224,
                    help="Side length images are transformed to before the CNN")
parser.add_argument("--split", type=str, default="test",
                    help="Dataset split to evaluate on")
parser.add_argument("--viz", action='store_true', default=False,
                    help="Additionally generate a visualization object")
parser.add_argument("--batch_size", type=int, default=1,
                    help="Batch size for the (length-reporting) data loader")
parser.add_argument("--seed", type=int, default=2020,
                    help="The random seed for reproducing ")
parser.add_argument("--vocab_file_path", type=str, default=None,
                    help="Path to the vocab file (defaults to <data_path>/../vocab.txt)")

if __name__ == '__main__':
    args = parser.parse_args()

    # Default the vocab file to a sibling of the data directory
    # (e.g. data/web/vocab.txt for data/web/all_data).
    args.vocab_file_path = args.vocab_file_path if args.vocab_file_path else Path(
        Path(args.data_path).parent, "vocab.txt")

    # Fix RNG seeds for reproducible sampling.
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    # Load the vocab file
    vocab = Vocab(args.vocab_file_path)
    assert len(vocab) > 0

    # Setup GPU. This script is GPU-only by design; raise explicitly instead
    # of using `assert`, which is stripped under `python -O`.
    use_cuda = bool(args.cuda and torch.cuda.is_available())
    if not use_cuda:
        raise RuntimeError(
            "CUDA is required to evaluate this model; no usable GPU found.")
    device = torch.device("cuda:0" if use_cuda else "cpu")

    transform_imgs = resnet_img_transformation(args.img_crop_size)

    # Creating the data loader.
    # NOTE(review): the inference loop below iterates `data_loader.dataset`
    # directly (one sample at a time); the DataLoader itself is only used for
    # the length printout — presumably kept for parity with the training
    # script. Verify before removing.
    data_set = Pix2CodeDataset(args.data_path, args.split,
                               vocab, transform=transform_imgs)
    data_loader = DataLoader(
        data_set,
        batch_size=args.batch_size,
        collate_fn=lambda data: collate_fn(data, vocab=vocab),
        pin_memory=use_cuda,
        # num_workers=4,
        drop_last=True)
    print('--------------------------')
    print('len_data_set', len(data_set))        # typo fix: was 'len_date_set'
    print('len_data_loader', len(data_loader))  # copy-paste fix: was 'len_train_loader'
    print('--------------------------')

    print("Created data loader")

    # Build the encoder/attention/decoder pipeline with the same
    # hyper-parameters used at training time (must match the checkpoint).
    pipeline = Pipeline(vocab_size=len(vocab), embedding_size=256,
                        queries_size=256, keys_size=256, num_hiddens=256, dropout=0.1,
                        lstm_intput_size=256, lstm_num_hiddens=256, lstm_dropout=0.1, bias=True, num_layers=1,
                        decoder_input_size=256 * 2, decoder_num_hiddens=256, decoder_dropout=0.1, decoder_num_layers=1,
                        device=device
                        )

    # map_location keeps the load working even when the checkpoint was saved
    # on a different device ordinal than the one we run on.
    loaded_model = torch.load(args.model_file_path, map_location=device)
    pipeline.load_state_dict(loaded_model['model_state_dict'])
    print("load model success")
    pipeline.to(device)
    pipeline.eval()

    # Greedy-sample one token sequence per image; collect predictions and
    # ground-truth captions as id arrays. no_grad() skips autograd
    # bookkeeping — this is pure inference.
    predictions = []
    targets = []
    end = vocab.get_id_by_token(vocab.get_end_token())
    with torch.no_grad():
        for image, caption in tqdm(data_loader.dataset):
            image = image.to(device).unsqueeze(0)
            caption = caption.to(device).unsqueeze(0)

            # pipeline.sample presumably decodes until `end` is produced —
            # contract defined in model.model2.
            sample_ids = pipeline.sample(image, end)
            predictions.append(sample_ids.cpu().data.numpy())
            targets.append(caption.cpu().numpy())

    # Convert id sequences back to token strings for BLEU scoring.
    predictions = [ids_to_tokens(vocab, prediction) for prediction in predictions]
    targets = [ids_to_tokens(vocab, target.reshape(-1)) for target in targets]

    # Optional: transpile predictions to HTML files for manual inspection.
    # valid_dsl_mapping_file_path = r"html_transpiler/web-dsl-mapping.json";
    # transpiler = HTMLTranspiler(valid_dsl_mapping_file_path)
    # for i, prediction in enumerate(predictions):
    #     html = transpiler.transpile(prediction)
    #     with open("result/web/html/html_Test{}.html".format(i), "w", encoding="utf-8") as f:
    #         f.write(html)

    # Corpus BLEU with smoothing; each target is wrapped as a
    # single-reference list, as corpus_bleu expects.
    bleu = corpus_bleu([[target] for target in targets], predictions,
                       smoothing_function=SmoothingFunction().method4)
    print("BLEU score: {}".format(bleu))

    if args.viz:
        generate_visualization_object(data_loader.dataset, predictions, targets)
        print("generated visualisation object")