import json

import torch
import os
from pytorch_lightning import Trainer, Callback
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
from tqdm import tqdm

from config.config import Config
from models.imcs_dac import ImcsDacModule
from data.data import ImcsDacDataModule


if __name__ == '__main__':
    # Inference/prediction entry point: loads the last checkpoint, runs
    # predict over the dataset, maps predicted label ids back to dialogue-act
    # strings, and dumps the annotated dialogues as JSON keyed by cid.
    #torch.set_float32_matmul_precision('high')
    torch.set_printoptions(precision=8)
    config = Config()

    # Build and set up the datamodule; setup(None) runs setup for all stages.
    datamodule = ImcsDacDataModule(config, tokenizer=config.tokenizer)
    datamodule.prepare_data()
    datamodule.setup(None)

    # Label weights derived from the data are passed to the module via config
    # (presumably consumed by its loss function — the module's code is not
    # visible here).
    config.label_weights = datamodule.label_weights().to(config.device)
    config.label_smoothing = 0.1

    # Construct the module once. The original script constructed a second,
    # identical ImcsDacModule immediately after update_save_path(); that
    # duplicate was redundant — trainer.predict restores the weights from
    # ckpt_path regardless of the instance's fresh init — so it was removed.
    module = ImcsDacModule(config)
    config.update_save_path(module)

    # Resume from the most recent checkpoint found under the save path.
    ckpt_path = config.find_last_ckpt_path()

    trainer = Trainer(
        accelerator=config.device,
        max_epochs=config.num_epochs,
        accumulate_grad_batches=config.gradient_accumulation_steps,
        check_val_every_n_epoch=1,
        num_sanity_val_steps=0,
        enable_checkpointing=True,
        default_root_dir=config.save_path
    )
    predicts = trainer.predict(module, datamodule=datamodule, return_predictions=True, ckpt_path=ckpt_path)

    # NOTE(review): prepare_data()/setup(None) were already run above; this
    # repeat presumably re-creates predict_dataset after trainer.predict's
    # teardown — confirm it is actually needed before removing.
    datamodule.prepare_data()
    datamodule.setup(None)

    label_alphabet = datamodule.label_alphabet
    result = {}

    # predicts is indexed [sample][sentence]; each entry is a 0-dim tensor
    # holding a label id (assumption based on .item() usage — TODO confirm).
    for idx, sample in enumerate(datamodule.predict_dataset):
        for sent_idx, dialogue in enumerate(sample['dialogues']):
            label_id = predicts[idx][sent_idx].item()
            dialogue_act = label_alphabet.lookup_object(label_id)

            dialogue['dialogue_act'] = dialogue_act
        result[sample['cid']] = sample['dialogues']

    # Fixed filename typo (was "evalutation.json"). If a downstream script
    # reads the old misspelled name, update it to match.
    with open("evaluation.json", "w", encoding="UTF-8") as fp:
        json.dump(result, fp, ensure_ascii=False)
