import datetime
import warnings
from pathlib import Path

import torch
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from transformers import T5ForConditionalGeneration, AutoTokenizer

# from transformers import T5ForConditionalGeneration, T5Tokenizer

from instruction_re.arg_parse import get_train_args
from instruction_re.collator import Collator
from instruction_re.dataset import T5REDataset
from instruction_re.utils.train_utils import train
from instruction_re.core.datatypes import TaskType
from instruction_re.utils.utils import (
    load_config,
    load_json,
    loads_json,
    set_global_seed,
)
import json
from accelerate import Accelerator

# Module-level Accelerator shared by the whole script: it owns device
# placement / mixed precision and is handed to the training loop in main().
accelerator = Accelerator()


# Silence ALL warnings globally (library deprecation noise, etc.).
# NOTE(review): this also hides genuinely useful warnings — confirm intended.
warnings.filterwarnings("ignore")


def main():
    """Train a T5 relation-extraction model.

    Reads CLI args and a model config, loads data/instructions/label options,
    builds train & validation datasets and dataloaders, then delegates the
    training loop to ``train``. A copy of the config is written next to the
    saved model; the best checkpoint (if enabled) goes to ``<save_dir>/best``.
    """
    args = get_train_args()
    # Timestamp used to namespace the TensorBoard run directory.
    now = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")

    config = load_config(args.path_to_model_config)
    set_global_seed(config["seed"])

    # TensorBoard writer is optional: only created when --log_dir is given.
    writer = None
    if args.log_dir is not None:
        writer = SummaryWriter(log_dir=f"{args.log_dir}/{now}")

    instructions = load_json(args.path_to_instructions)

    # Load training data; when no explicit validation file is configured,
    # carve a 15% validation split out of the training set (seeded).
    data_train = loads_json(config["data"]["train"])
    valid_path = config["data"]["valid"]
    if valid_path is None:
        data_train, data_valid = train_test_split(
            data_train, test_size=0.15, random_state=config["seed"]
        )
    else:
        data_valid = loads_json(valid_path)

    # Relation label options: prefer a dataset-specific label file; otherwise
    # look the dataset up in the shared options file.
    label_path = config["data"]["label"]
    if label_path is not None:
        options = [o.lower() for o in loads_json(label_path)]
    else:
        options = load_json(args.path_to_options)[config["data"]["dataset"]]

    # Training covers relation classification (RC) and triplet extraction (TE).
    train_dataset = T5REDataset(
        data=data_train,
        instructions=instructions["train"],
        options=options,
        tasks=[
            TaskType.RC,
            TaskType.TE,
        ],
    )

    # Validation evaluates only the RC task, with the test-time instructions.
    valid_dataset = T5REDataset(
        data=data_valid,
        instructions=instructions["test"],
        options=options,
        tasks=[TaskType.RC],
    )

    # Device placement is handled by `accelerate`; report its choice.
    # (Previously this printed an undefined `device` variable -> NameError.)
    print(f"Using device: {accelerator.device}")

    # Prefer an explicit local path; fall back to the hub model name.
    model_path = config["model"].get("path", config["model"]["name"])

    tokenizer = AutoTokenizer.from_pretrained(model_path)
    model = T5ForConditionalGeneration.from_pretrained(model_path)

    tokenizer_kwargs = dict(config["tokenizer"])
    generation_kwargs = dict(config["generation"])

    if config["replace_labels_with_special_tokens"]:
        # TODO: add label special tokens to the tokenizer and resize the
        # model's embeddings accordingly.
        pass

    optimizer = torch.optim.Adam(
        model.parameters(),
        lr=float(config["training"]["learning_rate"]),
    )

    collator = Collator(
        tokenizer=tokenizer,
        tokenizer_kwargs=tokenizer_kwargs,
    )

    # Debug aid: show one rendered training sample before starting.
    print(train_dataset[0])

    train_dataloader = DataLoader(
        dataset=train_dataset,
        batch_size=int(config["training"]["batch_size"]),
        shuffle=True,
        collate_fn=collator,
    )

    # NOTE(review): shuffling the validation loader is unusual for evaluation;
    # kept as-is to preserve behavior — confirm it is intentional.
    valid_dataloader = DataLoader(
        dataset=valid_dataset,
        batch_size=int(config["evaluation"]["batch_size"]),
        shuffle=True,
        collate_fn=collator,
    )

    eval_every_n_batches = args.eval_every_n_batches
    pred_every_n_batches = args.pred_every_n_batches

    path_to_save_trained_model = Path(args.path_to_model_save)
    path_to_save_trained_model.mkdir(parents=True, exist_ok=True)

    # Save a copy of the config alongside the model for reproducibility.
    with (path_to_save_trained_model / "config.json").open("w") as f:
        json.dump(config, f, indent=4)

    do_save_best_checkpoint = bool(config["training"]["do_save_best_checkpoint"])
    path_to_save_best_checkpoint = None
    if do_save_best_checkpoint:
        path_to_save_best_checkpoint = path_to_save_trained_model / "best"
        path_to_save_best_checkpoint.mkdir(exist_ok=True)

    train(
        n_epochs=int(config["training"]["n_epoch"]),
        model=model,
        tokenizer=tokenizer,
        train_dataloader=train_dataloader,
        test_dataloader=valid_dataloader,
        optimizer=optimizer,
        writer=writer,
        eval_every_n_batches=eval_every_n_batches,
        pred_every_n_batches=pred_every_n_batches,
        generation_kwargs=generation_kwargs,
        options=options,
        # Guard against AttributeError: the best-checkpoint path is None when
        # do_save_best_checkpoint is disabled in the config.
        path_to_save_model=(
            path_to_save_best_checkpoint.as_posix()
            if path_to_save_best_checkpoint is not None
            else None
        ),
        metric_name_to_choose_best=config["training"]["metric_name"],
        metric_avg_to_choose_best=config["training"]["metric_avg"],
        accelerator=accelerator,
    )


# Script entry point: run training when invoked directly.
if __name__ == "__main__":
    main()
    # test()
