import os
import sys
import time
from dataclasses import dataclass, field
from typing import Optional

from pytorch_lightning import Trainer
from transformers import HfArgumentParser, TrainingArguments, AutoTokenizer, AutoConfig

from data.data import ImcsDacDataModule
from models.imcs_dac import ImcsDacModule


@dataclass
class ModelArguments:
    """Model-side arguments: checkpoint selection and fine-tuning scope.

    Parsed by ``HfArgumentParser``; field types drive CLI argument parsing,
    so ``unfrozen_layers`` must be declared as a string even though
    ``__post_init__`` converts it to a list of substrings afterwards.
    """

    # Checkpoint (hub id or local path) used to initialize weights.
    model_name_or_path: Optional[str] = field(
        default="uer/chinese_roberta_L-12_H-768",
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "Pretrained config name or path if not the same as model_name"
        }
    )
    tokenizer_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "Pretrained tokenizer name or path if not the same as model_name"
        }
    )
    # Comma-separated name fragments; parameters whose name contains one of
    # these stay trainable while the rest of the encoder is frozen.
    # After __post_init__ this is a list[str] (empty list if the string was empty).
    unfrozen_layers: Optional[str] = field(
        default='layer.10.,layer.11.,bert.pooler,pooler.dense,out.'
    )
    device: Optional[str] = field(
        default='cuda'
    )
    # Overwritten in main() with config.hidden_size of the loaded checkpoint.
    embed_dim: Optional[int] = field(
        default=768
    )

    def __post_init__(self):
        # Normalize the comma-separated string into a list of substrings;
        # an empty/None value means "no layers unfrozen".
        if self.unfrozen_layers:
            self.unfrozen_layers = self.unfrozen_layers.split(",")
        else:
            self.unfrozen_layers = []


@dataclass
class DataArguments:
    """Data-side arguments consumed by the IMCS-DAC datamodule."""

    # DataLoader worker process count (0 = load data in the main process).
    num_workers: Optional[int] = field(
        default=0,
        metadata={"help": ("dataloader num_workers")},
    )
    # Token sequences longer than this are truncated.
    max_seq_length: Optional[int] = field(
        default=43,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated."
            )
        },
    )

    def __post_init__(self):
        # No derived fields yet; hook kept for parity with ModelArguments.
        pass


@dataclass
class TrainingArguments:
    """Training-side arguments (optimizer hyperparameters).

    NOTE(review): this local class shadows ``transformers.TrainingArguments``
    imported at the top of the file — the HF class is unreachable below this
    definition. Consider renaming (e.g. ``TrainArguments``) to avoid confusion.
    """

    learning_rate: Optional[float] = field(
        default=5e-5
    )


def main():
    """Parse arguments, load tokenizer/config, build the IMCS-DAC datamodule,
    and time one full pass over the training dataloader.

    The Lightning training loop below is still scaffolding (commented out);
    this entry point currently only benchmarks data loading.
    """
    parser = HfArgumentParser((ModelArguments, DataArguments, TrainingArguments))
    # A single ".json" CLI argument means "read every argument from that file".
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        model_args, data_args, train_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, train_args = parser.parse_args_into_dataclasses()

    # Config: an explicit config_name wins, otherwise fall back to the checkpoint.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path)
    else:
        raise ValueError("missing model_name_or_path or config_name")

    # Tokenizer: same fallback scheme as the config.
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path)
    else:
        raise ValueError("missing model_name_or_path or tokenizer_name")

    # Keep embed_dim in sync with the hidden size of the loaded checkpoint.
    model_args.embed_dim = config.hidden_size

    datamodule = ImcsDacDataModule(config=data_args, tokenizer=tokenizer)
    # Label/speaker vocabulary sizes are only known once the datamodule has
    # built its alphabets, so they are attached to data_args here.
    data_args.num_labels = datamodule.label_alphabet.size
    data_args.num_speakers = datamodule.speaker_alphabet.size

    # Benchmark: consume every batch once and report wall-clock time.
    start = time.time()
    for _ in datamodule.train_dataloader():
        pass  # batches are discarded; only iteration speed matters here
    print(f"train_dataloader full pass: {time.time() - start:.3f}s")

    # TODO(review): wire up the actual training run. As written this scaffold
    # references undefined names (logger, checkpoint, MyTQDMProgressBar) and
    # reads trainer options from `config` (the AutoConfig) instead of train_args.
    #
    # module = ImcsDacModule(train_args)
    #
    # trainer = Trainer(
    #     accelerator=config.device,
    #     max_epochs=config.num_epochs,
    #     accumulate_grad_batches=config.gradient_accumulation_steps,
    #     num_sanity_val_steps=0,
    #     enable_checkpointing=True,
    #     default_root_dir=config.save_path,
    #     logger=logger,
    #     callbacks=[LearningRateMonitor(), checkpoint, MyTQDMProgressBar()]
    # )

# Script entry point: only run main() when executed directly, not on import.
if __name__ == '__main__':
    main()
