import os
import copy
import logging
from dataclasses import dataclass, field
from typing import Dict, Optional, Sequence, List
import json
import random

import torch
import torch.distributed as dist
import transformers

from torch.utils.data import Dataset
from transformers import Trainer, AutoConfig
from transformers import EvalPrediction
from transformers import LlamaForCausalLM, Trainer
from peft import LoraConfig, TaskType, get_peft_model

from model import LlamaRewardModel


from llm_datasets import TextLlmDataset, llm_data_collactor, load_text_dataset
from arguments import CustomTrainingArguments
from trainer import RewardModelTrainer, RewardModelTrainer_v1, LLMModelTrainener

from utils import print_rank_0, set_reward_tokenizer, merge_json_or_jsonl_data
from utils import DEFAULT_PAD_TOKEN, DEFAULT_BOS_TOKEN, DEFAULT_EOS_TOKEN, DEFAULT_UNK_TOKEN
from utils import QUERY_PROMPT, SEP_TOKEN, STRING_SEP, INFER_TMP_FILE


def get_train_dataset(args):
    """Build the training dataset.

    Loads every file listed in ``args.train_data_path``, concatenates the
    resulting samples, and wraps them in a ``TextLlmDataset``. When
    ``args.debug_mode`` is on, the first loaded sample is printed on rank 0
    for a quick sanity check.
    """
    samples = []
    for data_path in args.train_data_path:
        samples += load_text_dataset(args=args, data_path=data_path)

    if args.debug_mode:
        # Peek at one record to verify the loader produced what we expect.
        print_rank_0(f">>> check loaded data:")
        print_rank_0(f">>> {samples[0]}")

    return TextLlmDataset(samples)


def train():
    """Entry point for LoRA fine-tuning of a causal LLaMA model.

    Parses ``CustomTrainingArguments`` from the command line, optionally
    loads the training set, wraps the base model with a PEFT LoRA adapter,
    builds the tokenizer and trainer, and (when ``--do_train`` is set) runs
    training under CUDA autocast, then saves metrics, state, and the model.
    """
    parser = transformers.HfArgumentParser(CustomTrainingArguments)
    args = parser.parse_args_into_dataclasses()[0]
    print_rank_0(args)

    # load data
    #---------------------------------------------------------------------------------
    # Only materialize the training set when training was actually requested.
    train_dataset = get_train_dataset(args) if args.do_train else None

    # setup model
    #---------------------------------------------------------------------------------
    print_rank_0(f"Begin loading model from {args.model_name_or_path}")
    model = LlamaForCausalLM.from_pretrained(args.model_name_or_path)

    print_rank_0(model)
    print_rank_0(f"Finished loading model from {args.model_name_or_path}")

    model.is_parallelizable = True
    model.model_parallel = True

    # setup lora config
    #---------------------------------------------------------------------------------
    # Rank-8 LoRA adapter with alpha 32 and 10% dropout on the causal-LM task.
    lora_cfg = LoraConfig(task_type=TaskType.CAUSAL_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1)
    model = get_peft_model(model, lora_cfg)

    # setup tokenizer
    #---------------------------------------------------------------------------------
    tokenizer = transformers.AutoTokenizer.from_pretrained(
        args.model_name_or_path,
        model_max_length=args.max_length,
        padding_side=args.padding_side,
        truncation_side=args.truncation_side,
        use_fast=False,
    )

    model, tokenizer = set_reward_tokenizer(model=model, tokenizer=tokenizer)

    # build trainer
    #---------------------------------------------------------------------------------
    trainer = LLMModelTrainener(
        model=model,
        tokenizer=tokenizer,
        args=args,
        compute_metrics=None,
        train_dataset=train_dataset,
        eval_dataset=None,
        data_collator=lambda batch: llm_data_collactor(args, batch, tokenizer)
    )

    if not args.do_train:
        return

    with torch.autocast("cuda"):
        if args.resume_from_checkpoint:
            result = trainer.train(resume_from_checkpoint=args.resume_from_checkpoint)
        else:
            result = trainer.train()

    metrics = result.metrics
    trainer.log_metrics("train", metrics)
    trainer.save_metrics("train", metrics)

    trainer.save_state()
    trainer.save_model(output_dir=args.output_dir)

# Script entry point: run the fine-tuning pipeline when invoked directly.
if __name__ == "__main__":
    train()
