
from transformers import AutoModelForCausalLM,TrainingArguments, AutoTokenizer , Trainer,HfArgumentParser
import torch

from peft import get_peft_model
import os
import sys
sys.path.append(".")
from finetune import lo_conf

from datasets import load_dataset
# Fix the random seed for reproducibility
torch.manual_seed(2618)
import os
os.environ['WANDB_MODE'] = 'offline'
import torch.nn.functional as F
def data_formator(q, tokenizer, max_length, prefix_token=None, suffix_token=None, middle_token=None, eos_token=None, **kwargs):
    """Build fill-in-the-middle (FIM) training examples from a batch of samples.

    Each sample is assembled as
    ``<prefix_tok>{prefix}<suffix_tok>{suffix}<middle_tok>{mid}<eos_tok>``
    using the special tokens carried by *tokenizer*, then tokenized to a
    fixed length.

    Args:
        q: Batch dict with string lists under ``"prefix"``, ``"suffix"`` and
            ``"mid"`` (HF ``datasets.map(batched=True)`` batch format).
        tokenizer: Tokenizer exposing ``prefix_token`` / ``suffix_token`` /
            ``middle_token`` / ``eos_token`` attributes and
            ``batch_encode_plus``.
        max_length: Fixed sequence length (padded / truncated to this size).
        prefix_token, suffix_token, middle_token, eos_token: Accepted only so
            config dicts can be splatted in via ``fn_kwargs``; the tokenizer's
            own special tokens are what is actually used.
        **kwargs: Ignored extra config fields forwarded by ``fn_kwargs``.

    Returns:
        Dict with ``input_ids``, ``attention_mask`` and ``labels`` tensors.
    """
    # Keep only the last 512 chars of the prefix and the first 512 chars of
    # the suffix/middle so the assembled text stays within a bounded size.
    texts = [
        f"{tokenizer.prefix_token}{prefix[-512:]}{tokenizer.suffix_token}{q['suffix'][i][:512]}{tokenizer.middle_token}{q['mid'][i][:512]}{tokenizer.eos_token}"
        for i, prefix in enumerate(q['prefix'])
    ]

    out = tokenizer.batch_encode_plus(texts, max_length=max_length, padding="max_length", truncation=True, return_tensors="pt")
    input_ids = out["input_ids"]
    attention_mask = out["attention_mask"]
    # Zero the attention mask wherever input_ids == 0 (the padding id here,
    # since pad_token is aliased to unk_token when the tokenizer is built).
    attention_mask = torch.where(input_ids == 0, torch.zeros_like(attention_mask), attention_mask)
    # Bug fix: mask padding positions in the labels with -100 so the LM loss
    # ignores them, instead of training the model to emit pad tokens.
    labels = input_ids.clone()
    labels[attention_mask == 0] = -100
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "labels": labels,
    }

def beginmod(moddate, peft_config=None):
    """Load a causal language model from the path held in *moddate*.

    *peft_config* is accepted for interface compatibility but not applied
    here — PEFT wrapping is done by the caller.
    """
    model = AutoModelForCausalLM.from_pretrained(
        moddate.pretrained_model_name_or_path,
        trust_remote_code=True,
    )
    return model

def beginwords(moddate, special_tokens=None):
    """Load the tokenizer named in *moddate* and configure padding.

    The pad token id is aliased to the unk token id so padding works for
    models that ship without a dedicated pad token.  *special_tokens* is
    accepted for interface compatibility but unused.
    """
    tok = AutoTokenizer.from_pretrained(moddate.pretrained_model_name_or_path)
    tok.pad_token_id = tok.unk_token_id
    return tok

# Initialize the Trainer
def begintr(md, traindate: TrainingArguments, train_dataset, eval_dataset, tokenizer, collator=None) -> Trainer:
    """Construct a Hugging Face ``Trainer`` for the given model and data.

    When *collator* is falsy, a simple default collator is used that stacks
    the pre-tokenized ``input_ids`` / ``attention_mask`` / ``labels`` fields
    of each example into batch tensors.
    """
    def _stack_fields(batch):
        # Turn a list of per-example dicts into one tensor per field.
        return {
            field: torch.tensor([example[field] for example in batch])
            for field in ("input_ids", "attention_mask", "labels")
        }

    return Trainer(
        model=md,
        args=traindate,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        tokenizer=tokenizer,
        data_collator=collator or _stack_fields,
    )


import logging
from datasets import load_dataset
from tqdm import tqdm
import os

def dataload(datatate, tokenizer, debug=False, data_format='parquet', custom_preprocessor=None):
    """Load and tokenize the training and evaluation datasets.

    Args:
        datatate: Data config object; must expose ``train_dataset_path``
            (comma-separated file paths), ``eval_dataset_path`` and
            ``num_data_proc``.
        tokenizer: Tokenizer forwarded to ``data_formator``.
        debug: When True, log the full data config at DEBUG level.
        data_format: File format of the datasets ('parquet' or 'csv').
        custom_preprocessor: Optional callable applied to both raw datasets
            before tokenization.

    Returns:
        Tuple ``(train_dataset, eval_dataset)`` of processed datasets.

    Raises:
        ValueError: If *data_format* is not supported.
        FileNotFoundError: If a dataset path does not exist.
    """
    logging.basicConfig(level=logging.INFO)

    if debug:
        logging.debug(f"Data arguments: {vars(datatate)}")

    try:
        # Validate the format once, up front, for both datasets.
        if data_format not in ('parquet', 'csv'):
            raise ValueError(f"Unsupported data format: {data_format}")

        logging.info("Loading train dataset...")
        train_dataset = load_dataset(data_format, data_files={'train': datatate.train_dataset_path.split(',')})['train']

        logging.info("Loading eval dataset...")
        # Bug fix: the eval dataset was previously hard-coded to load as
        # 'parquet' even when data_format == 'csv'; use the requested
        # format for both splits.
        eval_dataset = load_dataset(data_format, data_files={'train': datatate.eval_dataset_path})['train']

        # Apply the caller-supplied preprocessing step, if any.
        if custom_preprocessor:
            logging.info("Applying custom preprocessor...")
            train_dataset = custom_preprocessor(train_dataset)
            eval_dataset = custom_preprocessor(eval_dataset)

        logging.info("Processing datasets...")
        # The whole data config is splatted into fn_kwargs; data_formator
        # picks out the fields it needs and ignores the rest via **kwargs.
        data_cfg_dict = vars(datatate)

        train_dataset = train_dataset.map(data_formator, batched=True, num_proc=datatate.num_data_proc,
                                          fn_kwargs={"tokenizer": tokenizer, **data_cfg_dict},
                                          desc="Processing training data")
        eval_dataset = eval_dataset.map(data_formator, batched=True, num_proc=datatate.num_data_proc,
                                        fn_kwargs={"tokenizer": tokenizer, **data_cfg_dict},
                                        desc="Processing evaluation data")

        return train_dataset, eval_dataset

    except FileNotFoundError as e:
        logging.error(f"File not found: {e}")
        raise
    except ValueError as e:
        logging.error(f"Value error: {e}")
        raise
    except Exception as e:
        logging.error(f"An unexpected error occurred: {e}")
        raise


from load_ini import load_args_from_ini
def train_mod():
    """End-to-end training entry point.

    Reads model / data / training configs from ``train_config.ini``, builds
    the LoRA-wrapped model and its tokenizer, prepares the datasets, and
    runs the Hugging Face Trainer.
    """
    model_cfg, data_cfg, train_cfg = load_args_from_ini("train_config.ini")

    # Base model wrapped with the LoRA adapters from finetune.lo_conf.
    model = get_peft_model(beginmod(model_cfg), lo_conf)
    tokenizer = beginwords(model_cfg)

    # Tokenized train / eval splits.
    train_ds, eval_ds = dataload(data_cfg, tokenizer)

    # Build the Trainer and launch training.
    trainer = begintr(model, train_cfg, train_ds, eval_ds, tokenizer)
    trainer.train()

