import os
import csv
import copy
from dataclasses import dataclass
from typing import Dict, Any, Optional, Union
import datetime

import torch
from torch.utils.data import Dataset
from transformers import logging
from transformers import PreTrainedTokenizer,PreTrainedTokenizerBase
from transformers import Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.utils import PaddingStrategy
from transformers import GPT2LMHeadModel,AutoTokenizer

# Run wandb in offline mode so training never blocks on network access.
# NOTE: the original set WANDB_MODE twice ("offline" then "dryrun");
# "dryrun" is only a deprecated alias of "offline", so set it once.
os.environ["WANDB_MODE"] = "offline"

# Directory containing this script; all data/output paths resolve against it.
project_dir = os.path.dirname(os.path.abspath(__file__))

# Timestamp that makes each run's output directory unique.
# Use "-" instead of ":" — ":" is not a legal filename character on Windows.
cur_time = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")

# Hyper-parameters and file locations.
gpt2_model_name_or_path = 'gpt2'
# Training CSVs: each must have a 'text' column and (optionally) a
# 'label'/'generated' column marking AI-generated rows.
train_path = [
    os.path.join(project_dir, 'data', name)
    for name in (
        'train_drcat_01.csv',
        'train_drcat_02.csv',
        'train_drcat_03.csv',
        'train_drcat_04.csv',
        'train_v2_drcat_02.csv',
        'ai_generated_train_essays_gpt-4.csv',
        'ai_generated_train_essays.csv',
        'train_from_LLM-Detect_AI-GT_1MNB-3SGD.csv',
        'final_train.csv',
    )
]
output_path = os.path.join(project_dir, 'pretrained_models', f'gpt2_pretrained_{cur_time}')
batch_size = 32
epochs = 3
lr = 5e-5

# Module-level logger obtained through transformers' logging wrapper.
logger = logging.get_logger(__name__)

class PretrainDialogDataset(Dataset):
    """Dataset of AI-generated essays for causal-LM (GPT-2) pretraining.

    Reads one or more CSV files, drops rows whose label column is '0'
    (i.e. keeps machine-generated text), tokenizes each text up to
    ``block_size`` tokens, and stores matching input_ids/labels pairs.
    """

    def __init__(self, tokenizer, file_path: Union[str, list], block_size: int):
        """
        Args:
            tokenizer: any tokenizer exposing
                ``encode(text, truncation=..., max_length=...)``.
            file_path: one CSV path or a list of CSV paths. Each file needs a
                'text' column and may have a 'label' or 'generated' column.
            block_size: maximum number of tokens kept per example.
        """
        logger.info(f"Creating features from dataset file at {file_path}")

        # Normalize to a list so the single-path and multi-path cases share
        # one code path (the original duplicated the whole reading loop).
        paths = [file_path] if isinstance(file_path, str) else file_path

        input_ids = []
        for path in paths:
            with open(path, encoding="utf-8") as f:
                reader = csv.reader(f)
                header = next(reader)
                text_idx = header.index('text')
                # The label column is named 'label' or 'generated' depending
                # on the data source; it may also be absent entirely.
                if 'label' in header:
                    label_idx = header.index('label')
                elif 'generated' in header:
                    label_idx = header.index('generated')
                else:
                    label_idx = None
                for line in reader:
                    # Skip human-written rows ('0'). When there is no label
                    # column, keep every row — the original indexed
                    # line[None] here and crashed on unlabeled files.
                    if label_idx is not None and line[label_idx] == '0':
                        continue
                    input_ids.append(
                        tokenizer.encode(line[text_idx], truncation=True, max_length=block_size)
                    )

        # For causal-LM training the labels are a copy of the inputs.
        labels = copy.deepcopy(input_ids)
        self.examples = [
            {"input_ids": ids, "labels": lbs}
            for ids, lbs in zip(input_ids, labels)
        ]

    def __len__(self) -> int:
        return len(self.examples)

    def __getitem__(self, i) -> Dict[str, list]:
        # Each example holds plain token-id lists; tensor conversion is the
        # data collator's job (original annotation wrongly said torch.tensor).
        return self.examples[i]


@dataclass
class DataCollatorForDialog:
    """Pads a batch of {'input_ids', 'labels'} examples into rectangular tensors.

    Inputs are padded with the tokenizer's pad token and labels with
    ``label_pad_token_id`` (-100, the value ignored by cross-entropy loss);
    an attention mask marks real vs. padded positions.
    """

    # String annotations keep the dataclass importable even when transformers
    # types are unavailable at annotation-evaluation time; behavior is identical.
    tokenizer: "PreTrainedTokenizerBase"
    model: Optional[Any] = None
    padding: "Union[bool, str, PaddingStrategy]" = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def __call__(self, features, return_tensors=None):
        """
        Args:
            features: list of dicts, each with 'input_ids' and 'labels'
                token-id lists (possibly of different lengths).
        Returns:
            dict with 'input_ids', 'attention_mask' and 'labels' LongTensors,
            all padded to the batch maximum length.
        """
        if return_tensors is None:
            return_tensors = self.return_tensors

        # Copy each sequence before padding: the dataset hands out its own
        # stored lists, and extending them in place (as the original did)
        # permanently corrupts the dataset — sequences kept growing with
        # padding across epochs.
        input_ids = [list(item['input_ids']) for item in features]
        labels = [list(item['labels']) for item in features]

        input_max_length = max(len(seq) for seq in input_ids)
        label_max_length = max(len(seq) for seq in labels)

        attention_mask = []
        for _input_ids, _labels in zip(input_ids, labels):
            pad_len = input_max_length - len(_input_ids)
            attention_mask.append([1] * len(_input_ids) + [0] * pad_len)
            _input_ids.extend([self.tokenizer.pad_token_id] * pad_len)
            # Use the configurable field instead of a hard-coded -100.
            _labels.extend([self.label_pad_token_id] * (label_max_length - len(_labels)))

        return {
            'input_ids': torch.LongTensor(input_ids),
            'attention_mask': torch.LongTensor(attention_mask),
            'labels': torch.LongTensor(labels),
        }

# Build tokenizer and model from the pretrained GPT-2 checkpoint.
tokenizer = AutoTokenizer.from_pretrained(gpt2_model_name_or_path)
model = GPT2LMHeadModel.from_pretrained(gpt2_model_name_or_path)
# GPT-2 ships without a pad token; reuse EOS so batch padding works.
if tokenizer.pad_token is None:
    tokenizer.pad_token_id = tokenizer.eos_token_id
    tokenizer.pad_token = tokenizer.eos_token

# Build the training dataset from the configured CSV files.
train_dataset = PretrainDialogDataset(
    tokenizer=tokenizer,
    file_path=train_path,
    block_size=400  # block_size caps the token length of each input example
)

# Collator that pads each batch into rectangular tensors.
data_collator = DataCollatorForDialog(
    tokenizer=tokenizer
)

# Training arguments.
config = Seq2SeqTrainingArguments(
    output_dir=output_path,  # directory for checkpoints and the final model
    overwrite_output_dir=True,
    num_train_epochs=epochs,  # number of training epochs
    per_device_train_batch_size=batch_size,  # per-device train batch size
    per_device_eval_batch_size=1,  # best kept at 1: pad tokens can affect predictions
    save_total_limit=2,
    save_steps=50000,
    learning_rate=lr,
    remove_unused_columns=False,  # keep custom columns so the collator sees them
    evaluation_strategy='no',
)

# Create the Trainer and start training.
trainer = Seq2SeqTrainer(
    model=model,
    args=config,
    data_collator=data_collator,
    train_dataset=train_dataset
)

# Train, then save the final model to the output directory.
trainer.train()
trainer.save_model(output_path)