from dataclasses import dataclass
from typing import Any
from transformers import BertTokenizer, BertForNextSentencePrediction,Trainer,TrainingArguments,DataCollatorWithPadding
from torch.utils.data import Dataset,DataLoader
from functools import partial
import json
import numpy as np
import torch
from modeling import BertForSentencePrediction

# for param in model.bert.parameters():
#     param.requires_grad = False

# with open('output.jsonl','r',encoding='utf-8') as f:
#     total_data=list(map(partial(json.loads),f.readlines()))


# print(len(total_data))
# train_data=total_data[:205000]
# test_data=total_data[205000:]




# print(test_data)
# class TrainDataset(Dataset):
#     def __init__(self,trainData,tokenizer):
#         self.data=trainData
#         self.tokenizer=tokenizer

#     def __getitem__(self, index) :
#         d=self.data[index]

#         return {'input':[d['pair1'],d['pair2']],'label':d['label']}
    
#     def __len__(self):
#         return len(self.data)
# train_dataset=TrainDataset(train_data,tokenizer)
# eval_dataset=TrainDataset(test_data,tokenizer)

@dataclass
class DataCollator:
    """Collate a batch of ``{'input': str, 'label': int}`` samples into tensors.

    Attributes:
        tokenizer: HuggingFace tokenizer used to encode the input texts.
        max_length: Maximum token length; longer inputs are truncated.
        padding: Padding strategy forwarded to the tokenizer
            (``True`` pads to the longest sequence in the batch).
    """
    tokenizer : BertTokenizer
    max_length:int
    padding:bool=True

    def __call__(self, batch):
        texts = [sample['input'] for sample in batch]
        labels = [sample['label'] for sample in batch]
        # Encode the whole batch at once; yields input_ids, attention_mask, etc.
        encodings = self.tokenizer(
            texts,
            return_tensors="pt",
            truncation=True,
            max_length=self.max_length,
            padding=self.padding,  # fix: was hard-coded True, silently ignoring the field
        )
        return {**encodings, 'labels': torch.tensor(labels)}

def class_name_to_int(s):
    """Map a label name to its integer class id.

    Returns 0 for "no label" markers: the int 0 (produced by ``fillna(0)``),
    the literal string ``'nan'``, or a genuine float NaN. For combined labels
    like ``'A/B'`` only the part before the first ``'/'`` is used.

    Raises:
        KeyError: if the (possibly split) label name is unknown.
    """
    if s == 0 or s == 'nan':
        return 0
    # A real float NaN (e.g. an un-filled pandas cell from the JSONL path,
    # which never goes through fillna) would crash the `in` check below.
    if isinstance(s, float) and s != s:
        return 0
    if '/' in s:
        s = s.split('/')[0]
    label_to_id = {'最低分': 1, '录取人数': 2, '最低位次': 3}
    return label_to_id[s]

def compute_metrics(eval_preds):
    """Compute top-1 accuracy for the Trainer's evaluation loop.

    Args:
        eval_preds: Object exposing ``predictions`` (per-sample score arrays)
            and ``label_ids`` (gold class indices), as passed by
            ``transformers.Trainer``.

    Returns:
        dict with a single ``'accuracy'`` entry in [0, 1]; 0.0 for an
        empty evaluation set.
    """
    num_predict = 0
    num_correct = 0
    for predict, label in zip(eval_preds.predictions, eval_preds.label_ids):
        num_predict += 1
        # argmax over the (flattened) score vector picks the predicted class.
        if np.argmax(predict).item() == label:
            num_correct += 1
    # Guard against an empty eval set, which would otherwise divide by zero.
    if num_predict == 0:
        return {'accuracy': 0.0}
    return {'accuracy': num_correct / num_predict}


import pandas as pd
# Evaluation set: an annotated spreadsheet with question/label columns.
df = pd.read_excel('/home/lxy/multiR/NSPtraining/测试集.xlsx')
# Empty cells become 0, which class_name_to_int maps to class 0.
df = df.fillna(0)
# '问题' = question text column, '标签' = label column.
specific_columns_eval = df.loc[:, '问题']
answer_columns_eval = [class_name_to_int(x) for x in df.loc[:, '标签']]

# Training set: one JSON object per line with 'question' and 'label' keys.
# (json is already imported at the top of the file; the duplicate import
# that used to live here was removed.)
with open('/home/lxy/multiR/NSPtraining/高报分类.jsonl', encoding='utf-8') as f:
    data = [json.loads(line) for line in f]
    question_list = [x['question'] for x in data]
    answer_list = [class_name_to_int(x['label']) for x in data]
    
class TrainDataset(Dataset):
    """Map-style dataset over (text, label) pairs for the HuggingFace Trainer."""

    def __init__(self, trainData):
        # trainData is a sequence of indexable pairs: (question_text, int_label).
        self.data = trainData

    def __getitem__(self, index):
        # Shape each pair into the dict format the DataCollator expects.
        sample = self.data[index]
        return {'input': sample[0], 'label': sample[1]}

    def __len__(self):
        return len(self.data)



# Pair each question with its integer label for the Dataset wrapper.
train_data=list(zip(question_list,answer_list))
eval_data=list(zip(specific_columns_eval,answer_columns_eval))

train_dataset=TrainDataset(train_data)
eval_dataset=TrainDataset(eval_data)

    
# Load the tokenizer and the custom classification model from a local
# gte-large-zh checkpoint (BertForSentencePrediction comes from modeling.py).
tokenizer = BertTokenizer.from_pretrained("/data/lxy/thenlper/gte-large-zh")
model=BertForSentencePrediction.from_pretrained("/data/lxy/thenlper/gte-large-zh")

# Trainer hyper-parameters: evaluate and checkpoint once per epoch, keep only
# the 3 most recent checkpoints.
train_argument=TrainingArguments(
    output_dir='bge_w_nsp',
    do_train =True,
    auto_find_batch_size =True,  # let the backend pick the largest batch that fits
    # per_device_train_batch_size=16,
    # per_device_eval_batch_size=16,
    evaluation_strategy  ='epoch',
    # eval_steps =100,
    gradient_accumulation_steps =1,
    num_train_epochs=30,
    lr_scheduler_type = "cosine",
    save_strategy ="epoch",
    # Keep the raw 'input'/'label' columns so the custom DataCollator sees them.
    remove_unused_columns=False,
    fp16=True,  # NOTE(review): mixed precision requires a CUDA GPU — confirm
    # NOTE(review): metric_for_best_model is usually paired with
    # load_best_model_at_end=True; verify its absence here is intentional.
    metric_for_best_model = 'accuracy',
    greater_is_better =True,
    save_total_limit =3
)
trainer=Trainer(
    model=model,
    args=train_argument,
    train_dataset =train_dataset,
    eval_dataset =eval_dataset,
    tokenizer =tokenizer,
    # data_collator =DataCollatorWithPadding(
        # tokenizer=tokenizer,
        # max_length =512,
        # padding =True,
        # ),
    data_collator=DataCollator(
        tokenizer=tokenizer,
        max_length =512,
        padding =True,
        ),
    compute_metrics=compute_metrics
    
)

# Kick off fine-tuning; checkpoints land in output_dir ('bge_w_nsp').
trainer.train()
