from dataclasses import dataclass
from typing import Any
from transformers import BertTokenizer, BertForNextSentencePrediction,Trainer,TrainingArguments,DataCollatorWithPadding
from torch.utils.data import Dataset,DataLoader
from functools import partial
import json
import torch
from torch.nn.functional import softmax
from modeling import BertForSentencePrediction

import os


def compute_metrics(predictions, label_ids):
    """Count how many predictions match their gold labels.

    Args:
        predictions: 1-D tensor of predicted class ids.
        label_ids: 1-D tensor of gold class ids, same shape as predictions.

    Returns:
        int: number of positions where prediction == label.
    """
    # .item() converts the 0-d result tensor to a plain Python int so the
    # caller's running total prints as "42/100" instead of "tensor(42)/100".
    return predictions.eq(label_ids).sum().item()

class myDataset(Dataset):
    """Simple question/label pair dataset for the evaluation DataLoader.

    Wraps two parallel sequences: questions (strings) and integer class labels.
    """

    def __init__(self, specific_columns, answer_columns) -> None:
        super().__init__()
        self.specific_columns = specific_columns  # question texts
        self.answer_columns = answer_columns      # integer class labels

    def __getitem__(self, index) -> Any:
        # BUG FIX: the original read the module-level globals
        # `specific_columns` / `answer_columns` instead of the instance
        # attributes, silently ignoring the constructor arguments.
        return {'question': self.specific_columns[index],
                'label': self.answer_columns[index]}

    def __len__(self):
        return len(self.specific_columns)

def collator(batch, tokenizer, device='cuda'):
    """Collate dataset items into a tokenized, device-placed batch.

    Args:
        batch: list of {'question': str, 'label': int} dicts from myDataset.
        tokenizer: HuggingFace tokenizer; called with padding so variable-length
            questions batch together.
        device: target device for the batch (default 'cuda', matching the
            original hard-coded behavior; pass 'cpu' for CPU-only runs).

    Returns:
        dict with 'question' (tokenized model inputs) and 'label' (LongTensor).
    """
    questions = [example['question'] for example in batch]
    labels = [example['label'] for example in batch]
    return {'question': tokenizer(questions, return_tensors="pt", padding=True,).to(device),
            'label': torch.LongTensor(labels).to(device)}

# Evaluate every fine-tuned checkpoint found under bge_w_nsp against the
# labelled Excel test set, printing "<checkpoint> <correct>/<total>" per model.
folder_list=os.listdir('/home/lxy/multiR/NSPtraining/bge_w_nsp')
for folder in folder_list:
    folder=os.path.join('/home/lxy/multiR/NSPtraining/bge_w_nsp/',folder)
    tokenizer = BertTokenizer.from_pretrained(folder)
    # fp16 weights + BetterTransformer for faster inference; requires CUDA.
    model=BertForSentencePrediction.from_pretrained(folder,torch_dtype=torch.float16).to_bettertransformer()
    model.cuda()
    
    def class_name_to_int(s):
        # Map a raw label cell to an integer class id.
        # 0 / 'nan' come from df.fillna(0) on empty cells -> class 0 (no label).
        if s==0 or s=='nan':
            return 0
        # Compound labels like 'A/B' keep only the first component.
        if r'/' in s:
            s=s.split('/')[0]
        # '最低分' = minimum score, '录取人数' = admission count,
        # '最低位次' = minimum rank.
        # NOTE(review): any other label raises KeyError — confirm the
        # spreadsheet only ever contains these three labels or blanks.
        d={'最低分':1,'录取人数':2,'最低位次':3,}
        return d[s]

    # Read from Excel ========================================
    import pandas as pd
    # Load the whole Excel test set (测试集.xlsx).
    df = pd.read_excel('/home/lxy/multiR/NSPtraining/测试集.xlsx')
    df=df.fillna(0)
    # Select the question ('问题') and label ('标签') columns.
    specific_columns = df.loc[:,'问题']
    answer_columns=[class_name_to_int(x) for x in df.loc[:,'标签']]
    # Read from Excel ========================================

    # # Read from jsonl ========================================
    # import json
    # with open('/home/lxy/multiR/NSPtraining/高报分类.jsonl',encoding='utf-8') as f:
    #     data=list(map(json.loads,f.readlines()))
    #     specific_columns=[x['question'] for x in data]
    #     answer_columns=[class_name_to_int(x['label']) for x in data]
    # # Read from jsonl ========================================

    # wrong_cnt=0
    # for i,question in enumerate(specific_columns):
    #     inputs=tokenizer(question,return_tensors='pt').to('cuda')
    #     ans=model(**inputs)

    #     logits = torch.argmax(ans.logits,dim=-1)
    #     # print(logits)

    #     # # Excel variant
    #     # if logits!=class_name_to_int(answer_columns[i]):
    #     #     wrong_cnt+=1

    #     # jsonl variant
    #     if logits!=answer_columns[i]:
    #         wrong_cnt+=1

    # print(folder.split('/')[-1],f'{wrong_cnt}/{len(specific_columns)}')
        
    # Batched decoding: run inference over the whole test set, 64 items at a time.
    with torch.inference_mode():
        acc=0
        dataset=myDataset(specific_columns,answer_columns)
        dataloader=DataLoader(dataset,64,collate_fn=partial(collator,tokenizer=tokenizer))
        for batch in dataloader:
            inputs=batch['question']
            ans=model(**inputs)
            labels=batch['label']
            # Predicted class = argmax over the classification logits.
            logits = torch.argmax(ans.logits,dim=-1)
            # print(logits)
            # Accumulate the number of correct predictions in this batch.
            acc+=compute_metrics(logits,labels)
    # Report "<checkpoint name> <correct>/<total>" for this checkpoint.
    print(folder.split('/')[-1],f'{acc}/{len(specific_columns)}')




# logits = softmax(ans.logits,dim=-1)
# print(logits)
