from transformers import AutoModelForSequenceClassification,AutoTokenizer,DataCollatorWithPadding
from datasets import load_dataset
import torch
from torch.utils.data import DataLoader,Dataset,random_split
import pandas as pd
import pathlib
from torch.optim import Adam
from rich import print
import evaluate

# accuracy = evaluate.load("accuracy")
# accuracy



# Select the compute device: prefer CUDA when available, otherwise CPU.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print(device)

# Paths resolved relative to this script: the hotel-review CSV dataset and the
# location where the fine-tuned checkpoint is written by train() / read by predict().
file_path = pathlib.Path(__file__).parent.joinpath('data').joinpath('ChnSentiCorp_htl_all.csv')
save_model_path = pathlib.Path(__file__).parent.joinpath('models').joinpath('save_dataset.pt')
# Local directory holding the pretrained model + tokenizer ("floder" is a typo
# for "folder"; kept because this name is referenced throughout the file).
model_floder = r'D:\Models\rbt3'
# model_floder = 'hfl/rbt3'



# Load the CSV as a single 'train' split, then drop rows whose review text is
# missing (the raw file contains a few NaN reviews).
d = load_dataset('csv', data_files=str(file_path.resolve()), split='train')
data_init = d.filter(lambda x: x['review'] is not None)


# class MyData(Dataset):
#     def __init__(self):
#         super().__init__()
#         self.data = pd.read_csv(file_path)
#         self.data = self.data.dropna()
#     def __getitem__(self, index):
#         return self.data.iloc[index]['review'],self.data.iloc[index]['label']
#     def __len__(self) -> int:
#         return len(self.data)
    

# d = MyData()
# Hold out 10% of the cleaned data as a test split (random shuffle).
data_init = data_init.train_test_split(test_size=0.1)


# total_length = len(data_init)
# train_length = int(total_length * 0.9)
# test_length = total_length - train_length  # 确保总和等于数据集长度
# all_dataset = data_init.random_split(data_init,lengths=[train_length,test_length])
# print(len(train),len(test))

# Tokenizer matching the pretrained checkpoint; used both for the per-example
# tokenization step and by the dynamic-padding collator.
tokenizer = AutoTokenizer.from_pretrained(model_floder)


# def collate_fn_batch(batch):
#     inputs = []
#     labels = []
#     for b in batch:
#         inputs.append(b[0])
#         labels.append(b[1])
#     outputs = tokenizer(inputs,max_length=128,padding=True,truncation=True,return_tensors='pt')
#     outputs['labels'] = torch.tensor(labels)
#     return outputs

def collate_fn_batch(batch):
    """Tokenize one dataset example for sequence classification.

    Despite the name, this runs per-example (it is passed to ``datasets.map``
    without ``batched=True``), so ``batch['review']`` is a single string and
    ``batch['label']`` a single int.  Padding is deliberately deferred to
    ``DataCollatorWithPadding`` at DataLoader time.
    """
    outputs = tokenizer(batch['review'], max_length=128, truncation=True)
    # The collator / model expect the target under the 'labels' key.
    outputs['labels'] = torch.tensor(batch['label'])
    return outputs

# Tokenize every example, drop the raw text/label columns, and pick out the
# train/validation subsets produced by train_test_split above.
data_init_all = data_init.map(collate_fn_batch,remove_columns=d.column_names)
trainset, validset = data_init_all["train"], data_init_all["test"]


# Dynamic padding: each batch is padded only to its own longest sequence.
collactor = DataCollatorWithPadding(tokenizer=tokenizer)
data_train = DataLoader(trainset,batch_size=64,shuffle=True,collate_fn=collactor)
data_test = DataLoader(validset,batch_size=64,shuffle=True,collate_fn=collactor)

# next(iter(train_d))
# data_train = DataLoader(train,shuffle=True,batch_size=64,collate_fn=collate_fn_batch)
# data_test = DataLoader(train,shuffle=True,batch_size=200,collate_fn=collate_fn_batch)

# tmp_d = next(iter(data_train))
# print(tmp_d)

# Load the pretrained model with a sequence-classification head.

model = AutoModelForSequenceClassification.from_pretrained(model_floder)
# print(res)

opt = Adam(model.parameters(),lr=2e-5)

def evaluate_meterial(model, data_test):
    """Evaluate *model* on *data_test* and print its accuracy.

    Accuracy is computed two ways — a manual running count and the
    ``evaluate`` "accuracy" metric — and both are printed once at the end.
    """
    # clf_metrics = evaluate.combine(["accuracy", "f1", "recall", "precision"])
    clf_metrics = evaluate.load("accuracy")

    # Inference mode: disable dropout and gradient tracking.
    model.eval()
    # Keep inputs on the same device as the model (it may have been trained
    # and saved on GPU, while the DataLoader yields CPU tensors).
    dev = next(model.parameters()).device

    sum_ = 0
    currancy_value = 0
    print('all evaluate', len(data_test))
    with torch.no_grad():
        for batch in data_test:
            batch = {k: v.to(dev) for k, v in batch.items()}
            res = model(**batch)
            predict_values = torch.argmax(res.logits, dim=-1)
            reference = batch['labels']
            # Bug fix: the metric keyword is `predictions`, not `predicts`.
            clf_metrics.add_batch(references=reference, predictions=predict_values)
            diff = predict_values.long() == reference.long()
            currancy_value += diff.sum().item()
            sum_ += len(diff)

    print('accuracy', round(currancy_value / sum_, 4))
    # Bug fix: compute() drains the accumulated batches, so it must be called
    # exactly once after the loop (the original called it per batch, leaving
    # the final compute with only the last batch).
    print('sub_clf_metrics', clf_metrics.compute())
        

def train(model=model, opt=opt, epochs=3, data_train=data_train, data_test=data_test):
    """Fine-tune *model* for *epochs* passes over *data_train*.

    Prints the running-average loss every 10 steps and checkpoints the whole
    model every 30 steps.  The defaults bind the module-level model, optimizer
    and dataloaders, so ``train()`` works with no arguments.
    """
    model.train()
    model.to(device)
    # Bug fix: make sure the checkpoint directory exists before the first
    # torch.save call (the 'models' folder may not have been created yet).
    save_model_path.parent.mkdir(parents=True, exist_ok=True)
    loss = []
    print('总共的轮次', len(data_train))
    for epoch in range(epochs):
        for index, batch in enumerate(data_train):
            opt.zero_grad()

            # Move the collated batch onto the training device.
            batch = {k: v.to(device) for k, v in batch.items()}

            res = model(**batch)
            res.loss.backward()
            opt.step()
            loss.append(res.loss.item())
            if index % 10 == 0:
                # Average loss over the last window, then reset the window.
                print('loss', round(sum(loss) / len(loss), 4))
                loss = []
            if index != 0 and index % 30 == 0:
                # Periodic checkpoint of the full model object.
                torch.save(model, save_model_path)
    

def predict(data_test=data_test):
    """Load the saved checkpoint and report its accuracy on *data_test*."""
    model_path = save_model_path
    # NOTE: weights_only=False unpickles arbitrary objects — acceptable only
    # because the checkpoint is produced locally by train(); never load
    # untrusted files this way.
    model = torch.load(model_path, weights_only=False)
    # Bug fix: switch to inference mode so dropout doesn't perturb evaluation.
    model.eval()
    evaluate_meterial(model, data_test)
    

if __name__ == "__main__":
    # train()  # uncomment to fine-tune and write the checkpoint first
    predict()
    
    
    
    

    
    
    
    
    










