# Apparently only finetuned_bert_sentiment moves its model onto the GPU.
# Hugging Face models also require an explicit model.to(device).
from transformers import BertTokenizer, BertForSequenceClassification, pipeline
from torch.nn.functional import softmax
import os
import torch
# Route Hugging Face hub traffic through the hf-mirror endpoint.
# NOTE(review): this is set AFTER `transformers` is imported above;
# huggingface_hub reads HF_ENDPOINT at import time, so this assignment may
# have no effect — consider setting it before the imports.
os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'

# Fall back to CPU so the module still imports on machines without CUDA
# (the original hard-coded "cuda:0" and crashed on CPU-only hosts).
device = "cuda:0" if torch.cuda.is_available() else "cpu"
# Running counter of completed finetuned_bert_sentiment calls (debug logging).
k = 1
def bert_sentiment(text: str):
    """Score *text* with a plain (non-finetuned) bert-base-chinese classifier.

    Assumes the model files are already downloaded locally.

    Args:
        text: the sentence to score.

    Returns:
        (probabilities, factor): the 3-class probability distribution
        (index 0/1/2 = negative/neutral/positive) and a scalar sentiment
        value in [-1, 1] computed as P(positive) - P(negative).
    """
    # NOTE(review): the 3-label classification head on the base checkpoint is
    # randomly initialised, so these scores are not meaningful until the head
    # is trained — confirm intended usage.
    tokenizer = BertTokenizer.from_pretrained('bert-base-chinese', local_files_only=True)
    model = BertForSequenceClassification.from_pretrained(
        'bert-base-chinese', num_labels=3, local_files_only=True)
    # Tokenize once (the original tokenized twice: once for a debug print,
    # once for the forward pass) and skip autograd for inference.
    encoded = tokenizer(text, return_tensors="pt")
    with torch.no_grad():
        raw_result = model(**encoded)[0]
    probabilities = softmax(raw_result, dim=1).tolist()[0]
    factor = probabilities[0] * (-1) + probabilities[2] * 1
    return probabilities, factor


# Module-level singletons: load the finetuned financial-news sentiment
# checkpoint once at import time and move the model to the target device.
_FINETUNED_CKPT = 'hw2942/bert-base-chinese-finetuning-financial-news-sentiment-v2'
tokenizer = BertTokenizer.from_pretrained(_FINETUNED_CKPT, local_files_only=True)
model = BertForSequenceClassification.from_pretrained(
    _FINETUNED_CKPT, num_labels=3, local_files_only=True)
model.to(device)
def finetuned_bert_sentiment(text: str):
    """Run the finetuned financial-news sentiment model on *text*.

    Uses the module-level ``tokenizer``/``model`` singletons (already on
    ``device``).

    Args:
        text: the sentence to score.

    Returns:
        (probabilities, factor): 3-class probabilities (index 0/1/2 =
        negative/neutral/positive) and factor = P(positive) - P(negative).
    """
    encoded = tokenizer(text, return_tensors="pt", padding=True, truncation=True)
    ids = encoded['input_ids'].to(device)
    mask = encoded['attention_mask'].to(device)
    with torch.no_grad():
        logits = model(ids, attention_mask=mask)[0]
        # Progress logging: count how many sentences have been analysed.
        global k
        print(f"第{k}次情感分析完成")
        k += 1
        probabilities = softmax(logits, dim=1).tolist()[0]
        factor = probabilities[2] - probabilities[0]
    # index 0/1/2 = negative/neutral/positive
    return probabilities, factor

def bert_customed(text: str):
    """Score *text* with the locally finetuned binary classifier in ./SFT_model.

    Assumes ./SFT_model has already been produced (e.g. by a finetuning run).

    Args:
        text: the sentence to classify.

    Returns:
        The raw (un-softmaxed) logits tensor of shape (1, 2).
    """
    tokenizer = BertTokenizer.from_pretrained('./SFT_model', local_files_only=True)
    # num_labels=2: this checkpoint is a BINARY classifier (the original
    # comment wrongly claimed 3 sentiment classes).
    model = BertForSequenceClassification.from_pretrained(
        './SFT_model', num_labels=2, local_files_only=True)
    # Inference only — skip autograd bookkeeping.
    with torch.no_grad():
        raw_result = model(**tokenizer(text, return_tensors="pt"))[0]
    print(raw_result)  # debug: raw logits
    return raw_result
# Smoke test: classify one sentence and print the argmax class index.
_logits = bert_customed("怎么了")
out = softmax(_logits, dim=1).tolist()[0]
print(out.index(max(out)))

