# -*- coding: utf-8 -*-
"""
@Time ： 2024/3/29 23:57
@Auth ： fcq
@File ：process_sentiment_augment_VAST.py
@IDE ：PyCharm
@Motto：ABC(Always Be Coding)
"""
from transformers import AutoModelForSequenceClassification
from transformers import TFAutoModelForSequenceClassification
from transformers import AutoTokenizer, AutoConfig
import numpy as np
from tqdm import tqdm
import torch
from scipy.special import softmax
import json


# Preprocess text (username and link placeholders)
# Regarding stance labels: likely 0 = against (con), 1 = favor (pro), 2 = neutral
def preprocess(text):
    """Mask user mentions and URLs in a whitespace-tokenized tweet.

    Any token starting with '@' (longer than a single character) becomes
    '@user', and any token starting with 'http' becomes 'http'; all other
    tokens pass through unchanged. Tokens are split and rejoined on single
    spaces.
    """
    def _mask(token):
        if token.startswith('@') and len(token) > 1:
            return '@user'
        if token.startswith('http'):
            return 'http'
        return token

    return " ".join(_mask(tok) for tok in text.split(" "))


# Local path to the CardiffNLP twitter-roberta-base-sentiment-latest checkpoint.
MODEL = '../../../bot-detection/BERT_PretrainModel/twitter_roberta_base_sentiment_latest'
# MODEL = "D:/Project/bot-detection/BERT_PretrainModel/twitter_roberta_base_sentiment_latest"
tokenizer = AutoTokenizer.from_pretrained(MODEL)
config = AutoConfig.from_pretrained(MODEL)
# PT
model = AutoModelForSequenceClassification.from_pretrained(MODEL).cuda()
# model.save_pretrained(MODEL)

# Load VAST dev examples; each entry is a dict with at least 'text' and 'label'.
with open('../VAST/vast_dev.csv.json', 'r', encoding='utf-8') as f:
    data = json.load(f)

new_list = []
model.eval()
with torch.no_grad():
    for sub_data in tqdm(data):
        text = sub_data['text']
        label = sub_data['label']
        text_new = preprocess(text)
        # BUG FIX: previously the raw `text` was tokenized, so the
        # mention/URL masking done by preprocess() was never used.
        encoded_input = tokenizer(text_new, return_tensors='pt')
        encoded_input['input_ids'] = encoded_input['input_ids'].cuda()
        encoded_input['attention_mask'] = encoded_input['attention_mask'].cuda()
        output = model(**encoded_input)
        # Sentiment model label ids are 0=negative, 1=neutral, 2=positive
        # (per the commented id2label lookups below); take the argmax class.
        scores = int(torch.argmax(output[0][0], -1))
        # Remap to the stance convention (0=con, 1=pro, 2=neutral) by
        # swapping the neutral and positive ids.
        if scores == 1:
            scores = 2
        elif scores == 2:
            scores = 1
        sub_data['senti_score'] = scores
        # cross_senti = 1 when the remapped sentiment agrees with the stance label.
        sub_data['cross_senti'] = 1 if scores == label else 0
        new_list.append(sub_data)

with open('../VAST/vast_dev_cross_senti.json', 'w', encoding='utf-8') as f:
    json.dump(new_list, f, ensure_ascii=False)

# text = "Covid cases are increasing fast!"
# text = preprocess(text)
#
#
#
# encoded_input = tokenizer(text, return_tensors='pt')
# output = model(**encoded_input)
# scores = output[0][0].detach()
# scores = int(torch.argmax(scores, -1))
# l = config.id2label[0] # 0 表示negative
# l = config.id2label[1] # 1 表示neutral
# l = config.id2label[2] # 2 表示positive
#
# # # TF
# # model = TFAutoModelForSequenceClassification.from_pretrained(MODEL)
# # model.save_pretrained(MODEL)
# # text = "Covid cases are increasing fast!"
# # encoded_input = tokenizer(text, return_tensors='tf')
# # output = model(encoded_input)
# # scores = output[0][0].numpy()
# # scores = softmax(scores)
# # Print labels and scores
# ranking = np.argsort(scores)
# ranking = ranking[::-1]
# for i in range(scores.shape[0]):
#     l = config.id2label[ranking[i]]
#     s = scores[ranking[i]]
#     print(f"{i+1}) {l} {np.round(float(s), 4)}")
