from sklearn.metrics import accuracy_score, recall_score, f1_score, precision_score
import json
import torch
from params import get_config
import models
from dataset import Dataset
from transformers import AdamW
from transformers.optimization import get_scheduler
from models import BertModel
from transformers import BertTokenizer

# Additional dependencies for ROUGE / semantic-similarity evaluation
from rouge_score import rouge_scorer, tokenizers
from sklearn.metrics.pairwise import cosine_similarity
import jieba

# Custom tokenizer (keeps the original implementation)
class JiebaTokenizer(tokenizers.Tokenizer):
    """rouge_score tokenizer backed by jieba word segmentation for Chinese text."""

    def tokenize(self, text):
        # Materialize jieba's generator of segments into a list of tokens.
        return [token for token in jieba.cut(text)]

# Initialize global components: a module-level ROUGE-L scorer using jieba
# tokenization. Stemming is disabled since it does not apply to Chinese text.
scorer = rouge_scorer.RougeScorer(['rougeL'], tokenizer=JiebaTokenizer(), use_stemmer=False)


def read_json(data_file):
    """Load and return the JSON content of *data_file* (UTF-8 encoded)."""
    with open(data_file, 'r', encoding='utf-8') as fp:
        return json.load(fp)
def evaluation_target_opinion_stance(model_target, model2_opinion, dataset, collate_fn_target, collate_fn_opinion, test_batch_size, bert_tokenizer, device):
    """Evaluate the target-classification model on *dataset*, then run
    opinion and stance evaluation on the subset of samples whose target
    was predicted correctly.

    :param model_target: trained target-classification model
    :param model2_opinion: trained opinion-classification model, forwarded
        to evaluate_opinion
    :param dataset: indexable dataset; each item is a tuple whose index 2
        holds the gold target label (per the collate/label extraction below)
    :param collate_fn_target: collate function producing
        (input_ids, attention_mask, token_type_ids, labels) batches
    :param collate_fn_opinion: collate function for the opinion model
    :param test_batch_size: batch size for the evaluation DataLoader
    :param bert_tokenizer: tokenizer, forwarded to the opinion/OpSim step
    :param device: torch device for the OpSim BERT encoder
    """
    pre = []
    loader_test = torch.utils.data.DataLoader(dataset=dataset,
                                              batch_size=test_batch_size,
                                              collate_fn=collate_fn_target,
                                              shuffle=False,
                                              drop_last=False)

    # Switch the downstream model to inference mode.
    model_target.eval()

    # Collect argmax predictions batch by batch, without building autograd graphs.
    for input_ids, attention_mask, token_type_ids, labels in loader_test:
        with torch.no_grad():
            out = model_target(input_ids=input_ids,
                               attention_mask=attention_mask,
                               token_type_ids=token_type_ids)
        pre += out.argmax(dim=1).tolist()

    # Gold target labels live at index 2 of each dataset item.
    labels = [item[2] for item in dataset]

    correct_indices = []  # indices of correctly predicted samples
    correct_samples = []  # original data of the correctly predicted samples

    for idx, (pred_label, true_label) in enumerate(zip(pre, labels)):
        if pred_label == true_label:
            correct_indices.append(idx)
            correct_samples.append(dataset[idx])  # dataset must support indexing

    # Target-level metrics.
    acc = accuracy_score(labels, pre)
    acc_nums = accuracy_score(labels, pre, normalize=False)
    print('目标评估的acc:', acc, 'num:', acc_nums)
    micro_f1 = f1_score(labels, pre, labels=[0, 1, 2], average='micro')
    macro_f1 = f1_score(labels, pre, labels=[0, 1, 2], average='macro')
    print('目标评估micro_f1:', micro_f1)
    print('目标评估macro_f1:', macro_f1)

    # Opinion/stance evaluation only makes sense on correctly-targeted samples.
    if len(correct_samples) > 0:
        evaluate_opinion(correct_samples, model2_opinion, collate_fn_opinion, test_batch_size, bert_tokenizer, device)
        evaluate_stance(correct_samples)
    else:
        print("没有正确预测的target样本，跳过opinion评估")
# 执行后续评估逻辑
def evaluate_opinion(correct_samples, model2_opinion, collate_fn_opinion, test_batch_size, bert_tokenizer, device):
    """Evaluate the opinion classifier on the correctly-targeted samples,
    then compute the average OpSim between original and generated opinions."""
    print(f"\n开始评估 {len(correct_samples)} 个正确样本的opinion...")

    loader = torch.utils.data.DataLoader(dataset=correct_samples,
                                         batch_size=test_batch_size,
                                         collate_fn=collate_fn_opinion,
                                         shuffle=False,
                                         drop_last=False)

    # Switch the downstream model to inference mode.
    model2_opinion.eval()

    # Gather argmax predictions batch by batch.
    predictions = []
    for input_ids, attention_mask, token_type_ids, batch_labels in loader:
        with torch.no_grad():
            logits = model2_opinion(input_ids=input_ids,
                                    attention_mask=attention_mask,
                                    token_type_ids=token_type_ids)
        predictions.extend(logits.argmax(dim=1).tolist())

    # Gold opinion labels live at index 2 of each sample tuple.
    gold = [sample[2] for sample in correct_samples]

    # Classification metrics.
    acc = accuracy_score(gold, predictions)
    acc_nums = accuracy_score(gold, predictions, normalize=False)
    print('观点评估的acc:', acc, 'num:', acc_nums)
    micro_f1 = f1_score(gold, predictions, labels=[0, 1, 2], average='micro')
    macro_f1 = f1_score(gold, predictions, labels=[0, 1, 2], average='macro')
    print('观点评估的micro_f1:', micro_f1)
    print('观点评估的macro_f1:', macro_f1)

    print("\n开始OpSim评估...")
    bert_model = BertModel.from_pretrained(bert_tokenizer.name_or_path).to(device)
    # sample[3] = original opinion text, sample[4] = generated opinion text
    # (assumed layout of the dataset tuples — TODO confirm against Dataset).
    opinion_pairs = [{"origin": sample[3], "generated": sample[4]}
                     for sample in correct_samples]

    # Compute OpSim per pair; failures are reported and skipped.
    opsim_scores = []
    for pair in opinion_pairs:
        try:
            opsim_scores.append(calculate_opsim(text1=pair["origin"],
                                                text2=pair["generated"],
                                                tokenizer=bert_tokenizer,
                                                model=bert_model,
                                                device=device))
        except Exception as e:
            print(f"计算OpSim时出错：{str(e)}")

    # Report the mean over successfully computed pairs.
    if opsim_scores:
        avg_opsim = sum(opsim_scores) / len(opsim_scores)
        print(f"OpSim平均值：{avg_opsim:.4f} (样本数：{len(opsim_scores)})")
    else:
        print("无有效OpSim计算结果")




def calculate_opsim(text1, text2, tokenizer, model, device, alpha=0.5):
    """Compute the OpSim metric: a weighted blend of lexical (ROUGE-L)
    and semantic (BERT cosine) similarity.

    :param text1: first text
    :param text2: second text
    :param tokenizer: BERT tokenizer
    :param model: BERT model
    :param device: device (CPU or GPU)
    :param alpha: weight of the ROUGE-L component; (1 - alpha) goes to BERT
    :return: OpSim value
    """
    lexical = calculate_rouge_l(text1, text2)
    semantic = calculate_bert_similarity(text1, text2, tokenizer, model, device)
    return alpha * lexical + (1 - alpha) * semantic

def calculate_rouge_l(text1, text2):
    """Return the ROUGE-L F-measure between *text1* and *text2*,
    computed with the module-level jieba-based scorer."""
    result = scorer.score(text1, text2)['rougeL']
    return result.fmeasure

def calculate_bert_similarity(text1, text2, tokenizer, model, device):
    """Compute the semantic similarity of two texts as the cosine
    similarity of their BERT [CLS] sentence embeddings.

    :param text1: first text
    :param text2: second text
    :param tokenizer: HuggingFace-style tokenizer callable
    :param model: BERT encoder whose output exposes ``last_hidden_state``
    :param device: device (CPU or GPU) the model lives on
    :return: cosine similarity as a plain Python float
    """
    # Tokenize and move the encoded batches to the model's device.
    inputs1 = tokenizer(text1, return_tensors='pt', padding=True, truncation=True).to(device)
    inputs2 = tokenizer(text2, return_tensors='pt', padding=True, truncation=True).to(device)

    # Encode without building autograd graphs; take the [CLS] (position 0)
    # vector as the sentence embedding.
    with torch.no_grad():
        embedding1 = model(**inputs1).last_hidden_state[:, 0, :]
        embedding2 = model(**inputs2).last_hidden_state[:, 0, :]

    # Cosine similarity computed directly in torch — avoids the original
    # tensor -> numpy -> sklearn round-trip and keeps the work on-device.
    return torch.nn.functional.cosine_similarity(embedding1, embedding2).item()


def evaluate_stance(correct_samples):
    """Report accuracy / micro-F1 / macro-F1 of the stance predictions
    carried inside the correctly-targeted samples."""
    print(f"\n开始评估 {len(correct_samples)} 个正确样本的Stance...")

    # Gold stance at index 5, predicted stance at index 6 of each sample
    # (assumed tuple layout — TODO confirm against Dataset).
    true_stance = [item[5] for item in correct_samples]
    pred_stance = [item[6] for item in correct_samples]

    acc = accuracy_score(true_stance, pred_stance)
    micro_f1 = f1_score(true_stance, pred_stance, average='micro')
    macro_f1 = f1_score(true_stance, pred_stance, average='macro')

    print(f"[Stance] 准确率: {acc:.4f}")
    print(f"[Stance] Micro F1: {micro_f1:.4f}")
    print(f"[Stance] Macro F1: {macro_f1:.4f}")
if __name__ == '__main__':
    args = get_config()

    data = read_json(args.test_file)
    bert_tokenizer = BertTokenizer.from_pretrained(args.pretrained_weights)
    # Build matching architectures for the target and opinion classifiers.
    if args.model_name == "bert":
        model_target = models.aBertModel(args.num_class, args.pretrained_weights)
        model2_opinion = models.aBertModel(args.num_class, args.pretrained_weights)
    else:
        model_target = models.RobertaModel(args.num_class, args.pretrained_weights)
        model2_opinion = models.RobertaModel(args.num_class, args.pretrained_weights)
    # map_location makes GPU-saved checkpoints loadable on any device
    # (without it, loading a CUDA checkpoint on a CPU-only machine fails).
    model_target.load_state_dict(torch.load(args.save_file_target, map_location=args.device))
    model2_opinion.load_state_dict(torch.load(args.save_file_opinion, map_location=args.device))
    model_target.to(args.device)
    model2_opinion.to(args.device)
    dataset = Dataset(data, bert_tokenizer, args.model_name)
    collate_fn_target = dataset.collate_fn_target
    collate_fn_opinion = dataset.collate_fn_opinion
    evaluation_target_opinion_stance(model_target, model2_opinion, dataset,
                                     collate_fn_target=collate_fn_target,
                                     collate_fn_opinion=collate_fn_opinion,
                                     test_batch_size=args.test_batch_size,
                                     bert_tokenizer=bert_tokenizer,
                                     device=args.device)
    # Example invocation:
    '''
python /data/wangzihao/stance/StanceDetectionLab/labEvaluation/evaluation/evaluation.py --test_file "/data/wangzihao/stance/StanceDetectionLab/labEvaluation/data/train_data_opinion.json" --pretrained_weights "/data/wangzihao/model/bert-base-chinese/" --save_file_target "/data/wangzihao/model_save_files/bert_target" --save_file_opinion "/data/wangzihao/model_save_files/bert_opinion"
'''