import nltk
from nltk.util import ngrams
import json
import nltk
from nltk.translate.bleu_score import sentence_bleu
from itertools import combinations

# Ensure the required NLTK tokenizer data ('punkt') is downloaded.
nltk.download('punkt')

def compute_self_bleu(sentences, n_gram=4):
    """
    Compute the Self-BLEU score of a group of sentences.

    Each unordered pair of sentences is scored once with ``sentence_bleu``
    (first sentence of the pair as reference, second as hypothesis) and the
    scores are averaged. Lower Self-BLEU indicates more diverse sentences.

    :param sentences: list of sentence strings
    :param n_gram: maximum n-gram order to use (1-4), default 4;
                   any other value falls back to 4
    :return: average pairwise BLEU score, or 0 when fewer than two sentences
    """
    # Uniform weights up to the requested n-gram order.
    # Fix: the original 3-gram weights (0.33, 0.33, 0.33, 0) summed to 0.99;
    # use exact thirds so the weights sum to 1.0.
    weights = {
        1: (1.0, 0, 0, 0),
        2: (0.5, 0.5, 0, 0),
        3: (1 / 3, 1 / 3, 1 / 3, 0),
        4: (0.25, 0.25, 0.25, 0.25)
    }
    chosen_weights = weights.get(n_gram, weights[4])

    bleu_scores = []
    # All unordered sentence pairs. NOTE: BLEU is asymmetric, so each pair is
    # scored in one direction only — this matches the original behavior.
    for ref, hyp in combinations(sentences, 2):
        ref_tokens = [nltk.word_tokenize(ref)]
        hyp_tokens = nltk.word_tokenize(hyp)
        score = sentence_bleu(ref_tokens, hyp_tokens, weights=chosen_weights)
        bleu_scores.append(score)

    # Average over all pairs; 0 when there are no pairs (fewer than 2 sentences).
    return sum(bleu_scores) / len(bleu_scores) if bleu_scores else 0

def calculate_distinct_n(sentences, n):
    """
    Calculate Distinct-n for a list of sentences.

    Distinct-n is the ratio of unique n-grams to total n-grams across the
    tokenized sentences — a standard lexical-diversity metric.

    Args:
        sentences (list of str): List of sentences.
        n (int): n-gram size (e.g., 1 for Distinct-1, 2 for Distinct-2).

    Returns:
        float: Distinct-n value, 0.0 when no n-grams are produced.
    """
    # Tokenize every sentence and pool all of its n-grams into one flat list.
    ngram_pool = [
        gram
        for sentence in sentences
        for gram in ngrams(nltk.word_tokenize(sentence), n)
    ]
    if not ngram_pool:
        return 0.0
    return len(set(ngram_pool)) / len(ngram_pool)

'''
# Example sentences
sentences = [
    "The quick brown fox jumps over the lazy dog.",
    "A fast brown fox leaps over a lazy dog.",
    "The quick red fox jumps over a sleeping dog.",
    "A speedy fox swiftly jumps over the lazy hound.",
    "A fox jumps over the dog quickly and gracefully."
]

# Calculate Distinct-1 and Distinct-2
distinct_1 = calculate_distinct_n(sentences, 1)
distinct_2 = calculate_distinct_n(sentences, 2)

print(f"Distinct-1: {distinct_1:.4f}")
print(f"Distinct-2: {distinct_2:.4f}")
'''

def _load_json(path):
    """Load and return the JSON payload stored at *path*."""
    with open(path, 'r', encoding='utf-8') as file:
        return json.load(file)

_DATA_DIR = "datasets/Llama3_ultrafeedback_SPCE_round2"

# NOTE(review): the first file is output_10.json but is bound to all_data_13 —
# either the filename or the variable name looks like a typo; confirm upstream.
all_data_13 = _load_json(f"{_DATA_DIR}/output_10.json")
all_data_14 = _load_json(f"{_DATA_DIR}/output_14.json")
all_data_15 = _load_json(f"{_DATA_DIR}/output_15.json")
all_data_16 = _load_json(f"{_DATA_DIR}/output_16.json")
all_data_17 = _load_json(f"{_DATA_DIR}/output_17.json")

# One prompt list (all runs share prompts; taken from the first run).
prompts = [data["prompt"] for data in all_data_13]

# Group the i-th generation from every run: one list of 5 candidate
# responses per prompt.
candidates_responses = [
    [entry['generated_text'] for entry in group]
    for group in zip(all_data_13, all_data_14, all_data_15, all_data_16, all_data_17)
]

# Accumulate per-prompt diversity metrics over the candidate groups,
# capped at the first 1000 groups.
distinct_1_list = []
distinct_2_list = []
distinct_3_list = []
distinct_4_list = []
self_bleu_list = []

for sentences in candidates_responses:
    # Fix: the original used `> 1000`, which admits 1001 samples before
    # breaking; `>= 1000` caps the sample count at exactly 1000.
    if len(distinct_1_list) >= 1000:
        break
    # Distinct-1..4 and Self-BLEU for this group of candidate responses.
    distinct_1_list.append(calculate_distinct_n(sentences, 1))
    distinct_2_list.append(calculate_distinct_n(sentences, 2))
    distinct_3_list.append(calculate_distinct_n(sentences, 3))
    distinct_4_list.append(calculate_distinct_n(sentences, 4))
    self_bleu_list.append(compute_self_bleu(sentences, n_gram=4))

# Report the mean of each metric across all sampled groups.
# (Assumes at least one group was processed; empty input would raise
# ZeroDivisionError here, same as the original.)
print(f"Distinct-1: {sum(distinct_1_list)/len(distinct_1_list):.4f}")
print(f"Distinct-2: {sum(distinct_2_list)/len(distinct_2_list):.4f}")
print(f"Distinct-3: {sum(distinct_3_list)/len(distinct_3_list):.4f}")
print(f"Distinct-4: {sum(distinct_4_list)/len(distinct_4_list):.4f}")
print(f"self_bleu_list: {sum(self_bleu_list)/len(self_bleu_list):.4f}")