from transformers import AutoTokenizer,LEDConfig,LEDForConditionalGeneration
# from longformer import LongformerEncoderDecoderForConditionalGeneration
# from longformer import LongformerEncoderDecoderConfig
from datasets import load_dataset, load_metric
from rouge.rouge import Rouge
import torch
import json
from torch.utils.tensorboard import SummaryWriter

# Hard-coded GPU index — adjust to the available device.
device = torch.device('cuda:2')
# TensorBoard logger; writes to the default ./runs/<timestamp> directory.
writer = SummaryWriter()

rouge = Rouge()
# Running ROUGE accumulators and sample counter, mutated as module
# globals inside batch_process and averaged at the end of the script.
rouge1=0
rouge2=0
rougel=0
count = 0
# PRIMER_path='../summarization_PLM/PRIMER_model/'
# TOKENIZER = AutoTokenizer.from_pretrained(PRIMER_path)
# config = LEDConfig.from_pretrained(PRIMER_path)
# MODEL = LEDForConditionalGeneration.from_pretrained(PRIMER_path,config=config)
# MODEL = LongformerEncoderDecoderForConditionalGeneration.from_pretrained(
#             PRIMER_path, config=config)
# Load PRIMERA (an LED-based multi-document summarizer) from the HF hub.
config=LEDConfig.from_pretrained('allenai/PRIMERA')
tokenizer = AutoTokenizer.from_pretrained('allenai/PRIMERA',config = config)
model = LEDForConditionalGeneration.from_pretrained('allenai/PRIMERA',config=config)
model = model.to(device)
# NOTE(review): gradient checkpointing only saves memory during training
# with backprop; it has no effect on pure generation — confirm it is needed.
model.gradient_checkpointing_enable()
PAD_TOKEN_ID = tokenizer.pad_token_id
# PRIMERA's special separator token placed between source documents.
DOCSEP_TOKEN_ID = tokenizer.convert_tokens_to_ids("<doc-sep>")

def process_document(documents):
    """Tokenize a batch of multi-document samples into a padded id tensor.

    Each element of *documents* is one sample: the concatenation of its
    source articles joined by the literal separator ``"|||||"``. Every
    article is whitespace-normalized, truncated to an equal share of the
    4096-token encoder budget, and the articles are joined with the
    ``<doc-sep>`` token (only when a sample has more than one article).
    The sequence is framed with BOS/EOS and the batch is right-padded.

    Returns a ``(batch, max_len)`` LongTensor on the module-level device.
    """
    encoded_samples = []
    for sample in documents:
        # Split into individual articles and collapse all runs of
        # whitespace (including newlines) to single spaces.
        docs = [" ".join(part.split()) for part in sample.split("|||||")]

        per_doc_budget = 4096 // len(docs)
        needs_separator = len(docs) != 1

        token_ids = []
        for doc in docs:
            # encode() adds BOS/EOS around each article; strip them so
            # the frame is added exactly once for the whole sequence.
            piece = tokenizer.encode(
                doc,
                truncation=True,
                max_length=per_doc_budget,
            )
            token_ids.extend(piece[1:-1])
            if needs_separator:
                token_ids.append(DOCSEP_TOKEN_ID)

        framed = [tokenizer.bos_token_id, *token_ids, tokenizer.eos_token_id]
        encoded_samples.append(torch.tensor(framed).to(device))

    return torch.nn.utils.rnn.pad_sequence(
        encoded_samples, batch_first=True, padding_value=PAD_TOKEN_ID
    )


def batch_process(batch):
    """Summarize one batch from ``datasets.map(batched=True)`` and log ROUGE.

    *batch* is a column-oriented dict: ``batch['document']`` and
    ``batch['summary']`` are parallel lists of samples. Generates beam-search
    summaries with PRIMERA, scores them against the references, streams
    per-sample and running-average ROUGE to TensorBoard, and appends the
    per-sample results to a JSON file.

    Mutates the module globals rouge1/rouge2/rougel (score accumulators)
    and count (samples processed so far).

    Returns a dict with 'generated_summaries' and 'gt_summaries' lists,
    which datasets.map merges into the output dataset as new columns.
    """
    global rouge1, rouge2, rougel, count
    result_test = {}
    input_ids = process_document(batch['document'])
    # LED/PRIMERA longformer attention: global attention on the <s> token
    # and on every <doc-sep> token; local attention everywhere else.
    global_attention_mask = torch.zeros_like(input_ids)
    global_attention_mask[:, 0] = 1
    global_attention_mask[input_ids == DOCSEP_TOKEN_ID] = 1
    generated_ids = model.generate(
        input_ids=input_ids,
        global_attention_mask=global_attention_mask,
        max_length=256,
        num_beams=4,
    )
    generated_str = tokenizer.batch_decode(
        generated_ids.tolist(), skip_special_tokens=True
    )
    result = {}
    result['generated_summaries'] = generated_str
    result['gt_summaries'] = batch['summary']
    # One score dict per sample, keyed 'rouge-1'/'rouge-2'/'rouge-l',
    # each holding 'r'/'p'/'f' components.
    rouge_score = rouge.get_scores(result["generated_summaries"], result["gt_summaries"])
    # BUG FIX: the original iterated range(len(batch)), but len(batch) is the
    # number of COLUMNS in the batch dict (2: 'document'/'summary'), which only
    # matched the batch size by coincidence at batch_size=2. Iterate over the
    # actual number of generated samples instead.
    for num in range(len(generated_str)):
        for key in rouge_score[num]["rouge-l"].keys():
            writer.add_scalar(f'test/rouge-1_{key}', rouge_score[num]["rouge-1"][key], count)
            writer.add_scalar(f'test/rouge-2_{key}', rouge_score[num]["rouge-2"][key], count)
            writer.add_scalar(f'test/rouge-l_{key}', rouge_score[num]["rouge-l"][key], count)
        # Accumulate recall for ROUGE-1/2 and F1 for ROUGE-L, then log the
        # running averages over the (count + 1) samples seen so far.
        rouge1 = rouge1 + rouge_score[num]["rouge-1"]["r"]
        rouge2 = rouge2 + rouge_score[num]["rouge-2"]["r"]
        rougel = rougel + rouge_score[num]["rouge-l"]["f"]
        writer.add_scalar('test/average-rouge-1_r', rouge1 / (count + 1), count)
        writer.add_scalar('test/average-rouge-2_r', rouge2 / (count + 1), count)
        writer.add_scalar('test/average-rouge-l_f', rougel / (count + 1), count)
        result_test.update({count: dict(
            origin_text=batch["document"][num],
            tgt_summaries=batch["summary"][num],
            generated_summaries=result['generated_summaries'][num],
            rouge_score=rouge_score[num],
        )})
        writer.add_scalar('test/generated_len', len(result['generated_summaries'][num].split()), count)
        count = count + 1
    # NOTE(review): appending one json.dump per batch produces a file that is a
    # concatenation of JSON objects, not a single valid JSON document; a reader
    # must parse it object-by-object (kept as-is for resumability).
    with open("./data/multinews/primer_multinews_test_1128.json", 'a', encoding='utf-8') as fw:
        json.dump(result_test, fw, indent=4, ensure_ascii=False)

    return result

# Run evaluation over the full multi_news test split, two samples per batch.
dataset=load_dataset('multi_news')

# import random
# data_idx = random.choices(range(len(dataset['test'])),k=2)
# dataset_test = dataset['test'].select(data_idx)


dataset_test = dataset['test']
# dataset_test = dataset_test.select(range(count,len(dataset['validation'])))
# batch_process runs for its side effects (TensorBoard + JSON logging) and
# also returns new columns, so result_small has
# ['document','summary','generated_summaries','gt_summaries'].
result_small = dataset_test.map(batch_process, batched=True, batch_size=2)

# rouge = load_metric("rouge")
# rouge = load_metric(path = "./metrics/rouge")
# result_small['generated_summaries']
# score=rouge.compute(predictions=result_small["generated_summaries"], references=result_small["gt_summaries"]) #score:[]
# print(score['rouge1'].mid)
# print(score['rouge2'].mid)
# print(score['rougeL'].mid)

# Corpus-level averages from the global accumulators filled by batch_process;
# appended to the same JSON file as the per-sample records.
rouge_avg = {}
rouge_avg.update({'rouge_avg':dict(rouge1_avg=rouge1 / len(dataset_test),rouge2_avg=rouge2 / len(dataset_test),rougel_avg=rougel / len(dataset_test))})
with open("./data/multinews/primer_multinews_test_1128.json", 'a', encoding='utf-8') as fw:
        json.dump(rouge_avg, fw, indent=4, ensure_ascii=False)


