import json
import os
import sys
from functools import partial
from collections import defaultdict
import pandas as pd
import torch
from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset, DataLoader
import logging



import numpy as np
import accelerate
import random

sys.path.append("/home/lxy/DPR/Fengshenbang-LM/fengshen")
sys.path.append("/home/lxy/DPR/Fengshenbang-LM/fengshen/examples/pegasus") 
sys.path.append("/home/lxy/DPR/Fengshenbang-LM")
# Load model directly
from transformers import PegasusForConditionalGeneration
# Need to download tokenizers_pegasus.py and other Python script from Fengshenbang-LM github repo in advance,
# or you can download tokenizers_pegasus.py and data_utils.py in https://huggingface.co/IDEA-CCNL/Randeng_Pegasus_523M/tree/main
# Strongly recommend you git clone the Fengshenbang-LM repo:
# 1. git clone https://github.com/IDEA-CCNL/Fengshenbang-LM
# 2. cd Fengshenbang-LM/fengshen/examples/pegasus/
# and then you will see the tokenizers_pegasus.py and data_utils.py which are needed by pegasus model
from tokenizers_pegasus import PegasusTokenizer
# from parallelformers import parallelize


# Load the fine-tuned Randeng-Pegasus summarization checkpoint once at module
# level, then move it to GPU in half precision for faster inference.
_CHECKPOINT = "IDEA-CCNL/Randeng-Pegasus-523M-Summary-Chinese"
model = PegasusForConditionalGeneration.from_pretrained(_CHECKPOINT)
model.half()
model.cuda()
tokenizer = PegasusTokenizer.from_pretrained(_CHECKPOINT)

from torch.utils.data import Dataset,DataLoader
class documentDataset(Dataset):
    """Thin torch Dataset wrapping an in-memory sequence of document strings.

    Each item is returned unchanged; tokenization happens in the collate_fn.
    """

    def __init__(self, document) -> None:
        super().__init__()
        # Keep a reference only — no copy, no preprocessing.
        self.data = document

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        return self.data[index]
def my_collate(batch, tokenizer):
    """Tokenize one batch of raw document strings into padded PyTorch tensors.

    Truncates each document to 1024 tokens and pads the batch to a common
    length so it can be fed directly to the encoder.
    """
    encode_kwargs = dict(
        max_length=1024,
        padding=True,
        truncation=True,
        is_split_into_words=False,
        add_special_tokens=True,
        return_tensors='pt',
    )
    return tokenizer(batch, **encode_kwargs)
# Summarize every document in the concatenated corpus and write the results
# to summary_doc.json as a list of {"id": int, "content": str} records.
from tqdm import tqdm

# Read the whole input first so a read failure cannot clobber a previously
# written summary_doc.json (the old code opened the output for writing
# before even touching the input).
with open('/home/lxy/DPR/concatenated.json', encoding='utf-8') as doc_file:
    json_data = json.load(doc_file)

docs = [x['content'] for x in json_data]
dataset = documentDataset(docs)
collate_fn = partial(my_collate, tokenizer=tokenizer)
dataloader = DataLoader(dataset, batch_size=4, shuffle=False, num_workers=5,
                        collate_fn=collate_fn, pin_memory=True)

summaries = []
cnt = 0  # running global document id across batches
progress = tqdm(dataloader, desc="Summarizing documents", ncols=100)
# Inference only: no_grad avoids autograd bookkeeping and cuts GPU memory use.
with torch.no_grad():
    for inputs in progress:
        summary_ids = model.generate(inputs["input_ids"].cuda(), max_length=1024)
        output = tokenizer.batch_decode(summary_ids, skip_special_tokens=True,
                                        clean_up_tokenization_spaces=False)
        for offset, text in enumerate(output):
            summaries.append({'id': cnt + offset, 'content': text})
        cnt += len(output)

# Explicit UTF-8 so ensure_ascii=False Chinese text serializes correctly
# regardless of the system locale; open only after all summaries succeeded.
with open('summary_doc.json', 'w', encoding='utf-8') as summary_file:
    json.dump(summaries, summary_file, indent=4, ensure_ascii=False)

print(summaries)