import os
import torch
import time
import pandas as pd
from transformers import AutoTokenizer, T5ForConditionalGeneration, BartTokenizer, BartForConditionalGeneration, AutoModelForSeq2SeqLM

# ---------------------------------------------------------------------------
# Configuration
# ---------------------------------------------------------------------------
os.environ['CUDA_VISIBLE_DEVICES'] = '2'  # pin the process to GPU 2 (set before any CUDA call)

batch_size = 1            # input examples per generate() call
input_max_length = 498    # encoder token budget (inputs are truncated/padded to this)
output_max_length = 21    # decoder token budget for each generated title
base_tokenizer = 'facebook/bart-base'   # tokenizer comes from the base checkpoint
device = 'cuda'
model_dir = 'output/models/bart-base-best-metric-rouge-pr-title-notoken-bs8-498-21'
input_file = 'datasets/PRTiger/no-token/test.csv'
output_file = 'output/predictions/bart-base-best-metric-rouge-pr-title-notoken-bs8-498-21/predictions-bs-100.csv'

# Fail early if the prediction directory is missing, instead of losing the
# whole (long) generation run when to_csv() finally writes at the end.
os.makedirs(os.path.dirname(output_file), exist_ok=True)

# ---------------------------------------------------------------------------
# Load the dataset: one input text per row, taken from the 'text' column.
# ---------------------------------------------------------------------------
# time.strftime() formats the current local time by default, so the explicit
# localtime(time()) round-trip is unnecessary.
print("start time: " + time.strftime('%Y-%m-%d %H:%M:%S'))
input_texts = pd.read_csv(input_file)['text']
total_len = len(input_texts)
print(total_len)

# Tokenization: the tokenizer is taken from the base checkpoint, while the
# fine-tuned weights are loaded from the local model directory below.
tokenizer = AutoTokenizer.from_pretrained(base_tokenizer)

# model = T5ForConditionalGeneration.from_pretrained(model_dir)
model = AutoModelForSeq2SeqLM.from_pretrained(model_dir)
model.to(device)  # move weights to the GPU selected via CUDA_VISIBLE_DEVICES
model.eval()      # inference mode: disable dropout etc.

# Fix the RNG up front so any sampling-based decoding is reproducible
# (plain beam search is deterministic regardless).
torch.manual_seed(0)
predictions = []  # filled with one list of candidate titles per input example

# ---------------------------------------------------------------------------
# Generation loop: batch the inputs, run beam search, and collect
# `num_return_sequences` candidate titles per input example.
# ---------------------------------------------------------------------------
# Single source of truth for the beam settings: the original hard-coded 100
# both in generate() and in the regrouping of decoded outputs, which would
# silently mis-group predictions if only one of them were edited.
num_beams = 100
num_return_sequences = 100

with torch.no_grad():
    cache = []

    # enumerate() guarantees 0..n-1 positions; Series.items() yields the
    # pandas index, which breaks the batching arithmetic below if the CSV
    # index is not a contiguous RangeIndex.
    for index, input_text in enumerate(input_texts):
        if index > 0 and index % batch_size == 0:
            print("Progress: {}/{}".format(index, total_len), len(predictions), predictions[-1][:5])

        cache.append(input_text)
        # Run the model only once a full batch (or the final partial batch)
        # has been accumulated.
        if (index + 1) % batch_size != 0 and index + 1 != total_len:
            continue

        tokenized_text = tokenizer(cache, truncation=True, padding='max_length', max_length=input_max_length,
                                   return_tensors='pt')

        source_ids = tokenized_text['input_ids'].to(device, dtype=torch.long)
        source_mask = tokenized_text['attention_mask'].to(device, dtype=torch.long)

        # Beam search, returning every beam as a candidate title.
        generated_ids = model.generate(
            input_ids=source_ids,
            attention_mask=source_mask,
            max_length=output_max_length,
            num_beams=num_beams,
            num_return_sequences=num_return_sequences,
            early_stopping=True
        )

        # generate() returns batch_size * num_return_sequences sequences,
        # ordered example by example; decode them all at once and regroup
        # into one list of candidates per input example.
        decoded = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        for start in range(0, len(decoded), num_return_sequences):
            predictions.append(decoded[start:start + num_return_sequences])

        cache = []

# ---------------------------------------------------------------------------
# Persist predictions: one row per input example, one column per candidate.
# ---------------------------------------------------------------------------
predictions = pd.DataFrame(predictions)
# Create the output directory on demand so to_csv() cannot fail after the
# long generation run (no-op if it already exists).
os.makedirs(os.path.dirname(output_file), exist_ok=True)
predictions.to_csv(output_file)
print(predictions)
# strftime() defaults to the current local time; no localtime() needed.
print("finish time: " + time.strftime('%Y-%m-%d %H:%M:%S'))
