import os
import pandas as pd
import matplotlib.pyplot as plt
# from transformers import AutoTokenizer
from transformers import BartTokenizer, T5Tokenizer
from datasets import Dataset

# Pin this process to GPU 2 (must be set before any CUDA library initializes).
os.environ['CUDA_VISIBLE_DEVICES'] = '2'

# Load the dataset: concatenate the train/valid/test splits so length
# statistics are computed over the full corpus.
train = pd.read_csv('../datasets/pull_request/train.pr_commits_20_400_100_0.5_nltk.csv')
valid = pd.read_csv('../datasets/pull_request/valid.pr_commits_20_400_100_0.5_nltk.csv')
test = pd.read_csv('../datasets/pull_request/test.pr_commits_20_400_100_0.5_nltk.csv')
# DataFrame.append was deprecated in pandas 1.4 and removed in 2.0; pd.concat
# is the supported equivalent.
dataset = pd.concat([train, valid, test], ignore_index=True)
dataset = dataset[['article', 'abstract']]
dataset.columns = ['src', 'target']
# dataset = pd.read_csv('../datasets/body_title/total.csv', low_memory=False)
dataset.dropna(axis=0, how='any', inplace=True)
# dropna leaves gaps in the index; reset it so Dataset.from_pandas does not
# add a spurious '__index_level_0__' column.
dataset = Dataset.from_pandas(dataset.reset_index(drop=True))
print(dataset)

# Tokenization
# BART tokenizer matching the checkpoint intended for downstream fine-tuning.
# (T5Tokenizer is imported above as an alternative — TODO confirm which model
# this analysis is meant to serve.)
tokenizer = BartTokenizer.from_pretrained('facebook/bart-base')

def tokenize(batch):
    """Tokenize a batch's 'src' and 'target' text.

    Returns the encoded source (input_ids, attention_mask, ...) with the
    target's input_ids attached under 'labels', as expected by
    seq2seq data collators.
    """
    encoded = tokenizer(batch['src'])
    encoded['labels'] = tokenizer(batch['target'])['input_ids']
    return encoded


# Tokenize in batches, then pull the arrays back into pandas for analysis.
dataset = dataset.map(tokenize, batched=True, batch_size=512)
dataset.set_format('numpy', columns=['input_ids', 'attention_mask', 'labels'])
dataset = dataset.to_pandas()

# Analyze the token-length distributions of input and output.
# Pass `len` directly instead of wrapping it in a lambda.
dataset['src_len'] = dataset['input_ids'].map(len)
dataset['target_len'] = dataset['labels'].map(len)

# Upper quantiles guide the choice of max_source_length / max_target_length.
print(dataset['src_len'].quantile([0.70, 0.80, 0.85, 0.90, 0.95, 0.98]))
print(dataset['target_len'].quantile([0.70, 0.80, 0.85, 0.90, 0.95, 0.98]))

# plt.figure()
# dataset['src_len'].value_counts().plot(kind='bar')
# plt.figure()
# dataset['target_len'].value_counts().plot(kind='bar')
# plt.show()
