"""
https://huggingface.co/docs/transformers/tasks/translation
"""
from PyCmpltrtok.common import sep
import sys

IS_AUTODL = 0  # Whether running on the AutoDL cloud GPU service: 1 for AutoDL, 0 for WSL.



sep('Proxy')
import subprocess
import os

# Pick the shell script that exports the HTTP(S) proxy environment variables.
if IS_AUTODL:
    proxy_script = '/etc/network_turbo'  # AutoDL's built-in network accelerator
else:
    proxy_script = '/home/yunpeng/bin/util/set_proxy_v2ray.sh'  # personal proxy script
# Source the script in a child bash shell and harvest the proxy_* variables
# it exports, copying them into this process's environment.
result = subprocess.run(f'bash -c "source {proxy_script} && env | grep proxy"', shell=True, capture_output=True, text=True)
if result.returncode != 0:
    # Best-effort: continue without a proxy, but surface what went wrong.
    print(f'WARNING: could not source {proxy_script}: {result.stderr.strip()}', file=sys.stderr)
for line in result.stdout.splitlines():
    if '=' in line:
        var, value = line.split('=', 1)
        # Strip stray whitespace so e.g. trailing spaces don't corrupt the URL.
        os.environ[var.strip()] = value.strip()



sep('Dataset')
from datasets import load_dataset

# Prefer the locally cached copy of opus_books (en-fr). The snapshot path is
# machine-specific, so fall back to downloading from the Hub when it is absent.
DATA_SET_DIR = os.path.join(os.environ['HOME'], ".cache/huggingface/datasets/opus_books/en-fr/0.0.0/1f9f6191d0e91a3c539c2595e2fe48fc1420de9b")
print(DATA_SET_DIR)

if os.path.isdir(DATA_SET_DIR):
    books_ds = load_dataset(DATA_SET_DIR)
else:
    # Loading by Hub name requires the language-pair config explicitly.
    books_ds = load_dataset("opus_books", "en-fr")
print(books_ds)

print(books_ds['train'][:5])

sep('Shuffle')
# Shuffle deterministically so the train/test split below is reproducible.
books = books_ds.shuffle(seed=666)
print(books['train'][:5])

sep('Split')
# opus_books ships a single 'train' split; carve out 20% as a test set.
books = books['train'].train_test_split(test_size=0.2, seed=667)
print(books)

sep('Limit')
# Keep the demo fast: train on 1024 pairs, evaluate on 512.
books['train'] = books['train'].select(range(1024))
books['test'] = books['test'].select(range(512))
print(books)

sep('Example')
print(books['train'][:5])
# sys.exit(0)


sep('Preprocess')
from transformers import AutoTokenizer

# Prefer the locally cached t5-small snapshot; the hash path is
# machine-specific, so fall back to the Hub id when it does not exist.
checkpoint = os.path.join(os.environ['HOME'], ".cache/huggingface/hub/models--google-t5--t5-small/snapshots/df1b051c49625cf57a3d0d8d3863ed4d13564fe4")
if not os.path.isdir(checkpoint):
    checkpoint = "google-t5/t5-small"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
print(tokenizer)

# Translation direction and the task prefix T5 was trained with.
source_lang = "en"
target_lang = "fr"
prefix = "translate English to French: "


def preprocess_function(examples):
    """Tokenize a batch of translation pairs for T5.

    Prepends the task prefix to every English source sentence, tokenizes
    sources and French targets together (truncated and padded to at most
    128 tokens), and returns the encodings as PyTorch tensors.
    """
    pairs = examples["translation"]
    source_texts = [prefix + pair[source_lang] for pair in pairs]
    target_texts = [pair[target_lang] for pair in pairs]
    return tokenizer(
        source_texts,
        text_target=target_texts,
        max_length=128,
        truncation=True,
        padding=True,
        return_tensors='pt',
    )


sep("preprocess_function(books['train'][:5])")
# Sanity-check the preprocessing on the first five training examples.
xinputs = preprocess_function(books['train'][:5])
print(xinputs)

# With padding=True every sequence in the batch shares one length.
sep('input_ids')
print([len(x) for x in xinputs['input_ids']])

sep('attention_mask')
print([len(x) for x in xinputs['attention_mask']])

sep('labels')
print([len(x) for x in xinputs['labels']])

# Round-trip the token ids back to text to eyeball the encoding.
sep('Decode input_ids')
xdecoded = tokenizer.batch_decode(xinputs['input_ids'])
print(xdecoded)

sep('Decode labels')
xdecoded = tokenizer.batch_decode(xinputs['labels'])
print(xdecoded)

sep('Tokenized dataset')
# Tokenize the whole train/test DatasetDict in batches via map().
tokenized_books = books.map(preprocess_function, batched=True)
print(tokenized_books)



sep('Data Collator')
from transformers import DataCollatorForSeq2Seq
from PyCmpltrtok.common import ld2dl, dl2ld

# Pads inputs/labels dynamically per batch; the model checkpoint lets the
# collator prepare decoder inputs from the labels.
data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=checkpoint)
print(data_collator)


sep('Evaluate')
import evaluate

# ImportError: To be able to use evaluate-metric/sacrebleu, you need to install
# the following dependencies['sacrebleu'] using 'pip install sacrebleu' for instance.
# Prefer the locally cached metric module; the hash path is machine-specific,
# so fall back to loading by name when it does not exist.
METRIC_DIR = os.path.join(os.environ['HOME'], '.cache/huggingface/modules/evaluate_modules/metrics/evaluate-metric--sacrebleu/28676bf65b4f88b276df566e48e603732d0b4afd237603ebdf92acaacf5be99b/sacrebleu.py')
if not os.path.isfile(METRIC_DIR):
    METRIC_DIR = "sacrebleu"
metric = evaluate.load(METRIC_DIR)
print(metric)

import numpy as np


def postprocess_text(preds, labels):
    """Normalize decoded text for sacrebleu.

    Strips surrounding whitespace from every prediction, and wraps each
    stripped reference in a one-element list (sacrebleu expects a list of
    reference sets per example).
    """
    cleaned_preds = list(map(str.strip, preds))
    cleaned_labels = [[ref.strip()] for ref in labels]
    return cleaned_preds, cleaned_labels


def compute_metrics(eval_preds):
    """Compute corpus BLEU (sacrebleu) and mean generated length.

    Args:
        eval_preds: (predictions, labels) pair of token-id batches; labels
            may contain -100 at positions ignored by the loss.

    Returns:
        dict with 'bleu' (sacrebleu score) and 'gen_len' (mean count of
        non-pad prediction tokens), each rounded to 4 decimals.
    """
    preds, labels = eval_preds
    # Trainer may hand predictions as a tuple (logits, ...) — keep the first.
    if isinstance(preds, tuple):
        preds = preds[0]

    # -100 is the loss-masking sentinel; swap in pad ids so decoding works.
    labels = np.where(labels != -100, labels, tokenizer.pad_token_id)

    decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
    decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
    decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)

    scores = metric.compute(predictions=decoded_preds, references=decoded_labels)
    gen_lens = [np.count_nonzero(p != tokenizer.pad_token_id) for p in preds]

    result = {"bleu": scores["score"], "gen_len": np.mean(gen_lens)}
    return {k: round(v, 4) for k, v in result.items()}


# Smoke-test compute_metrics on two tiny hand-made batches.
# dl2ld turns the tokenizer's dict-of-lists into the list-of-dicts format
# DataCollatorForSeq2Seq expects; the collator then pads the batch.
preds = data_collator(dl2ld(tokenizer((
    'How do you do?', 
    'My cat like fish.',
    "I'd rather like dancing with you and it.",
))))['input_ids']

labels = data_collator(dl2ld(tokenizer((
    'How are you?', 
    'My cat likes fish.',
    "I like to dance with you.",
))))['input_ids']

# Different texts in both directions — expect modest BLEU.
r = compute_metrics((preds, labels, ))
print(r)

r = compute_metrics((labels, preds, ))
print(r)

# Identical texts — expect a perfect (100) BLEU score.
r = compute_metrics((preds, preds, ))
print(r)

r = compute_metrics((labels, labels, ))
print(r)



sep('Model')
from transformers import AutoModelForSeq2SeqLM, Seq2SeqTrainingArguments, Seq2SeqTrainer

model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)
print(model)

sep('Infer by "generate"')
import torch
dev = torch.device('cuda:0')
model = model.to(dev)
# Move the batch produced by preprocess_function onto the same device.
xinputs = {k: xinputs[k].to(dev) for k in xinputs}
in_text = tokenizer.batch_decode(xinputs['input_ids'])
sep('in text')
print(in_text)
in_text_labels = tokenizer.batch_decode(xinputs['labels'])
sep('labels')
print(in_text_labels)
sep('predictions')
# Pass the attention mask: this batch is padded (padding=True above), and
# without the mask generate() attends to pad tokens, giving unreliable
# output (transformers warns about exactly this).
out = model.generate(
    xinputs['input_ids'],
    attention_mask=xinputs['attention_mask'],
    max_new_tokens=40,
    do_sample=True,  # stochastic sampling: outputs vary run to run
    top_k=30,
    top_p=0.95,
)
out_text = tokenizer.batch_decode(out)
print(out_text)
