# -*- encoding: utf-8 -*-
"""
@author: acedar  
@time: 2023/10/5 13:12
@file: verify.py 
"""

from modeling import PositionalEncoding, subsequent_mask, LabelSmoothing, NoamOpt, make_model
import torch
import numpy as np
import math
from torch.autograd import Variable

# For plots
import matplotlib.pyplot as plt


def verify_subsequent_mask():
    """Visualize the causal (subsequent-position) attention mask for 20 positions."""
    mask = subsequent_mask(20)
    plt.figure(figsize=(5, 5))
    plt.imshow(mask[0])
    plt.show()


def verify_positional_encoding():
    """Plot positional-encoding values of dimensions 4..7 across 100 positions.

    Feeds an all-zero (1, 100, 20) tensor through PositionalEncoding so the
    output is the raw encoding table itself.
    """
    encoder = PositionalEncoding(20, 0)
    zeros = Variable(torch.zeros(1, 100, 20))
    encoded = encoder.forward(zeros)
    plt.figure(figsize=(15, 5))
    plt.plot(np.arange(100), encoded[0, :, 4:8].data.numpy())
    plt.legend(["dim %d" % p for p in [4, 5, 6, 7]])
    plt.show()


def verify_make_model():
    """Build a tiny example Transformer (src/tgt vocab 10, N=2) and print it."""
    model = make_model(10, 10, 2)
    print("tmp_model:", model)


def verify_noam_opt():
    """Plot the Noam learning-rate schedule for three hyperparameter settings."""
    configs = [(512, 4000), (512, 8000), (256, 4000)]
    schedules = [NoamOpt(model_size, 1, warmup, None) for model_size, warmup in configs]
    steps = np.arange(1, 20000)
    rates = [[sched.rate(step) for sched in schedules] for step in range(1, 20000)]
    plt.plot(steps, rates)
    plt.legend(["512:4000", "512:8000", "256:4000"])
    plt.show()


def verify_label_smoothing():
    """Run a toy prediction through LabelSmoothing and show the smoothed targets."""
    crit = LabelSmoothing(5, 0, 0.5)
    row = [0.1, 0.2, 0.5, 0.1, 0.1]
    predict = torch.FloatTensor([row, row, row])
    targets = Variable(torch.LongTensor([2, 1, 0]))
    v = crit(Variable(predict.log()), targets)

    # Show the target distributions expected by the system.
    print("v:", v)
    plt.imshow(crit.true_dist)
    plt.show()


def verify_label_smoothing2():
    """Plot the smoothed loss as confidence on the target class increases."""
    crit = LabelSmoothing(5, 0, 0.2)

    def loss(x):
        # Normalizer d = x + 3 so the target-class probability is x / (x + 3).
        d = x + 3
        predict = torch.FloatTensor([[0.1, x / d, 1 / d, 1 / d, 1 / d]])
        print("math.log(0.1): ", math.log(0.1))
        return crit(Variable(predict.log()), Variable(torch.LongTensor([1]))).item()

    xs = np.arange(1, 100)
    plt.plot(xs, [loss(x) for x in range(1, 100)])
    plt.show()


def verify_pos_cal(max_len=100, d_model=16):
    """Recompute the sinusoidal positional-encoding table step by step.

    Builds the table from "Attention Is All You Need": even columns hold
    sin(pos / 10000^(2i/d_model)), odd columns hold the matching cos.
    The frequencies are computed in log space (exp of a negative log)
    for numerical stability, mirroring PositionalEncoding in modeling.py.

    Args:
        max_len: number of positions to encode (default 100, as before).
        d_model: embedding dimension; assumed even so the sin/cos halves
            fill the table exactly (default 16, as before).

    Returns:
        torch.Tensor of shape (max_len, d_model) — previously the table was
        computed and discarded; it is now returned so it can be inspected.
    """
    pe = torch.zeros(max_len, d_model)
    # Column vector of positions, (max_len, 1), so it broadcasts against div_term.
    position = torch.arange(0, max_len).unsqueeze(1)
    # Frequencies 1 / 10000^(2i/d_model) for i = 0, 1, ..., d_model/2 - 1.
    div_term = torch.exp(torch.arange(0, d_model, 2) * -(math.log(10000.0) / d_model))

    print("position:", position.size())
    print("div_term:", div_term.size(), div_term[:8])
    pe[:, 0::2] = torch.sin(position * div_term)  # even dimensions: sin
    pe[:, 1::2] = torch.cos(position * div_term)  # odd dimensions: cos
    return pe


# Guard the manual checks so importing this module does not pop up plots;
# uncomment the check(s) you want to run.
if __name__ == "__main__":
    # verify_subsequent_mask()
    verify_positional_encoding()
    # verify_pos_cal()
    # verify_noam_opt()

    # verify_make_model()
    # verify_label_smoothing()
    # verify_label_smoothing2()
