import matplotlib.pyplot as plt
import numpy as np
from optimiser import  *
# Step 1: ground-truth HMM parameters used to synthesise the training data.
# Two hidden states, three observation symbols.
initial = np.array([0.2, 0.8])
transition = np.array([[0.2, 0.8],
                       [0.6, 0.4]])
# Rows index observation symbols, columns index hidden states; each column
# sums to 1. Row 0 appears to be the <EOS> symbol (see get_true_params below).
emission = np.array([[0.0, 0.1],
                     [0.3, 0.8],
                     [0.7, 0.1]])

# Step 2: synthesise sequences
from data import DataLoader
dataloader = DataLoader(initial=initial,
                        transition=transition,
                        emission=emission)

# 300 sampled observation sequences from the true model.
data_list = dataloader.get_data_list(300)

# Step 3: fit an HMM by using HMMOptimiser class
optim = HMMOptimiser(num_hiddens=2, num_observations=3)
_ = optim.baum_welch(data_list)
hmm = optim.model

# Step 4: print the parameters fit on the synthetic data
# Note that parameters of our HMM class are in the log space
print('true initial:\n', initial)
print('fitted initial:\n', np.exp(hmm.initial))
print('true transition:\n', transition)
print('fitted transition:\n', np.exp(hmm.transition))
print('true emission:\n', emission)
print('fitted emission:\n', np.exp(hmm.emission))


def get_true_params() -> Tuple[ArrayLike, ArrayLike, ArrayLike]:
    """Return the ground-truth HMM parameters as (initial, transition, emission).

    These are the same parameters used at the top of the script to synthesise
    data, so fitted models can be compared against them. The emission matrix
    has observation symbols on rows and hidden states on columns.
    """
    initial = np.array([0.2, 0.8])
    transition = np.array([[0.2, 0.8],
                           [0.6, 0.4]])
    emission = np.array([[0.0, 0.1],  # probability of emitting <EOS>
                         [0.3, 0.8],
                         [0.7, 0.1]])
    return initial, transition, emission


def get_dist_to_true_params(params: Tuple[ArrayLike],
                            true_params: Tuple[ArrayLike]) -> float:
    """Euclidean (L2) distance between learnt and true HMM parameters.

    Hidden-state labels are arbitrary (the model is identifiable only up to a
    permutation of the states), so for the 2-state case both orderings of the
    true parameters are considered and the smaller distance is returned.
    """
    def as_vector(triple):
        # Stack (initial, transition, emission) into a single flat vector.
        return np.vstack([triple[0], triple[1], triple[2]]).flatten()

    learnt_vec = as_vector(params)
    candidates = [
        as_vector(true_params),
        # The same true parameters with the two hidden states swapped.
        as_vector((np.flip(true_params[0]),
                   np.flip(true_params[1]),
                   true_params[2][:, ::-1])),
    ]
    return min(np.linalg.norm(learnt_vec - c, ord=2) for c in candidates)


def sanity_check() -> List:
    """Repeatedly fit 2-state HMMs on synthetic data and collect diagnostics.

    For each data-set size, Baum-Welch is run num_runs times from scratch;
    each run records the distance of the learnt parameters to the true ones
    and a +1/-1 flag for whether the log-likelihood was non-decreasing.
    """
    num_sample_list = [50, 500]
    num_runs = 50

    true_params = get_true_params()
    dataloader = DataLoader(initial=true_params[0],
                            transition=true_params[1],
                            emission=true_params[2])
    # Sample once at the largest size; smaller experiments reuse a prefix.
    data_list = dataloader.get_data_list(max(num_sample_list))

    result_list = []
    for num_samples in num_sample_list:
        # Truncate the shared data set to the current experiment size.
        dl = data_list[:num_samples]
        for i in range(num_runs):
            print(f'Starting run {i}/{num_runs} with data size {num_samples}.')
            optim = HMMOptimiser(num_hiddens=2,
                                 num_observations=3)
            loglikelihood_list = optim.baum_welch(dl, verbose=False)
            # Monotone log-likelihood is the EM guarantee being sanity-checked.
            nondec_loglike = all(a <= b for a, b in zip(loglikelihood_list,
                                                        loglikelihood_list[1:]))
            # Model parameters are stored in log space; exponentiate to compare.
            fitted = tuple(np.exp(p) for p in (optim.model.initial,
                                               optim.model.transition,
                                               optim.model.emission))
            result_list.append({
                'num_samples': num_samples,
                'distance': get_dist_to_true_params(fitted, true_params),
                'non_decreasing': 1 if nondec_loglike else -1,
            })

    return result_list

# Step 5: run the sanity check and plot the diagnostics.
result_list = sanity_check()

# Group the distances by data-set size.
distances = {r['num_samples']: [] for r in result_list}
for r in result_list:
    distances[r['num_samples']].append(r['distance'])

fig, ax = plt.subplots()

# One error bar per sample size: mean distance +/- standard error of the mean.
for num_samples, dist in distances.items():
    mean_dist = np.mean(dist)
    # Standard error of the mean: sample std (ddof=1) / sqrt(n).
    std_err = np.std(dist, ddof=1) / (len(dist) ** 0.5)
    ax.errorbar(num_samples, mean_dist, yerr=std_err, marker='_', capsize=50, markersize=30)
    print(f'Num samples: {num_samples}, mean {mean_dist}, std. err. {std_err}')

ax.set_xlim(-200, 750)
ax.set_xticks(ticks=list(distances.keys()), labels=list(distances.keys()))
ax.set_ylabel('distance')
ax.set_xlabel('# samples')

# Group the non-decreasing log-likelihood flags (+1/-1) by data-set size.
nondescs = {r['num_samples']: [] for r in result_list}
for r in result_list:
    nondescs[r['num_samples']].append(r['non_decreasing'])

fig, ax = plt.subplots()

# Scatter the per-run flags so failures (-1) are visible individually.
for num_samples, nondesc in nondescs.items():
    ax.scatter([num_samples] * len(nondesc), nondesc)

ax.set_xlim(-200, 750)
ax.set_xticks(ticks=list(nondescs.keys()), labels=list(nondescs.keys()))
ax.set_ylabel('non decreasing log-likelihood')
ax.set_xlabel('# samples')