import random
from typing import List, Tuple, Optional

import matplotlib.pyplot as plt
import numpy as np
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import torch
from torch import Tensor


def decompression_embedding(embedding: Tensor, algo: str = "pca") -> Tuple[np.ndarray, np.ndarray]:
    """
    Reduce an embedding to two dimensions for visualization.

    Args:
        embedding: a 2-D pytorch tensor of shape (n_samples, n_features)
        algo: dimensionality-reduction algorithm name, case-insensitive;
            only 'pca' and 'tsne' are supported

    Returns:
        A tuple (x, y) of 1-D numpy arrays holding the first and second
        reduced components, one value per input row.

    Raises:
        ValueError: if `algo` is neither 'pca' nor 'tsne'
    """
    # detach/cpu so this works on grad-tracking and GPU-resident tensors alike
    embedding_np: np.ndarray = embedding.detach().cpu().numpy()

    # normalize case so 'PCA'/'TSNE' work as the docstring advertises
    algo = algo.lower()
    if algo == 'pca':
        decomposition = PCA(n_components=2)
    elif algo == 'tsne':
        # NOTE(review): sklearn's TSNE requires perplexity < n_samples;
        # very small inputs will raise inside fit_transform — confirm callers
        decomposition = TSNE(n_components=2)
    else:
        raise ValueError(f'algo<{algo}> not supported ...')

    de_embeddings: np.ndarray = decomposition.fit_transform(embedding_np)
    return de_embeddings[:, 0], de_embeddings[:, 1]


def gen_color_by_numbers(count: int) -> List[str]:
    """
    Generate `count` random hex color strings usable by matplotlib.

    Args:
        count: the number of colors to generate

    Returns:
        A list of `count` strings of the form '#RRGGBB' (uppercase hex
        digits). Colors are random and not guaranteed to be distinct.
    """
    hex_digits = '0123456789ABCDEF'
    # random.choices draws 6 digits in one call — replaces the original
    # manual append loop with an unused index variable
    return ['#' + ''.join(random.choices(hex_digits, k=6)) for _ in range(count)]


def show_embeddings(embeddings: Tensor, labels: List[str], output_img: Optional[str] = None,
                    title: str = "word embedding scatter map") -> None:
    """
    Plot a 2-D scatter map of a pytorch embedding and optionally save it.

    The embedding is reduced to two dimensions with PCA; rows sharing a
    label share one random color and one legend entry.

    Args:
        embeddings: pytorch embedding of shape (n_points, n_features)
        labels: one label per embedding row
        output_img: optional file path; when given, the figure is saved there
        title: the title drawn above the scatter map

    Raises:
        ValueError: if `embeddings` is not 2-D or the label count mismatches
    """
    # explicit validation instead of `assert`, which is stripped under -O
    if len(embeddings.shape) != 2:
        raise ValueError('embeddings must be a 2-D tensor')
    if embeddings.shape[0] != len(labels):
        raise ValueError('labels must have one entry per embedding row')

    x, y = decompression_embedding(embeddings, 'pca')

    label_set = list(set(labels))
    colors = gen_color_by_numbers(len(label_set))
    # one dict lookup per point instead of the original O(n^2) list.index loop
    color_of = {label: colors[i] for i, label in enumerate(label_set)}

    fig, ax = plt.subplots()
    # Scatter one collection per distinct label and tag it with `label=` so
    # ax.legend() maps labels to colors. (The original called
    # ax.legend(labels) before anything was plotted — no artists existed to
    # label — and the later bare ax.legend() overrode it anyway.)
    for label in label_set:
        idx = [i for i, cur in enumerate(labels) if cur == label]
        ax.scatter([x[i] for i in idx], [y[i] for i in idx],
                   c=color_of[label], label=label, alpha=0.5)
    ax.legend()
    ax.set_title(title)
    fig.tight_layout()

    if output_img:
        plt.savefig(output_img)

    plt.show()


def main():
    """Smoke-test: plot a scatter map of 10 random 100-dim word embeddings."""
    words_n = 10
    embeddings = torch.randn(words_n, 100)
    show_embeddings(embeddings, [str(i) for i in range(words_n)])
    print(embeddings.shape)


# without this guard, running the file as a script did nothing: main()
# was defined but never invoked
if __name__ == '__main__':
    main()