from python_ai.common.xcommon import *
import numpy as np
import pandas as pd


def chunkstring(string, length):
    """
    Split *string* into consecutive chunks of *length* characters.

    The final chunk may be shorter than *length* when ``len(string)`` is
    not an exact multiple of it.  Adapted from:
    https://stackoverflow.com/questions/18854620/whats-the-best-way-to-split-a-string-into-fixed-length-chunks-and-work-with-the/18854817

    :param string: text to split
    :param length: chunk size; must be a positive integer
    :return: list of substrings, in original order
    :raises ValueError: if *length* is not positive
    """
    # Guard explicitly: the original silently returned [] for negative
    # lengths (range with a negative step) and raised an opaque range
    # error for length == 0.
    if length <= 0:
        raise ValueError('length must be a positive integer')
    return [string[i:i + length]
            for i in range(0, len(string), length)]


# Path to a newline-separated word list (one word per line).
# NOTE(review): relative path — assumes the script is launched from its own
# directory; verify against how this is actually run.
xpath = r'../../../../../large_data/DL1/dict/linux.words'

# Load the word list as a single pandas Series (first/only column).
# NOTE(review): sep=r'\n' with the python engine treats each line as one
# field; recent pandas versions reject '\n' as a separator outright —
# confirm the pinned pandas version still accepts this.
words_vector = pd.read_csv(xpath,
                           sep=r'\n',
                           engine='python',
                           encoding='utf8',
                           header=None
                           ).iloc[:, 0]
# print(words_vector[:50])
m = len(words_vector)  # number of words in the dictionary
print('words vector length', m)

# Coerce every entry to str — read_csv may have parsed some lines as
# non-strings (numeric-looking words, or NaN for blank lines).
words_vector = words_vector.map(lambda x: str(x))
# Per-word character lengths.
lens_vector = words_vector.map(lambda x: len(x))
# print(lens_vector[:50])
len_m = lens_vector.mean()
print('mean length:', len_m)
# Rounded mean word length; later used to size the training windows.
len_m = int(round(len_m))

# Concatenate all words into one long space-separated corpus string.
long_txt = ' '.join(words_vector)
print('long text sample', long_txt[:100])
# Character vocabulary: every distinct character appearing in the corpus
# (this includes the space used as the join separator).
dict_set = set(list(long_txt))
dict_set_len = len(dict_set)
print('dict_set len:', dict_set_len)
print('dict set', dict_set)

# Bidirectional character <-> integer-index mappings over the vocabulary.
# NOTE: set iteration order is arbitrary, so index assignment can differ
# between runs; it is only consistent within a single run.
idx2char = list(dict_set)
char2idx = {c: i for i, c in enumerate(idx2char)}
idx2char = np.array(idx2char)

# Window length for the chunked sequences: twice the rounded mean word length.
n_steps = len_m * 2

# sentences: split the corpus into fixed-length windows.  y is the corpus
# shifted left by one character, so y[t] is the next-char target for x[t].
x_data_sent = chunkstring(long_txt, n_steps)
y_data_sent = chunkstring(long_txt[1:], n_steps)
# print(f'|{x_data_sent[-1]}|')
# print(f'|{y_data_sent[-1]}|')
# Right-pad the last (possibly short) chunk with spaces so every row has
# exactly n_steps characters; the space char is in the vocabulary, so the
# padding maps cleanly through char2idx below.
x_data_sent[-1:] = [x_data_sent[-1].ljust(n_steps)]
y_data_sent[-1:] = [y_data_sent[-1].ljust(n_steps)]
# print(f'|{x_data_sent[-1]}|')
# print(f'|{y_data_sent[-1]}|')

# indexes: map each character to its vocabulary index, producing int arrays
# of shape (n_chunks, n_steps).
x_data_idx = np.array([[char2idx[c] for c in row] for row in x_data_sent])
# presumably a shape-logging/debug helper from xcommon's star import — verify
check_shape(x_data_idx, 'x_data_idx')
y_data_idx = np.array([[char2idx[c] for c in row] for row in y_data_sent])
check_shape(y_data_idx, 'y_data_idx')

# onehot: one-hot encode x by fancy-indexing rows of the identity matrix,
# giving shape (n_chunks, n_steps, dict_set_len).
x_data = np.eye(dict_set_len)[x_data_idx]