#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time    : 2020/9/17 下午3:53
# @Author  : fugang_le
# @Software: PyCharm

# -*- coding:utf-8 -*-
########################################
## import packages
########################################
import re
import jieba
import numpy as np
from keras.preprocessing.sequence import pad_sequences
from keras.utils.np_utils import to_categorical

# from app.libs.log_util import logger
from src.config import siamese_train_path
from src.lstm.config import Config

# stop_words = load_stop_words()

# def labels_to_idx_handle():
#     labels = get_labels()
#     assert Config.nb_classes == len(labels)
#     return {label:i for i, label in enumerate(labels)}
#
# label_to_idx = labels_to_idx_handle()

def load_data():
    """Load siamese training triples from ``siamese_train_path``.

    Each non-empty line is expected to be tab-separated:
    ``text1<TAB>text2<TAB>score``. Lines that do not have exactly three
    fields are silently skipped.

    Returns:
        list[tuple[str, str, float]]: (text1, text2, score) triples.
    """
    data = []
    # Explicit UTF-8: the corpus contains non-ASCII (Chinese) text and the
    # platform default encoding is not guaranteed to be UTF-8.
    with open(siamese_train_path, mode='r', encoding='utf-8') as fr:
        for line in fr:
            line = line.strip()
            if not line:
                continue
            parts = line.split('\t')
            if len(parts) == 3:
                text1, text2, raw_score = parts
                data.append((text1, text2, float(raw_score)))
    return data


def transform_input_texts(tokenizer, texts):
    """Encode *texts* with *tokenizer* and pad/truncate every sequence to
    ``Config.max_sequence_length``.

    Args:
        tokenizer: fitted Keras ``Tokenizer``.
        texts: iterable of preprocessed strings.

    Returns:
        2-D integer array of shape (len(texts), max_sequence_length).
    """
    encoded = tokenizer.texts_to_sequences(texts)
    return pad_sequences(encoded, maxlen=Config.max_sequence_length)


def data_reprocess(text):
    """Normalize *text* for the tokenizer.

    Strips surrounding whitespace, lowercases, then splits the string into
    individual characters joined by single spaces (character-level
    segmentation; jieba word segmentation is intentionally disabled here).

    Returns:
        str: space-separated characters, e.g. ``"ab"`` -> ``"a b"``.
    """
    normalized = text.strip().lower()
    # ' '.join over a str iterates its characters directly.
    return ' '.join(normalized)


def split_train_test_dev_data():
    """Shuffle the loaded pairs and split them 80/10/10.

    Returns:
        tuple: (train_data, dev_data, test_data) lists of
        (text1, text2, score) triples.

    NOTE: no random seed is set, so the split differs between runs.
    """
    data = load_data()
    size = len(data)

    # Shuffle indirectly via a permutation of indices.
    order = list(range(size))
    np.random.shuffle(order)
    shuffled = [data[i] for i in order]

    dev_start = int(size * 0.8)   # first 80% -> train
    test_start = int(size * 0.9)  # next 10% -> dev, rest -> test
    train_data = shuffled[:dev_start]
    dev_data = shuffled[dev_start:test_start]
    test_data = shuffled[test_start:]

    return train_data, dev_data, test_data

# train_data, dev_data, test_data = split_train_test_dev_data()

def _encode_split(tokenizer, dataset):
    """Vectorize one data split: ((text1_matrix, text2_matrix), scores)."""
    texts1 = []
    texts2 = []
    scores = []
    for text1, text2, score in dataset:
        texts1.append(data_reprocess(text1))
        texts2.append(data_reprocess(text2))
        scores.append(score)
    inputs = (transform_input_texts(tokenizer, texts1),
              transform_input_texts(tokenizer, texts2))
    return inputs, scores


def get_train_data(tokenizer):
    """Build model-ready inputs and labels for all three splits.

    Args:
        tokenizer: fitted Keras ``Tokenizer`` used to encode the texts.

    Returns:
        tuple: (train_inputs, train_labels, dev_inputs, dev_labels,
        test_inputs, test_labels), where each ``*_inputs`` is a pair of
        padded sequence matrices and each ``*_labels`` is a list of scores.

    NOTE(review): relies on module-level ``train_data``/``dev_data``/
    ``test_data``; the assignment creating them (the commented-out
    ``split_train_test_dev_data()`` call in this module) must be re-enabled
    or this raises ``NameError``.
    """
    train_inputs, train_labels = _encode_split(tokenizer, train_data)
    dev_inputs, dev_labels = _encode_split(tokenizer, dev_data)
    test_inputs, test_labels = _encode_split(tokenizer, test_data)
    return train_inputs, train_labels, dev_inputs, dev_labels, test_inputs, test_labels


def get_tokenizer_train_data(dataset=None):
    """Flatten sentence pairs into one preprocessed text list for fitting a
    tokenizer.

    Args:
        dataset: optional iterable of (text1, text2, score) triples; when
            omitted, falls back to the module-level ``train_data``.
            NOTE(review): that global is created by a currently
            commented-out ``split_train_test_dev_data()`` call in this
            module and will raise ``NameError`` unless re-enabled.

    Returns:
        list[str]: preprocessed texts, two entries per input pair, in
        original order.
    """
    if dataset is None:
        dataset = train_data
    corpus = []
    for text1, text2, _score in dataset:
        corpus.append(data_reprocess(text1))
        corpus.append(data_reprocess(text2))
    return corpus


def hand_text(tokenizer, texts):
    """Preprocess raw *texts* and convert them to padded integer sequences.

    Args:
        tokenizer: fitted Keras ``Tokenizer``.
        texts: iterable of raw strings.

    Returns:
        2-D integer array of padded sequences, one row per input text.
    """
    preprocessed = list(map(data_reprocess, texts))
    return transform_input_texts(tokenizer, preprocessed)
