# -*- coding: utf-8 -*-

from sklearn.model_selection import train_test_split

import codecs
import jieba
import pickle

# Merge domain-specific terms (e.g. financial product names such as 花呗) into
# jieba's dictionary so they are segmented as single tokens.
jieba.load_userdict('custom_data/word_level/atec_dict.txt')


def train_valid_split(raw_file):
    """Split the raw TSV file into a 90/10 train/valid pair of files.

    Each line of *raw_file* is ``id \\t sentence_a \\t sentence_b \\t label``.
    The split is stratified on the label column and uses a fixed random seed
    so repeated runs produce identical splits. Results are written to
    ``datasets/train.csv`` and ``datasets/valid.csv``.
    """
    lines = []
    labels = []
    with codecs.open(raw_file, encoding='utf-8') as f_raw:
        for line in f_raw:
            # Strict 4-field unpack: malformed rows fail loudly here.
            _, _, _, label = line.strip().split('\t')
            lines.append(line)
            labels.append(label)

    # Stratify on the binary label to keep class balance in both partitions.
    train_lines, valid_lines = train_test_split(
        lines, test_size=0.1, random_state=7, stratify=labels)

    with codecs.open('datasets/train.csv', 'w', encoding='utf-8') as f_train, \
            codecs.open('datasets/valid.csv', 'w', encoding='utf-8') as f_valid:
        f_train.writelines(list(train_lines))
        f_valid.writelines(list(valid_lines))


def build_word_level_corpus(raw_file):
    """Write a jieba-segmented corpus, one space-joined sentence per line.

    Both sentences of every pair in *raw_file* are segmented and appended to
    ``datasets/word_level/corpus.txt``.
    """
    sentences = []
    with codecs.open(raw_file, encoding='utf-8') as f_raw:
        for line in f_raw:
            _, input_a, input_b, _ = line.strip().split('\t')
            sentences.append(input_a)
            sentences.append(input_b)

    with codecs.open('datasets/word_level/corpus.txt', 'w', encoding='utf-8') as f_corpus:
        for sentence in sentences:
            f_corpus.write(' '.join(jieba.cut(sentence)) + '\n')


def build_char_level_corpus(raw_file):
    """Write a character-level corpus, one space-joined sentence per line.

    Both sentences of every pair in *raw_file* are split into single
    characters and appended to ``datasets/char_level/corpus.txt``.
    """
    sentences = []
    with codecs.open(raw_file, encoding='utf-8') as f_raw:
        for line in f_raw:
            _, input_a, input_b, _ = line.strip().split('\t')
            sentences.extend((input_a, input_b))

    with codecs.open('datasets/char_level/corpus.txt', 'w', encoding='utf-8') as f_corpus:
        # Joining a string inserts a space between every pair of characters.
        f_corpus.writelines(' '.join(sentence) + '\n' for sentence in sentences)


def build_word_level_vocabulary(train_file):
    """Build a word -> index mapping from the training sentences.

    Indices start at 1 (0 is implicitly left free, e.g. for padding).

    Fix: the original enumerated an unsorted ``set``, so the word->index
    assignment changed between interpreter runs (string hash randomization).
    Sorting the unique tokens makes the vocabulary deterministic and
    reproducible.
    """
    sentences = []
    with codecs.open(train_file, encoding='utf-8') as f_train:
        for line in f_train:
            _, input_a, input_b, _ = line.strip().split('\t')
            sentences.extend((input_a, input_b))

    # Join with a sentence delimiter so jieba does not merge tokens across
    # sentence boundaries; note the delimiter itself enters the vocabulary.
    corpus = u'。'.join(sentences)
    word_list = sorted(set(tk[0] for tk in jieba.tokenize(corpus)))

    return dict((word, idx + 1) for idx, word in enumerate(word_list))


def build_char_level_vocabulary(train_file):
    """Build a character -> index mapping from the training sentences.

    Indices start at 1 (0 is implicitly left free, e.g. for padding).

    Fix: the original enumerated an unsorted ``set``, so the char->index
    assignment changed between interpreter runs (string hash randomization).
    Sorting the unique characters makes the vocabulary deterministic and
    reproducible.
    """
    sentences = []
    with codecs.open(train_file, encoding='utf-8') as f_train:
        for line in f_train:
            _, input_a, input_b, _ = line.strip().split('\t')
            sentences.extend((input_a, input_b))

    corpus = u''.join(sentences)
    char_list = sorted(set(corpus))

    return dict((char, idx + 1) for idx, char in enumerate(char_list))


def _tokenize(sentence, level):
    """Split *sentence* into jieba word tokens ('word') or single characters."""
    if level == 'word':
        return list(jieba.cut(sentence))
    return list(sentence)


def load_data(raw_file, level, test=False):
    """Load sentence pairs from *raw_file* and map tokens to vocabulary indices.

    The original had four nearly identical branches; they are collapsed into
    one loop plus the ``_tokenize`` helper, with behavior preserved.

    Parameters
    ----------
    raw_file : str
        Tab-separated file: ``id \\t sentence_a \\t sentence_b [\\t label]``.
    level : str
        'word' for jieba segmentation; any other value means character level.
    test : bool
        When True the label column is ignored and out-of-vocabulary tokens
        map to ``len(vocabulary) + 1``; when False an unknown token raises
        ``KeyError`` (the vocabulary is expected to cover the training set).

    Returns
    -------
    ``(x_a, x_b, vocabulary)`` when *test* is True, otherwise
    ``(x_a, x_b, y, vocabulary)`` where *y* holds float labels.
    """
    vocab_dir = 'word_level' if level == 'word' else 'char_level'
    with open('custom_data/%s/vocabulary.pkl' % vocab_dir, 'rb') as f_vocabulary:
        vocabulary = pickle.load(f_vocabulary)

    # Vocabulary indices are 1..len(vocabulary), so len+1 is a free OOV slot.
    oov_index = len(vocabulary) + 1
    x_a, x_b, y = [], [], []

    with codecs.open(raw_file, encoding='utf-8') as f_data:
        for line in f_data:
            fields = line.strip().split('\t')
            if test:
                # Test rows may lack a label column; tolerate unknown tokens.
                x_a.append([vocabulary.get(tk, oov_index) for tk in _tokenize(fields[1], level)])
                x_b.append([vocabulary.get(tk, oov_index) for tk in _tokenize(fields[2], level)])
            else:
                # Strict 4-field unpack: malformed training rows fail loudly.
                _, input_a, input_b, label = fields
                x_a.append([vocabulary[tk] for tk in _tokenize(input_a, level)])
                x_b.append([vocabulary[tk] for tk in _tokenize(input_b, level)])
                y.append(float(label))

    if test:
        return x_a, x_b, vocabulary
    return x_a, x_b, y, vocabulary


# def load_train_data(train_file, level='char'):
#     if level == 'word':
#         with open('custom_data/word_level/vocabulary.pkl', 'rb') as f_vocabulary:
#             vocabulary = pickle.load(f_vocabulary)
#         x_a = list()
#         x_b = list()
#         y = list()
#         with codecs.open(train_file, encoding='utf-8') as f_train:
#             lines = f_train.readlines()
#             for line in lines:
#                 _, input_a, input_b, label = line.strip().split('\t')
#                 x_a.append([vocabulary[word] for word in jieba.cut(input_a)])
#                 x_b.append([vocabulary[word] for word in jieba.cut(input_b)])
#                 y.append(float(label))
#     else:
#         with open('custom_data/char_level/vocabulary.pkl', 'rb') as f_vocabulary:
#             vocabulary = pickle.load(f_vocabulary)
#         x_a = list()
#         x_b = list()
#         y = list()
#         with codecs.open(train_file, encoding='utf-8') as f_train:
#             lines = f_train.readlines()
#             for line in lines:
#                 _, input_a, input_b, label = line.strip().split('\t')
#                 x_a.append([vocabulary[char] for char in input_a])
#                 x_b.append([vocabulary[char] for char in input_b])
#                 y.append(float(label))
#
#     return x_a, x_b, y, vocabulary
#
#
# def load_test_data(test_file, level='char'):
#     if level == 'word':
#         with open('custom_data/word_level/vocabulary.pkl', 'rb') as f_vocabulary:
#             vocabulary = pickle.load(f_vocabulary)
#         x_a = list()
#         x_b = list()
#         with codecs.open(test_file, encoding='utf-8') as f_train:
#             lines = f_train.readlines()
#             for line in lines:
#                 _, input_a, input_b = line.strip().split('\t')[0:3]
#                 x_a.append([vocabulary.get(word, 0) for word in jieba.cut(input_a)])
#                 x_b.append([vocabulary.get(word, 0) for word in jieba.cut(input_b)])
#     else:
#         with open('custom_data/char_level/vocabulary.pkl', 'rb') as f_vocabulary:
#             vocabulary = pickle.load(f_vocabulary)
#         x_a = list()
#         x_b = list()
#         with codecs.open(test_file, encoding='utf-8') as f_train:
#             lines = f_train.readlines()
#             for line in lines:
#                 _, input_a, input_b = line.strip().split('\t')[0:3]
#                 x_a.append([vocabulary.get(char, 0) for char in input_a])
#                 x_b.append([vocabulary.get(char, 0) for char in input_b])
#
#     return x_a, x_b, vocabulary


if __name__ == '__main__':
    # Build, persist and sanity-check the word-level vocabulary
    # (protocol -1 = highest pickle protocol available).
    word_vocab = build_word_level_vocabulary('datasets/atec_nlp_sim_train_all.csv')
    with open('custom_data/word_level/vocabulary.pkl', 'wb') as vocabulary_pkl:
        pickle.dump(word_vocab, vocabulary_pkl, -1)
    print(len(word_vocab))
    print(word_vocab.get(u'花呗'))

    # Build, persist and sanity-check the char-level vocabulary.
    char_vocab = build_char_level_vocabulary('datasets/atec_nlp_sim_train_all.csv')
    with open('custom_data/char_level/vocabulary.pkl', 'wb') as vocabulary_pkl:
        pickle.dump(char_vocab, vocabulary_pkl, -1)
    print(len(char_vocab))
    print(char_vocab.get(u'花'))

    # Materialize both corpora and the stratified train/valid split.
    build_word_level_corpus('datasets/atec_nlp_sim_train_all.csv')
    build_char_level_corpus('datasets/atec_nlp_sim_train_all.csv')
    train_valid_split('datasets/atec_nlp_sim_train_all.csv')
