import pandas as pd
import jieba
from collections import defaultdict
import numpy as np
import os


# Tokenize a string with jieba, registering extra proper nouns first.
def tokenize(string):
    """Segment `string` into a list of tokens.

    Domain-specific proper nouns are registered with jieba (high weight)
    so they survive as single tokens.  The first token pair written
    around a literal '#' (e.g. ['foo', '#', 'bar']) is merged back into
    one token 'foo#bar'.  Finally, code-like tokens (length >= 3,
    starting with an ASCII letter or digit) are normalized to the
    placeholder '[CODE]'.

    Returns:
        list[str]: the processed token list.
    """
    new_words = ['李子期', '未来城', '华一']
    for word in new_words:
        jieba.add_word(word, 1000)

    res = jieba.lcut(string, cut_all=False)
    if '#' in res:
        # Merge the first occurrence of X '#' Y into the single token X#Y.
        # The bound len(res) - 2 guards against an IndexError when '#'
        # is one of the last two tokens (the original read res[i + 2]
        # with i up to len(res) - 2).  The list is shrunk via `del` only
        # when a merge actually happened; the original truncated the
        # last two tokens unconditionally, silently dropping tokens when
        # no mergeable '#' pattern was found.
        for i in range(len(res) - 2):
            if res[i + 1] == '#':
                res[i] = res[i] + res[i + 1] + res[i + 2]
                del res[i + 1:i + 3]
                break

    # Normalize code-like tokens to a shared placeholder.  The
    # byte-level isalpha() check means only ASCII letters qualify:
    # a multi-byte CJK character encodes to non-letter bytes and fails.
    for i in range(len(res)):
        if len(res[i]) >= 3 and (res[i][0].isdigit() or res[i][0].encode('UTF-8').isalpha()):
            res[i] = '[CODE]'
    return res


# Build the vocabulary file from the training data.
def build_vocab(del_word_frequency):
    """Build 'vocab.txt' from the questions in 'data.csv'.

    Every question is tokenized, word frequencies are counted, and each
    word whose frequency is strictly greater than `del_word_frequency`
    is written to the vocabulary, one per line, in descending frequency
    order.  The special tokens [PAD], [UNK] and [CODE] occupy the first
    three lines; [CODE] is excluded from the counted words so it is not
    written twice.

    Args:
        del_word_frequency (int): frequency threshold; words occurring
            this many times or fewer are dropped.
    """
    data = pd.read_csv('data.csv')
    segments = data['question'].apply(tokenize)

    word_frequency = defaultdict(int)
    for row in segments:
        for token in row:
            word_frequency[token] += 1
    # Sort by frequency, most frequent first.
    word_sort = sorted(word_frequency.items(), key=lambda x: x[1], reverse=True)

    # `with` guarantees the file handle is closed even if a write fails;
    # the original open()/close() pair leaked the handle on error.
    with open('vocab.txt', 'w', encoding='utf-8') as f:
        f.write('[PAD]' + "\n" + '[UNK]' + "\n" + '[CODE]' + "\n")
        for word, freq in word_sort:
            if freq > del_word_frequency and word != '[CODE]':
                f.write(word + "\n")


# Split a DataFrame into shuffled train / eval partitions.
def split_data(df, split=0.7, random_state=None):
    """Randomly partition `df` into a train set and an eval set.

    Args:
        df (pd.DataFrame): the full dataset.
        split (float): fraction of rows assigned to the train set.
        random_state: optional seed forwarded to `DataFrame.sample`
            for reproducible splits (new, backward-compatible; the
            default None preserves the original unseeded behavior).

    Returns:
        tuple[pd.DataFrame, pd.DataFrame]: (train_data, eval_data);
        eval_data is additionally shuffled (frac=1 resample).
    """
    train_data = df.sample(frac=split, random_state=random_state)
    # Everything not sampled into the train set becomes the eval set.
    eval_data = df.drop(train_data.index)
    # Shuffle eval rows so downstream batching does not see file order.
    eval_data = eval_data.sample(frac=1, random_state=random_state)

    return train_data, eval_data


# Convert a raw text sequence to a list of vocabulary indices.
def seq2index(seq):
    """Tokenize `seq` and map each token to its row index in vocab.txt.

    Tokens missing from the vocabulary map to the index of '[UNK]'.
    The vocabulary file is re-read on every call; it is expected to be
    the output of `build_vocab`, located next to this module.

    Args:
        seq (str): raw input text.

    Returns:
        list[int]: one vocabulary index per token.
    """
    vocab_path = os.path.join(os.path.dirname(__file__), 'vocab.txt')
    vocab = pd.read_csv(vocab_path, header=None)
    # Word -> line number; line order defines the index space.
    dictionary = dict(zip(vocab[0].values, vocab.index.values))
    unk = dictionary['[UNK]']
    # dict.get with a default replaces the explicit `in dict.keys()`
    # membership test and the in-place index-mutation loop.
    return [dictionary.get(token, unk) for token in tokenize(seq)]


# Force every sequence to a common length.
def padding_seq(X, max_len=15):
    """Pad (with trailing zeros) or truncate each sequence in `X` so
    that every row has exactly `max_len` entries.

    Args:
        X: iterable of index sequences (lists or arrays).
        max_len (int): target row length.

    Returns:
        np.ndarray of shape (len(X), max_len).
    """
    rows = []
    for seq in X:
        if len(seq) < max_len:
            # Too short: append zeros up to max_len.
            rows.append(np.concatenate([seq, [0] * (max_len - len(seq))]))
        else:
            # Long enough: keep only the first max_len entries.
            rows.append(seq[:max_len])
    return np.array(rows)


def balance_data():
    """Rebalance the class distribution of 'data.csv'.

    Keeps 20% of label-0 rows, 50% of label-2 rows and all rows with
    labels 1, 3 and 4, then writes the rebalanced frame to
    'parent_data.csv'.  Class counts are printed before and after so
    the effect of the downsampling is visible.
    """
    df = pd.read_csv('data.csv')
    print(df['label'].value_counts())
    df_0 = df[df['label'] == 0].sample(frac=0.2)
    df_2 = df[df['label'] == 2].sample(frac=0.5)
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # pd.concat is the supported equivalent of the chained appends.
    df_all = pd.concat([
        df[df['label'] == 1],
        df_0,
        df_2,
        df[df['label'] == 3],
        df[df['label'] == 4],
    ])
    print(df_all['label'].value_counts())
    # utf_8_sig writes a BOM so Excel opens the CSV with correct encoding.
    df_all.to_csv('parent_data.csv', index=False, encoding='utf_8_sig')


if __name__ == '__main__':
    # Rebuild vocab.txt, keeping only words that occur more than once
    # (frequency threshold 1 means freq > 1 is required).
    build_vocab(1)
