from augmentation import *
from label import *
from participle import *
from sequence import *
from sklearn.model_selection import train_test_split
from model_CNN import *
from predictor import *


def preprocess():
    """Run the full preprocessing pipeline over the big raw dataset.

    Reads the module-level settings (source_file, modes, min_count,
    max_times, word_len, num_words, max_len) set in the __main__ block.
    """
    transformer = DataTransform()

    # Load the raw json corpus (1,710,857 lines in the full dataset).
    transformer.read_data(path='data/' + source_file)
    total_records = len(transformer.data)

    # Pull each label field out of the raw records.
    for label_name in modes:
        transformer.extract_data(label_name)
        extract(transformer, label_name)

    # Resample under-represented classes for both classification tasks.
    for label_name in ('accusation', 'relevant_articles'):
        resample(transformer.data, min_count, max_times, mode=label_name)

    # Word segmentation of the fact text.
    transformer.extract_data(name='fact')
    participle(transformer, word_len, total_records)

    # Turn word lists into padded integer sequences.
    tokenizer = train_tokenizer(num_words, word_len, total_records)
    sequence(tokenizer, word_len, max_len, total_records)
    merge_sequence(num_words, max_len, total_records)


def train_model(mode):
    """Train the CNN classifier for one prediction task.

    Uses the module-level hyperparameters (num_words, max_len, min_count,
    max_times, kernel_size, dim, batch_size) set in the __main__ block.

    :param mode: label type to train on: 'accusation', 'relevant_articles'
        or 'imprisonment'.
    """
    # Padded word-id sequences of the case facts.
    fact = np.load('data/preprocess/fact_pad_seq_%d_%d.npy' % (num_words, max_len))
    # Same split parameters as test_model() so the train/test halves stay disjoint.
    fact_train, fact_test = train_test_split(fact, test_size=0.05, random_state=1)
    # Only the training half is needed here; release the rest before the
    # large concatenations below to keep peak memory down.
    del fact, fact_test
    # Label matrix for this task.
    labels = np.load('data/preprocess/' + mode + '.npy')
    labels_train, labels_test = train_test_split(labels, test_size=0.05, random_state=1)
    del labels, labels_test
    if mode != 'imprisonment':
        # Data augmentation: append the precomputed resampled (duplicated)
        # minority-class rows to the training set.
        index_add_label = np.load('data/preprocess/index_add_%s_%d_%d.npy' % (mode, min_count, max_times))
        fact_train = np.concatenate([fact_train, fact_train[index_add_label]], axis=0)
        labels_train = np.concatenate([labels_train, labels_train[index_add_label]], axis=0)

    train(fact_train, labels_train, mode, num_words, max_len, kernel_size, dim, batch_size)


def test_model(mode):
    """Evaluate the trained model for one task on the held-out split.

    Uses the same split parameters as train_model() so the evaluation data
    was never seen during training.

    :param mode: label type to evaluate: 'accusation', 'relevant_articles'
        or 'imprisonment'.
    """
    # Padded word-id sequences of the case facts.
    fact = np.load('data/preprocess/fact_pad_seq_%d_%d.npy' % (num_words, max_len))
    fact_train, fact_test = train_test_split(fact, test_size=0.05, random_state=1)
    # Only the test half is needed here; free the rest immediately.
    del fact, fact_train
    # Label matrix for this task.
    labels = np.load('data/preprocess/' + mode + '.npy')
    labels_train, labels_test = train_test_split(labels, test_size=0.05, random_state=1)
    del labels, labels_train
    test(mode, fact_test, labels_test)


def predict(source, mode):
    """Predict labels for every record in a dataset and print them
    side by side with the ground-truth labels.

    :param source: json file name under data/ to read records from.
    :param mode: label type to predict, e.g. 'accusation'.
    """
    transformer = DataTransform()
    transformer.read_data(path='data/' + source)
    transformer.extract_data('fact')
    transformer.extract_data(mode)

    facts = transformer.extraction['fact']
    truths = transformer.extraction[mode]

    model = Predictor()
    predictions = model.predict_one(facts, mode=mode)
    for predicted, actual in zip(predictions, truths):
        print(predicted, actual)


if __name__ == '__main__':
    # Data settings (read as globals by preprocess/train_model/test_model).
    source_file = 'rest_data.json'  # rest_data has 748,203 records
    min_count = 5000  # minimum number of cases each charge is padded up to
    max_times = 100  # maximum expansion factor per charge (takes priority)
    word_len = 2  # shortest word length kept after segmentation
    num_words = 40000  # vocabulary size to keep
    max_len = 400  # maximum length of a word sequence
    # Model settings
    kernel_size = 3
    dim = 512
    batch_size = 256

    modes = ['accusation', 'relevant_articles', 'imprisonment']
    preprocess()
    for mode in modes:
        train_model(mode)
        # test_model(mode)

    # source = 'cail2018_small.json'
    # predict(source, mode='accusation')
