import logging
import random
import re

import jieba
import pandas as pd



def load_stop_word(path=None):
    """Load stop words, one per line, from a text file.

    Args:
        path: Optional file path. Defaults to the module-level
            ``STOP_WORD_PATH`` when omitted, preserving the original
            no-argument call signature.

    Returns:
        list[str]: each line of the file with surrounding whitespace
        (including the trailing newline) stripped.
    """
    if path is None:
        path = STOP_WORD_PATH
    # Explicit encoding: stop-word lists here are Chinese text, and the
    # platform default encoding is not guaranteed to be UTF-8.
    with open(path, encoding='utf-8') as f:
        return [line.strip() for line in f]


def build_input(self, texts):
    """Tokenize raw texts into tab-joined word strings for fastText-style input.

    Args:
        texts: iterable of raw text strings; falsy entries are skipped.

    Returns:
        list[str]: one tab-separated token string per non-empty input text
        (order preserved), with stop words removed.
    """
    # Raw string: '\(' in a plain string literal is an invalid escape
    # sequence and warns on modern Python. Pattern strips parenthesized
    # spans, a leading *...* span, or a leading "...," prefix.
    DEL_PATTERN = re.compile(r'(\(.+?\)|^\*(.+?)\*|^(.+?),)')

    # Build the lookup set once: membership tests against a list are O(n)
    # per token, which dominates on long documents.
    stop_words = set(self.stop_word)

    result = []
    for text in texts:
        if not text:
            continue
        new_text = DEL_PATTERN.sub('', text.strip())
        words = [word for word in jieba.lcut(new_text) if word not in stop_words]
        result.append('\t'.join(words))

    return result


def data_preprocessing(self):
    """Build labeled train/test splits from ``self.data`` and write them to CSV.

    For each ``label -> texts`` entry, texts are tokenized via
    ``self.build_input`` and suffixed with ``\\t__label__<label>``.
    Classes with more than 20 samples are split 90/10 into train/test;
    smaller classes go entirely to train. The shuffled train set and the
    (unshuffled) test set are written to ``train_data_path`` and
    ``test_data_path`` as single-column CSVs.
    """
    logging.info('data preprocessing  ......')
    train_contents = []
    test_contents = []
    for label, texts in self.data.items():
        # Label each sample once, then split — avoids repeating the
        # formatting comprehension in every branch.
        labeled = [words_str + "\t__label__" + label
                   for words_str in self.build_input(texts)]
        if len(labeled) > 20:
            split = int(len(labeled) * 0.9)
            train_contents.extend(labeled[:split])
            test_contents.extend(labeled[split:])
        else:
            # Too few samples to hold out a meaningful test slice.
            train_contents.extend(labeled)

    # Lazy %-style args: formatting is deferred until the record is emitted.
    logging.info("data size: %s", len(train_contents) + len(test_contents))
    logging.info("train data size: %s", len(train_contents))
    logging.info("test data size: %s", len(test_contents))

    # Only the training set is shuffled; test order follows insertion.
    random.shuffle(train_contents)
    df_data_train = pd.DataFrame({"content": train_contents})
    df_data_train.to_csv(train_data_path, index=False, quoting=0)
    df_data_test = pd.DataFrame({"content": test_contents})
    df_data_test.to_csv(test_data_path, index=False, quoting=0)