# -*- coding: utf-8 -*-

import pickle
import re

def get_stopwords(stopwordfile):
    """Load a stopword file (one word per line, UTF-8) into a set.

    :param stopwordfile: path to the stopword list file
    :return: set of stripped stopword strings
    """
    # `with` closes the file automatically; the original's extra
    # f.close() after the block was dead code and has been removed.
    with open(stopwordfile, 'r', encoding='utf-8') as f:
        return {line.strip() for line in f}

def cut_sentence(paragraph, sentence_split_pattern):
    """Split a paragraph into sentences, keeping the delimiters.

    ``sentence_split_pattern`` must contain a capturing group so that
    ``re.split`` also returns the matched delimiters; each delimiter is
    glued back onto the sentence text that precedes it.

    Fix: the original silently dropped a trailing sentence that had no
    closing delimiter; such a fragment is now kept.

    :param paragraph: text to split
    :param sentence_split_pattern: regex with a capturing group around
        the sentence-ending punctuation
    :return: list of sentences, each ending with its delimiter (the
        last one may lack a delimiter)
    """
    result = []
    sentences = re.split(sentence_split_pattern, paragraph)  # keeps delimiters
    for i in range(len(sentences) // 2):
        result.append(sentences[2 * i] + sentences[2 * i + 1])
    # Keep a non-empty trailing fragment that has no closing delimiter.
    if len(sentences) % 2 == 1 and sentences[-1]:
        result.append(sentences[-1])
    return result

def generate_ngram(input_list, n):
    """Return all k-grams of ``input_list`` for k = 1..n, as tuples.

    Grams are ordered by size first, then by start position, matching
    the original zip-based implementation.
    """
    grams = []
    for size in range(1, n + 1):
        # range is empty when the list is shorter than `size`.
        for start in range(len(input_list) - size + 1):
            grams.append(tuple(input_list[start:start + size]))
    return grams


def load_dictionary(filename):
    """Load an external word-frequency record.

    Each line is expected to be ``word<space>count``. Words with a
    count of 2 or less are skipped (minimum-frequency threshold).

    Fixes: the frequency is stored as ``int`` (the original compared
    it as an integer but stored the raw string), a malformed count
    (``ValueError``) is skipped like a short line instead of raising,
    and the dead ``f.close()`` after the ``with`` block is removed.

    :param filename: path to the UTF-8 frequency file
    :return: dict mapping word -> int frequency (frequency > 2)
    """
    word_freq = {}
    print('------> importing words')
    with open(filename, 'r', encoding='utf-8') as f:
        for line in f:
            try:
                word, count = line.strip().split(' ')[:2]
                freq = int(count)
            except (IndexError, ValueError):
                # Malformed line: report it and move on.
                print(line)
                continue
            # Minimum-frequency threshold.
            if freq > 2:
                word_freq[word] = freq
    return word_freq


def save_model(model, filename):
    """Pickle ``model`` to ``filename``.

    The file is opened in binary mode (pickle output is bytes, so no
    text encoding applies — the original's commented-out ``encoding``
    attempt was removed). The ``with`` block closes the file; the
    original's trailing ``fw.close()`` was dead code.

    :param model: any picklable object
    :param filename: destination path
    """
    with open(filename, 'wb') as fw:
        pickle.dump(model, fw)


def load_model(filename):
    """Unpickle and return the object stored in ``filename``.

    The ``with`` block closes the file; the original's trailing
    ``fr.close()`` was dead code.

    NOTE(review): ``pickle.load`` can execute arbitrary code — only
    call this on trusted files, never on untrusted input.

    :param filename: path to a pickle file
    :return: the unpickled object
    """
    with open(filename, 'rb') as fr:
        return pickle.load(fr)
