# coding:utf-8
# Author : hiicy redldw
# Date : 2019/05/22

######### 文字拆分   ##########
import codecs
import json

import jieba
import keras
from sklearn.feature_extraction.text import (
    CountVectorizer,
    TfidfTransformer,
    TfidfVectorizer,
)
from sklearn.preprocessing import LabelEncoder, OneHotEncoder


def seg_sentence(self, sentence, stopwords_path=None):
    """
    Segment a sentence with jieba and drop stopwords.

    Args:
        self: unused; kept for backward compatibility (the function looks
            like it was lifted out of a class method — TODO confirm).
        sentence: raw input text to segment.
        stopwords_path: optional path to a stopword file, one word per
            line, gb2312-encoded. When None, no stopwords are filtered
            (the original crashed on the None default).

    Returns:
        The kept tokens joined by spaces, with a trailing space when
        non-empty (matching the original output format).
    """
    def _load_stopwords(filepath):
        # Closure helper: read the stopword list; `with` closes the file
        # promptly (the original leaked the handle). A set gives O(1)
        # membership tests in the filter loop below.
        with codecs.open(filepath, 'r', encoding='gb2312') as f:
            return {line.strip() for line in f}

    stopwords = _load_stopwords(stopwords_path) if stopwords_path else set()
    kept = [
        word for word in jieba.cut(sentence.strip())
        if word not in stopwords and word != '\t'
    ]
    # join() instead of repeated += (quadratic); keep the trailing space
    # the original produced.
    return ''.join(word + ' ' for word in kept)

######### 建立字典 #######################
# 基本式
from collections import Counter
# 这个用的字维度
def getVocabularyList(content_list, vocabulary_size):
    """Return ['PAD'] plus the `vocabulary_size` most frequent characters.

    Character-level vocabulary: all texts are concatenated and counted
    per character; slot 0 is reserved for the 'PAD' token.
    """
    char_counts = Counter(''.join(content_list))
    top_chars = [char for char, _ in char_counts.most_common(vocabulary_size)]
    return ['PAD'] + top_chars
# NOTE(review): `vocabulary_list` is a local inside getVocabularyList and is
# NOT defined at module scope — this line raises NameError unless the result
# of getVocabularyList(...) is first bound to `vocabulary_list`. TODO confirm
# intended wiring.
# Map each vocabulary entry to its integer id ('PAD' gets 0).
word2id_dict = {word: idx for idx, word in enumerate(vocabulary_list)}


def content2idList(content):
    """Convert a text (iterable of words/chars) to id list, skipping OOV entries."""
    return [word2id_dict[word] for word in content if word in word2id_dict]

# keras
def TokenFile(self,flag):
    """
    Tokenize text with a Keras Tokenizer, pad the integer sequences to a
    fixed length, and one-hot encode the labels.

    Args:
        self: expected to provide `tokenier`, `labels` and `_mapLabel()`
            — presumably a trainer/preprocessor class; TODO confirm.
        flag: when "train", the id->label mapping is dumped as JSON to
            C.labelMapPath.

    Returns:
        (contents, labels): padded integer sequences and one-hot labels.

    NOTE(review): this function references several names not defined in
    this file (`contents`, `sequence`, `C`, `np`) — it cannot run as-is;
    they presumably come from the enclosing class/module. Verify.
    """
    import keras.preprocessing.text as T
    tokenier = T.Tokenizer(num_words=1000,oov_token="<UNK>")
    # NOTE(review): `contents` is read before any local assignment — it is
    # a free variable here; likely meant to be self.contents. TODO confirm.
    tokenier.fit_on_texts(contents)
    wi = tokenier.word_index  # REW: patch the word index for the unknown token
    # Shift every word id up by one and reserve id 1 for <UNK>.
    wj = {k: v + 1 for k, v in wi.items()}
    wj['<UNK>'] = 1
    # NOTE(review): `wj` is never used after this point, and the call below
    # uses self.tokenier rather than the local `tokenier` fitted above —
    # looks like a bug; confirm which tokenizer is intended.
    contents = self.tokenier.texts_to_sequences(contents)
# Pad/truncate every sequence to C.MAX_LENGTH (zero-padded at the end).
    contents = sequence.pad_sequences(contents,
                                      maxlen=C.MAX_LENGTH,
                                      value=0,
                                      padding='post',
                                      )  # word count + space + terminator
    labelTOid,idTOlabel = self._mapLabel()
    if flag == "train":
        # Persist id->label so predictions can be decoded later.
        with codecs.open(C.labelMapPath, 'w', encoding='utf-8') as f:
            json.dump(idTOlabel,f)
    labels = []
    for label in self.labels:
        labels.append(labelTOid[label])
    # One-hot encode the integer label ids.
    labels = keras.utils.to_categorical(np.array(labels))
    return contents,labels
############ 对标签编码 ###############
# Encode the string labels as integer ids, shaped as a column vector
# (fit on train only; val/test reuse the fitted mapping).
label_enc = LabelEncoder()
train_y = label_enc.fit_transform(train_y).reshape(-1, 1)
val_y = label_enc.transform(val_y).reshape(-1, 1)
test_y = label_enc.transform(test_y).reshape(-1, 1)

# One-hot encode the integer label ids for each split.
onehot_enc = OneHotEncoder()
train_y = onehot_enc.fit_transform(train_y).toarray()
val_y = onehot_enc.transform(val_y).toarray()
test_y = onehot_enc.transform(test_y).toarray()
# Alternatively, via keras:
keras.utils.to_categorical(np.array(train_y))

########### tf idf ###############
# 一是计算tf-idf是全量计算，所以需要将train+test+val的所有corpus都相加
# 二是为了防止文本特征过大，需要去低频词
# tf-idf must be computed over the full corpus, so concatenate all splits.
corpus_set = train_set + val_set + test_set
print("length of corpus is: " + str(len(corpus_set)),type(corpus_set))
# Term-frequency matrix: element a[i][j] is the count of word j in doc i.
vectorizer=CountVectorizer(min_df=1e-5) # drop words with document frequency < 1e-5 (rare words)

# BUG FIX: the original used TfidfVectorizer here, but TfidfVectorizer
# expects raw documents and raises when fed a sparse count matrix.
# TfidfTransformer is the class that re-weights an existing count matrix.
transformer = TfidfTransformer()
t = vectorizer.fit_transform(corpus_set)
# Element w[i][j] is the tf-idf weight of word j in doc i.
tfidf = transformer.fit_transform(t)
# All words in the bag-of-words vocabulary.
# NOTE(review): get_feature_names() was removed in scikit-learn 1.2;
# switch to get_feature_names_out() when upgrading.
words = vectorizer.get_feature_names()

##############################

def extract_character_vocab(data):
    '''
    Build the id<->character lookup tables for a newline-separated corpus.

    Four special tokens are prepended so they always receive the lowest
    ids (PAD=0, UNK=1, GO=2, EOS=3). Returns (int_to_vocab, vocab_to_int).
    '''
    special_words = ['<PAD>', '<UNK>', '<GO>',  '<EOS>']

    # Unique characters across every line (newlines themselves excluded).
    unique_chars = list({ch for line in data.split('\n') for ch in line})
    full_vocab = special_words + unique_chars

    int_to_vocab = dict(enumerate(full_vocab))
    vocab_to_int = {ch: i for i, ch in int_to_vocab.items()}

    return int_to_vocab, vocab_to_int

