# coding:utf-8
'''
Data preprocessing utilities for text classification.
@author: wangyi
'''
import pandas as pd
from collections import Counter
import os
import numpy as np
import tensorflow.contrib.keras as kr
# Load contents and labels from the training CSV.
def get_contents_labels(train_file_path, char_or_word):
    """Return (contents, labels) read from the training CSV.

    char_or_word selects the text column: 'char' reads the character-level
    'article' column, 'word' reads the word-level 'word_seg' column.
    Raises ValueError for any other value.
    """
    frame = pd.read_csv(train_file_path, encoding='utf-8')
    level_to_column = {'char': 'article', 'word': 'word_seg'}
    if char_or_word not in level_to_column:
        raise ValueError("please choose level of text! char or word ")
    contents = list(frame[level_to_column[char_or_word]])
    labels = list(frame['class'])
    return contents, labels

# Load the label list and assign each label its line-index id.
def get_label_id(label_path):
    """Read one label per line from label_path and return {label: id}.

    The id of a label is its 0-based line index in the file.
    """
    with open(label_path, encoding='utf-8') as label_file:
        names = [line.replace('\n', '').strip() for line in label_file]
    return {name: index for index, name in enumerate(names)}

# Build the vocabulary file from the raw contents.
def create_vocab(contents, vocab_path, vocab_size=5000):
    """Build a vocabulary of the vocab_size most frequent tokens.

    Each element of contents is stringified and whitespace-split into
    tokens.  The vocab_size most common tokens (ties broken by first
    appearance) are kept, a '<PAD>' placeholder is prepended, and the
    vocabulary is written to vocab_path, one token per line with no
    trailing newline.  Returns the vocabulary as a list of tokens.
    """
    tokens = []
    for content in contents:
        tokens.extend(str(content).split())
    # Most frequent tokens first, capped at vocab_size.
    most_common = Counter(tokens).most_common(vocab_size)
    # Fix: the previous `zip(*most_common)` unpacking raised ValueError on
    # an empty corpus; a comprehension handles that case and also drops the
    # unused frequency values.
    vocab = ['<PAD>'] + [token for token, _count in most_common]
    with open(vocab_path, 'w', encoding='utf-8') as out:
        # Equivalent to the old write-loop that skipped the final newline.
        out.write('\n'.join(vocab))
    return vocab

# Build the token -> id mapping, creating the vocabulary file on first use.
def create_word_label(file_path, char_or_word, vocab_path, vocab_size=5000):
    """Return a dict mapping each vocabulary token to its integer id.

    When vocab_path already exists the vocabulary is loaded from it;
    otherwise it is built from the training data at file_path (at the
    char/word level selected by char_or_word) and written to vocab_path.
    Ids are 0-based positions in the vocabulary ('<PAD>' is id 0).
    """
    contents, _labels = get_contents_labels(file_path, char_or_word)
    if os.path.exists(vocab_path):
        with open(vocab_path, encoding='utf-8') as vocab_file:
            vocabulary = [line.replace('\n', '').strip() for line in vocab_file]
    else:
        vocabulary = create_vocab(contents, vocab_path, vocab_size)
    return {token: index for index, token in enumerate(vocabulary)}


def get_input_x_y_from_data(file_path, char_or_word, word_to_id, label_to_id, maxlen=400):
    """Vectorize a data file into padded id sequences and one-hot labels.

    file_path/char_or_word are forwarded to get_contents_labels; each
    content is tokenized, mapped through word_to_id (out-of-vocabulary
    tokens are dropped), and padded/truncated to maxlen.  Returns
    (x_id, y_id) where x_id has shape (n_samples, maxlen) and y_id has
    shape (n_samples, n_labels); rows whose label is missing from
    label_to_id are left all-zero.
    """
    contents, labels = get_contents_labels(file_path, char_or_word)
    x_id = []
    y_id = np.zeros([len(labels), len(label_to_id)])
    for content in contents:
        # Fix: split() (any whitespace run) matches the tokenization used by
        # create_vocab; the previous split(' ') missed tab/newline-separated
        # tokens, silently dropping in-vocabulary words.
        x_id.append([word_to_id[token] for token in str(content).split() if token in word_to_id])
    for i, label in enumerate(labels):
        if str(label) in label_to_id:
            y_id[i][label_to_id[str(label)]] = 1
    # Pad/truncate at the sequence end ('post') to the fixed length maxlen.
    x_id = kr.preprocessing.sequence.pad_sequences(x_id, maxlen, padding='post', truncating='post')

    return x_id, y_id


# Script entry point: this module is intended to be imported; running it
# directly only prints a progress notice.
if __name__ == '__main__':
    print('prossing.....')
