import gensim
import os
from gensim.models.word2vec import Word2Vec, PathLineSentences
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
import tensorflow as tf
import matplotlib.pyplot as plt
import json

configs = json.load(open('config/config.json', 'r'))

def lstm_crf(X):
    """Build a BiLSTM encoder with a linear projection to tag scores.

    Despite the name, this function only produces the per-token unary
    potentials (logits); the CRF decoding/loss is expected to be applied
    by the caller.

    Args:
        X: float32 tensor reshapeable to
           ``[batch_size, seq_length, embedding_size]``; all sizes come
           from the module-level ``configs``.

    Returns:
        A ``[batch_size, seq_length, output_size]`` float32 tensor of
        per-token tag scores.
    """
    embedding_size = configs['model']['embedding_size']
    # Number of hidden units per LSTM direction.
    unit_num = configs['model']['unit_num']
    # Output layer size; matches the number of distinct labels.
    output_size = configs['model']['output_size']
    batch_size = configs['training']['batch_size']
    seq_length = configs['model']['sequence_length']
    # NOTE(review): the original also read `dropout_rate = None` and `lr`
    # here but never used them — removed as dead locals.

    cell_forward = tf.nn.rnn_cell.BasicLSTMCell(unit_num)
    cell_backward = tf.nn.rnn_cell.BasicLSTMCell(unit_num)

    # Restore the [batch, seq, embedding] shape expected by dynamic_rnn.
    input_bi_lstm = tf.reshape(X, [batch_size, seq_length, embedding_size])

    # bi_outputs is a (forward, backward) pair of
    # [batch_size, seq_length, unit_num] tensors; the state is unused.
    bi_outputs, _bi_state = tf.nn.bidirectional_dynamic_rnn(
        cell_forward, cell_backward, input_bi_lstm, dtype=tf.float32)

    # Concatenate both directions -> [batch, seq, 2*unit_num].
    bi_output = tf.concat(bi_outputs, axis=2)

    # Linear projection from BiLSTM features to tag scores.
    W = tf.get_variable('projection_w', [2 * unit_num, output_size])
    b = tf.get_variable('projection_b', [output_size])

    # Flatten time and batch dims for a single matmul, then restore shape.
    x_reshape = tf.reshape(bi_output, [-1, 2 * unit_num])
    projection = tf.matmul(x_reshape, W) + b
    outputs = tf.reshape(projection, [batch_size, seq_length, output_size])
    return outputs




def train(X):
    # NOTE(review): DEAD CODE — this function is shadowed by the second
    # `train(feature, target, configs)` definition later in this file, so it
    # can never be called by name. It also returns nothing and builds no
    # loss/optimizer, so it looks like an abandoned earlier draft; consider
    # deleting it or merging it into the later `train`.
    embedding_size = configs['model']['embedding_size']
    # Number of hidden units per LSTM layer.
    unit_num = configs['model']['unit_num']
    # NOTE(review): unused local — never referenced below.
    dropout_rate = None
    # Output layer size; matches the number of distinct labels.
    output_size = configs['model']['output_size']
    #
    batch_size = configs['training']['batch_size']
    seq_length = configs['model']['sequence_length']
    # NOTE(review): unused local — learning rate is read but never applied.
    lr = configs['model']['lr']

    # NOTE(review): the parameter `X` is immediately clobbered by this
    # placeholder, so the argument passed in is ignored.
    X = tf.placeholder(tf.float32, shape=[batch_size, seq_length*embedding_size])
    Y = tf.placeholder(tf.float32, shape=[batch_size,seq_length])

    # Per-token tag scores: [batch_size, seq_length, output_size].
    pred = lstm_crf(X)

    # NOTE(review): no-op reshape (Y already has this shape); `real_y` is
    # never used afterwards.
    real_y = tf.reshape(Y, [batch_size, seq_length])




def train(feature, target, configs):
    # 超参
    embedding_size = configs['model']['embedding_size']
    unit_num = configs['model']['unit_num']
    batch_size = configs['training']['batch_size']
    seq_length = configs['model']['sequence_length']
    lr = configs['model']['lr']
    iter_num = configs['training']['epochs']
    save_path = configs['model']['save_dir']

    X = tf.placeholder(tf.float32, shape=[batch_size,
                                          seq_length*embedding_size])
    Y = tf.placeholder(tf.float32, shape=[batch_size, seq_length])

    pred = lstm_crf(X)