# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in 

# import numpy as np # linear algebra
# import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory

# import os
# print(os.listdir("../input"))

# Any results you write to the current directory are saved as output.
import numpy as np
import pandas as pd
import os
# Log the contents of the Kaggle input directory so the available data files are visible.
print(os.listdir("../input"))

import tensorflow as tf
import keras
from keras import Model
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
# Short aliases for the Keras backend and layers modules, used throughout the script.
K = keras.backend
L = keras.layers

# Fixed token-sequence length: shorter questions are padded, longer ones truncated.
maxlen = 72

def load_and_prec():
	"""Load the train/test CSVs, tokenize the question text, and pad to `maxlen`.

	Returns:
		x_train: padded integer sequences for the training questions.
		x_test: padded integer sequences for the test questions.
		y_train: binary target values from the training set.
		word_index: dict mapping each vocabulary word to its integer index.
	"""
	train_df = pd.read_csv("../input/train.csv")
	test_df = pd.read_csv("../input/test.csv")

	# Replace missing question text with a sentinel token before tokenizing.
	train_texts = train_df['question_text'].fillna("_##_").values
	test_texts = test_df['question_text'].fillna("_##_").values

	# Fit the vocabulary on the training text only, then map both splits to
	# integer sequences. `word_index` holds the learned word -> id mapping.
	tok = Tokenizer()
	tok.fit_on_texts(list(train_texts))
	train_seqs = tok.texts_to_sequences(train_texts)
	test_seqs = tok.texts_to_sequences(test_texts)

	# Pad/truncate every sequence to the same fixed length so they can be
	# fed to the Embedding layer as a dense matrix.
	x_train = pad_sequences(train_seqs, maxlen=maxlen)
	x_test = pad_sequences(test_seqs, maxlen=maxlen)

	y_train = train_df['target'].values

	return x_train, x_test, y_train, tok.word_index
	
def load_glove(word_index, max_features, embedding_file='../input/embeddings/glove.840B.300d/glove.840B.300d.txt'):
	"""Build an embedding matrix for the vocabulary from GloVe vectors.

	Args:
		word_index: dict mapping word -> integer index (Keras Tokenizer
			indices start at 1).
		max_features: number of rows in the returned matrix; words whose
			index is >= max_features are skipped.
		embedding_file: path to a space-separated GloVe text file, one
			"word v1 v2 ... vN" entry per line. Defaults to the Kaggle
			glove.840B.300d location (backward compatible).

	Returns:
		A (max_features, embed_size) float matrix. Rows for words absent
		from GloVe are left as random draws matching GloVe's mean/std.
	"""
	def get_coefs(word, *arr):
		return word, np.asarray(arr, dtype='float32')
	# Parse each line into (word, vector). Use a context manager so the
	# file handle is always closed (the original leaked it).
	with open(embedding_file, encoding='utf-8') as f:
		embeddings_index = dict(get_coefs(*line.split(" ")) for line in f)

	all_embs = np.stack(list(embeddings_index.values()))
	emb_mean, emb_std = all_embs.mean(), all_embs.std()
	embed_size = all_embs.shape[1]

	# Initialize every row with noise matching the GloVe distribution,
	# then overwrite the rows for words we actually have vectors for.
	embedding_matrix = np.random.normal(emb_mean, emb_std, (max_features, embed_size))
	for word, i in word_index.items():
		if i >= max_features:
			continue
		embedding_vector = embeddings_index.get(word)
		# Fixed: original tested the misspelled name 'embeding_vector',
		# which raised NameError on the first vocabulary hit.
		if embedding_vector is not None:
			embedding_matrix[i] = embedding_vector
	return embedding_matrix
	
def FastText(embedding_matrix):
	"""Build a FastText-style classifier: frozen embedding -> mean pool -> sigmoid.

	Args:
		embedding_matrix: (vocab_size, embed_dim) array used as fixed
			(non-trainable) weights for the Embedding layer.

	Returns:
		An uncompiled Keras Model mapping (maxlen,) integer sequences to a
		single probability.
	"""
	ipt = L.Input(shape=(maxlen,))
	x = L.Embedding(input_dim=embedding_matrix.shape[0],
					output_dim=embedding_matrix.shape[1],
					weights=[embedding_matrix],
					trainable=False)(ipt)
	# Average the word vectors over the sequence axis (the FastText trick).
	x = L.GlobalAveragePooling1D()(x)
	# Fixed: activation was misspelled 'sigmod', which makes Keras raise a
	# ValueError for an unknown activation; binary output needs 'sigmoid'.
	out = L.Dense(1, activation='sigmoid')(x)
	model = Model(inputs=ipt, outputs=out)

	return model

	
# ---- Training / inference driver ----
x_train, x_test, y_train, word_index = load_and_prec()
# +1 because Keras Tokenizer indices start at 1; row 0 of the embedding
# matrix stays as random noise and is never looked up by a real word.
max_features = len(word_index) + 1

embedding_matrix = load_glove(word_index, max_features)

model = FastText(embedding_matrix)
model.compile(
	loss='binary_crossentropy',
	optimizer='adam',
)

hists = model.fit(x_train, y_train,
				  batch_size=256, epochs=1,
				  verbose=1)

y_pred = model.predict(x_test)
print(y_pred)

# Fixed: the filename contained a stray space ('sample_ submission.csv'),
# which would raise FileNotFoundError at runtime.
sub = pd.read_csv('../input/sample_submission.csv')
# Use column indexing rather than attribute assignment: `sub.prediction = ...`
# does not create a new DataFrame column if one doesn't already exist.
# Threshold 0.2 converts probabilities to the boolean submission labels.
sub['prediction'] = y_pred > 0.2

sub.to_csv("submission.csv", index=False)