#coding=utf8
# Training script: data preparation, back-propagation training, and test/prediction output
import tensorflow as tf
import model
import pandas as pd
import numpy as np

# Data / model file paths
train_data_file = r'./data/train.csv'
test_data_file = r'./data/test.csv'
# Kaggle's example submission file; used here as the test-set labels
test_label_file = r'./data/gender_submission.csv'
model_save_path = r'./ckpt/model'
output_file = r'./result.csv'

# Training hyperparameters
learning_rate = 0.001
BATCH_SIZE = 20

# Network dimensions: 11 engineered features in, one hidden layer, 2 classes out
input_size = 11
hidden_size = 20
output_size = 2

epoch = 2000      # number of full passes over the training set
KEEP_PROB = 0.5   # dropout keep probability used during training


def get_train_data():
    """Load ./data/train.csv and build the training feature matrix and labels.

    Returns:
        x  : ndarray with 11 feature columns per passenger
             (Pclass, Sex, SibSp, Parch, Embarked, Child, FamilySize,
              IsAlone, Age_bin, Fare_bin, Title)
        y_ : float32 one-hot ndarray of shape (n, 2), columns [Dead, Survived]
    """
    data = pd.read_csv(train_data_file,
        sep=',',
        dtype={
            'Name' : 'str',
            'Survived' : 'int64',
            'Pclass' : 'float32',
            'Sex' : 'str',
            'Age' : 'float32',
            'SibSp' : 'float32',
            'Parch' : 'float32',
            'Fare' : 'float32',
            'Embarked' : 'str',
        }
    )

    # Impute missing Age values with a random-forest regressor fitted on the
    # rows whose Age is known.
    # NOTE(review): 'Survived' is used as an imputation feature here, but it
    # is not available in get_test_data(), so train and test ages are imputed
    # by slightly different models — confirm this is intended.
    from sklearn.ensemble import RandomForestRegressor
    age = data[['Age','Survived','Fare','Parch','SibSp','Pclass']]
    age_notnull = age.loc[data.Age.notnull()]
    age_isnull = age.loc[data.Age.isnull()]
    rfr = RandomForestRegressor(n_estimators=1000, n_jobs=-1)
    rfr.fit(age_notnull.values[:, 1:], age_notnull.values[:, 0])
    data.loc[data.Age.isnull(), 'Age'] = rfr.predict(age_isnull.values[:, 1:])

    # Build one-hot labels: column 0 = Dead, column 1 = Survived.
    y_ = data.loc[:, 'Survived']
    y_ = pd.concat([y_.map(lambda v: 0 if v == 1 else 1), y_], axis=1)
    y_.columns = ['Dead', 'Survived']
    y_ = y_.astype('float32').values

    # Feature cleaning
    x = data.loc[:, ['Name','Pclass','Sex','Age','SibSp','Parch','Fare','Embarked']]
    # Encode sex as 0 (female) / 1 (male).
    x['Sex'] = x['Sex'].replace(['female', 'male'], [0, 1]).astype('int32')
    # Encode port of embarkation; missing values default to 'S'.
    x['Embarked'] = x['Embarked'].fillna('S').map({'C': 0, 'Q': 1, 'S': 2})
    # Fill missing fares with the median fare.
    x['Fare'] = x['Fare'].fillna(x['Fare'].median())

    # Feature engineering
    # Child flag: 1 if age <= 16, else 0.
    # (A stale comment previously claimed the threshold was 12.)
    x['Child'] = x.Age.apply(lambda a: 1 if a <= 16 else 0).astype('int32')
    # Family size = siblings/spouses + parents/children + self.
    x['FamilySize'] = (x['SibSp'] + x['Parch'] + 1).astype('int32')
    # IsAlone: 1 when the passenger has no family aboard.
    x['IsAlone'] = x.FamilySize.apply(lambda s: 1 if s == 1 else 0)
    # Bucketize age into ordinal bins.
    x['Age_bin'] = pd.cut(x['Age'], bins=[0, 16, 32, 48, 1200],
                          labels=['Children', 'Teenage', 'Adult', 'Elder'])
    x['Age_bin'] = x['Age_bin'].map({'Children': 0, 'Teenage': 1, 'Adult': 2, 'Elder': 3})
    # Bucketize fare into ordinal bins (lower edge -1 so fare 0 is included).
    x['Fare_bin'] = pd.cut(x['Fare'], bins=[-1, 7.91, 14.45, 31, 12000],
                           labels=['Low_fare', 'median_fare', 'Average_fare', 'high_fare'])
    x['Fare_bin'] = x['Fare_bin'].map({'Low_fare': 0, 'median_fare': 1, 'Average_fare': 2, 'high_fare': 3})

    # Extract the passenger's title from the Name field.
    import re
    def get_title(name):
        # A title is a word ending in '.' after a space, e.g. ' Mr.'.
        # Raw string fixes the previous invalid '\.' escape sequence.
        title_search = re.search(r' ([A-Za-z]+)\.', name)
        return title_search.group(1) if title_search else ""
    x['Title'] = x['Name'].apply(get_title)
    # Collapse rare titles and normalize French/abbreviated variants.
    x['Title'] = x['Title'].replace(['Lady', 'Countess', 'Capt', 'Col', 'Don',
                                     'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
    x['Title'] = x['Title'].replace('Mlle', 'Miss')
    x['Title'] = x['Title'].replace('Ms', 'Miss')
    x['Title'] = x['Title'].replace('Mme', 'Mrs')
    x['Title'] = x['Title'].map({"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5})
    # Any unmapped title becomes 0.
    x['Title'] = x['Title'].fillna(0)

    # Drop raw fields that have been replaced by engineered features.
    x.drop(["Name", "Age", "Fare"], axis=1, inplace=True)

    print(x.head())

    return x.values, y_

def get_test_data():
    """Load test.csv and gender_submission.csv; build test features and labels.

    Returns:
        x           : ndarray with the same 11 feature columns as get_train_data()
        y_          : float32 one-hot ndarray of shape (n, 2), columns [Dead, Survived]
        PassengerId : int32 ndarray of shape (n, 1), for the submission file
    """
    data = pd.read_csv(test_data_file,
        sep=',',
        dtype={
            'Name' : 'str',
            'Pclass' : 'float32',
            'Sex' : 'str',
            'Age' : 'float32',
            'SibSp' : 'float32',
            'Parch' : 'float32',
            'Fare' : 'float32',
            'Embarked' : 'str',
        }
    )

    # Impute missing Age values with a random-forest regressor fitted on the
    # rows whose Age is known.
    from sklearn.ensemble import RandomForestRegressor
    # Fill missing fares with 0 first so Fare is usable as an imputation
    # feature. After this, x['Fare'] below has no NaN, so no further fare
    # fill is needed (the old second fillna(median) was a no-op).
    data['Fare'] = data['Fare'].fillna(0)
    age = data[['Age','Fare','Parch','SibSp','Pclass']]
    age_notnull = age.loc[data.Age.notnull()]
    age_isnull = age.loc[data.Age.isnull()]
    rfr = RandomForestRegressor(n_estimators=1000, n_jobs=-1)
    rfr.fit(age_notnull.values[:, 1:], age_notnull.values[:, 0])
    data.loc[data.Age.isnull(), 'Age'] = rfr.predict(age_isnull.values[:, 1:])

    # Feature cleaning (mirrors get_train_data)
    x = data.loc[:, ['Name','Pclass','Sex','Age','SibSp','Parch','Fare','Embarked']]
    # Encode sex as 0 (female) / 1 (male).
    x['Sex'] = x['Sex'].replace(['female', 'male'], [0, 1]).astype('int32')
    # Encode port of embarkation; missing values default to 'S'.
    x['Embarked'] = x['Embarked'].fillna('S').map({'C': 0, 'Q': 1, 'S': 2})

    # Feature engineering (mirrors get_train_data)
    # Child flag: 1 if age <= 16, else 0.
    x['Child'] = x.Age.apply(lambda a: 1 if a <= 16 else 0).astype('int32')
    # Family size = siblings/spouses + parents/children + self.
    x['FamilySize'] = (x['SibSp'] + x['Parch'] + 1).astype('int32')
    # IsAlone: 1 when the passenger has no family aboard.
    x['IsAlone'] = x.FamilySize.apply(lambda s: 1 if s == 1 else 0)
    # Bucketize age into ordinal bins.
    x['Age_bin'] = pd.cut(x['Age'], bins=[0, 16, 32, 48, 1200],
                          labels=['Children', 'Teenage', 'Adult', 'Elder'])
    x['Age_bin'] = x['Age_bin'].map({'Children': 0, 'Teenage': 1, 'Adult': 2, 'Elder': 3})
    # Bucketize fare into ordinal bins (lower edge -1 so fare 0 is included).
    x['Fare_bin'] = pd.cut(x['Fare'], bins=[-1, 7.91, 14.45, 31, 12000],
                           labels=['Low_fare', 'median_fare', 'Average_fare', 'high_fare'])
    x['Fare_bin'] = x['Fare_bin'].map({'Low_fare': 0, 'median_fare': 1, 'Average_fare': 2, 'high_fare': 3})

    # Extract the passenger's title from the Name field.
    import re
    def get_title(name):
        # A title is a word ending in '.' after a space, e.g. ' Mr.'.
        # Raw string fixes the previous invalid '\.' escape sequence.
        title_search = re.search(r' ([A-Za-z]+)\.', name)
        return title_search.group(1) if title_search else ""
    x['Title'] = x['Name'].apply(get_title)
    # Collapse rare titles and normalize French/abbreviated variants.
    x['Title'] = x['Title'].replace(['Lady', 'Countess', 'Capt', 'Col', 'Don',
                                     'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
    x['Title'] = x['Title'].replace('Mlle', 'Miss')
    x['Title'] = x['Title'].replace('Ms', 'Miss')
    x['Title'] = x['Title'].replace('Mme', 'Mrs')
    x['Title'] = x['Title'].map({"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5})
    # Any unmapped title becomes 0.
    x['Title'] = x['Title'].fillna(0)

    # Drop raw fields that have been replaced by engineered features.
    x.drop(["Name", "Age", "Fare"], axis=1, inplace=True)

    x = x.values

    # Labels come from the example submission file.
    data = pd.read_csv(test_label_file,
        sep=',',
        dtype={
            'PassengerId' : 'int32',
            'Survived' : 'int32'
        }
    )
    # Build one-hot labels: column 0 = Dead, column 1 = Survived.
    y_ = data.loc[:, 'Survived']
    y_ = pd.concat([y_.map(lambda v: 0 if v == 1 else 1), y_], axis=1)
    y_.columns = ['Dead', 'Survived']
    y_ = y_.astype('float32').values

    # Passenger ids as an (n, 1) column for later hstack with predictions.
    PassengerId = data.loc[:, 'PassengerId'].astype('int32')
    PassengerId = PassengerId.values.reshape((-1, 1))

    return x, y_, PassengerId



def Train():
    """Train the two-layer network on the training set and save a checkpoint.

    Builds the graph via the project's `model` module, runs mini-batch Adam
    for `epoch` passes, prints full-dataset loss/accuracy each epoch, and
    saves the variables to `model_save_path`.

    NOTE(review): Test() rebuilds the same variables and restores by the
    auto-generated default names, so the tf.Variable creation order below
    must stay in sync with Test().
    """
    X, Y_ = get_train_data()

    # Model parameters (fixed seed for reproducible initialization).
    w1 = tf.Variable(tf.random_normal([input_size, hidden_size], stddev=1.0, seed=2.0))
    w2 = tf.Variable(tf.random_normal([hidden_size, output_size], stddev=1.0, seed=2.0))
    b1 = tf.Variable(tf.zeros([hidden_size]), name='bias1')
    b2 = tf.Variable(tf.zeros([output_size]), name='bias2')

    x, y_, keep_prob = model.input_placeholder(input_size, output_size)
    y = model.forward(x, w1, w2, b1, b2, keep_prob=keep_prob)
    loss = model.loss(y, y_)
    y = tf.nn.softmax(y)
    accuary = model.accuary(y, y_)

    # Back-propagation: Adam optimizer on the model loss.
    train_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)

    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        print('Train start...')
        # Ceil division. The previous `len(Y_) // BATCH_SIZE + 1` fed an
        # *empty* final batch whenever len(Y_) was an exact multiple of
        # BATCH_SIZE; the mean loss over an empty batch is NaN and its
        # gradients corrupt the weights. Batch count is identical to the
        # old code in the non-divisible case.
        num_batches = (len(Y_) + BATCH_SIZE - 1) // BATCH_SIZE
        for i in range(epoch):
            for j in range(num_batches):
                start = j * BATCH_SIZE
                end = start + BATCH_SIZE  # numpy slicing clamps past the end
                sess.run(train_op,
                         feed_dict={x: X[start:end], y_: Y_[start:end], keep_prob: KEEP_PROB})
            # Per-epoch report on the full dataset, with dropout disabled.
            print(i, sess.run([loss, accuary], feed_dict={x: X, y_: Y_, keep_prob: 1.0}))
        print('Train end.')

        # Persist the trained variables for Test() to restore.
        print('Saving model...')
        saver.save(sess, model_save_path)
        print('Save finally.')
        
def Test():
    """Restore the trained model, evaluate it on the test set, and write
    the Kaggle submission file to `output_file`.

    NOTE(review): the four tf.Variable(...) calls below must be created in
    the same order as in Train() — tf.train.Saver matches checkpoint
    entries by the auto-generated default variable names, which depend on
    creation order. Reordering them would break saver.restore().
    """
    # Load test features, one-hot labels and passenger ids
    X, Y_, PassengerId = get_test_data()

    # Rebuild the same graph as in Train()
    w1 = tf.Variable(tf.random_normal([input_size, hidden_size], stddev=1.0, seed=2.0))
    w2 = tf.Variable(tf.random_normal([hidden_size, output_size], stddev=1.0, seed=2.0))
    b1 = tf.Variable(tf.zeros([hidden_size]), name='bias1')
    b2 = tf.Variable(tf.zeros([output_size]), name='bias2')

    x, y_, keep_prob = model.input_placeholder(input_size, output_size)
    y = model.forward(x, w1, w2, b1, b2, keep_prob=keep_prob)
    loss = model.loss(y, y_)
    y = tf.nn.softmax(y)
    accuary = model.accuary(y, y_)
    # Predicted class index per row: 0 = Dead, 1 = Survived
    y = tf.argmax(y, 1)

    # Saver used to restore the checkpoint written by Train()
    saver = tf.train.Saver()

    with tf.Session() as sess:
        # Restore trained variables instead of initializing them
        saver.restore(sess, model_save_path)
        # Note: `loss` and `y` are rebound from graph tensors to numpy
        # results here; dropout is disabled for evaluation (keep_prob=1.0)
        loss, y, acc_result = sess.run([loss, y, accuary], feed_dict={x:X, y_:Y_, keep_prob: 1.0})
        # print(y.shape)
        # print(PassengerId.shape)
        # np.set_printoptions(threshold=np.inf)
        # tmp = np.hstack((PassengerId,y))
        # print(tmp)
        print(loss)
        print(y)
        print(acc_result)

    # Build the submission file: one PassengerId,Survived row per passenger
    # f = np.vectorize(lambda x: 1 if x>0.5 else 0, otypes=[np.int32])
    # Survived = f(y[:,1])
    # print(Survived)
    Survived = y.reshape((-1,1))
    result = np.hstack((PassengerId, Survived))
    result = pd.DataFrame(result, columns=['PassengerId', 'Survived'])
    result.to_csv(output_file, sep=',', encoding='utf-8', index=False)

def main():
    """Script entry point: run the training phase.

    Uncomment Test() (and comment out Train()) to produce result.csv
    from a previously saved checkpoint.
    """
    Train()
    # Test()


if __name__ == '__main__':
    main()
