# -*- coding: utf-8 -*-
# Import required packages
import ast
import os
import sys
from multiprocessing import cpu_count

import matplotlib.pyplot as plt
import numpy as np
import paddle
import paddle.fluid as fluid
import psycopg2
import pymysql

# Resolve the directory containing this .py file.
# abspath() guards against __file__ being a bare filename: when the script
# is launched as `python train.py`, os.path.dirname(__file__) is '' and
# os.chdir('') raises FileNotFoundError.
current_path = os.path.dirname(os.path.abspath(__file__))
# Make the relative ../dataset and ../model paths used below resolve
# against this file's location, not the caller's cwd.
os.chdir(current_path)

# The fluid.layers / fluid.io APIs used below require static-graph mode.
paddle.enable_static()


# Get the vocabulary size
def get_dict_len(dict_path):
    """Return the number of entries in the vocabulary stored at *dict_path*.

    The file's first line must be the literal repr of a Python dict
    (token -> id). The dict's length is the embedding vocabulary size.
    """
    with open(dict_path, 'r', encoding='utf-8') as f:
        # literal_eval instead of eval: the file content is data, not code,
        # so arbitrary expressions in the file must not be executed.
        vocab = ast.literal_eval(f.readline())

    return len(vocab)


# Shared preprocessing for the train/test readers.
def data_mapper(sample):
    """Convert a ("id,id,...", label) pair into ([int ids], int label)."""
    text, raw_label = sample
    ids = list(map(int, text.split(',')))
    return ids, int(raw_label)


# Build the training-set reader (shuffled, parallel-mapped).
def train_reader(train_list_path):
    """Return a reader over *train_list_path*.

    Each line is "<comma-separated ids>\t<label>"; samples are shuffled
    once per reader invocation and decoded by data_mapper in parallel.
    """
    def reader():
        with open(train_list_path, 'r', encoding="utf-8") as f:
            lines = f.readlines()
        # Shuffle so every pass sees samples in a fresh order.
        np.random.shuffle(lines)
        for entry in lines:
            ids_text, label_text = entry.split('\t')
            yield ids_text, label_text

    return paddle.reader.xmap_readers(data_mapper, reader, cpu_count(), 1024)


# Build the test-set reader (no shuffling).
def test_reader(test_list_path):
    """Return a reader over *test_list_path*.

    Each line is "<comma-separated ids>\t<label>", decoded by data_mapper
    in parallel worker processes.
    """
    def reader():
        # encoding added for consistency with train_reader: the list files
        # are UTF-8, and the platform default encoding may not be.
        with open(test_list_path, 'r', encoding='utf-8') as f:
            for line in f:
                data, label = line.split('\t')
                yield data, label

    return paddle.reader.xmap_readers(data_mapper, reader, cpu_count(), 1024)


def draw_train_process(title, iters, costs, accs, label_cost, label_acc):
    """Plot training cost and accuracy against iterations on twin y-axes."""
    fig, cost_ax = plt.subplots()

    # Cost curve on the left-hand y-axis.
    cost_ax.plot(iters, costs, 'b', label=label_cost)
    cost_ax.grid(True)
    cost_ax.axis('tight')
    cost_ax.set_title(title, fontsize=24)
    cost_ax.set_xlabel("iter", fontsize=20)
    cost_ax.set_ylabel("cost", fontsize=20)
    cost_ax.legend(loc="center")

    # Accuracy curve shares the x-axis but gets its own y-axis on the right.
    acc_ax = cost_ax.twinx()
    acc_ax.plot(iters, accs, color='green', label=label_acc)
    acc_ax.set_ylabel("acc", fontsize=20)
    acc_ax.legend(loc="center right")

    plt.show()


# Build the text-CNN classifier network.
def CNN_net(data, dict_dim, class_dim, emb_dim=128, hid_dim=128, hid_dim2=98):
    """Return the softmax output variable of a two-branch text CNN.

    data: int64 token-id sequence placeholder; dict_dim: vocabulary size;
    class_dim: number of output classes; emb_dim: embedding width;
    hid_dim/hid_dim2: filter counts for the 3-gram / 4-gram branches.
    """
    # Shared embedding lookup feeding both convolution branches.
    emb = fluid.layers.embedding(input=data, size=[dict_dim, emb_dim])

    # Branch 1: convolution over 3-token windows, sqrt pooling.
    branch_3gram = fluid.nets.sequence_conv_pool(
        input=emb,
        num_filters=hid_dim,
        filter_size=3,
        act="tanh",
        pool_type="sqrt")

    # Branch 2: convolution over 4-token windows, sqrt pooling.
    branch_4gram = fluid.nets.sequence_conv_pool(
        input=emb,
        num_filters=hid_dim2,
        filter_size=4,
        act="tanh",
        pool_type="sqrt")

    # Both branches feed one fully-connected softmax classification layer.
    return fluid.layers.fc(
        input=[branch_3gram, branch_4gram], size=class_dim, act="softmax")


# Get the number of FAQ classes from MySQL.
def get_faq_len():
    """Return max(id) + 1 from table qa_fqa in MySQL (the class count).

    NOTE(review): credentials are hard-coded; consider moving to config.
    """
    connect = pymysql.Connect(
        host="192.168.8.23",
        port=3306,
        user="root",
        passwd="root",
        db="ld_qa",
        charset='utf8'
    )
    try:
        cursor = connect.cursor()
        try:
            cursor.execute("select max(id) from qa_fqa")
            count = cursor.fetchall()
        finally:
            cursor.close()
    finally:
        # try/finally added: the original leaked the cursor and connection
        # if execute/fetchall raised.
        connect.close()
    # ids start at 0, so max(id) + 1 is the number of classes.
    return count[0][0] + 1


def get_faq_len_by_postGres():
    """Return max(id) + 1 from table qa_fqa in PostgreSQL (the class count).

    NOTE(review): credentials are hard-coded; consider moving to config.
    """
    connect = psycopg2.connect(
        database="jjgj_db_23", user="postgres", password="123456", host="192.168.8.25", port="5432"
    )
    try:
        cursor = connect.cursor()
        try:
            cursor.execute("select max(id) from qa_fqa")
            count = cursor.fetchall()
        finally:
            cursor.close()
    finally:
        # try/finally added: the original leaked the cursor and connection
        # if execute/fetchall raised.
        connect.close()
    # ids start at 0, so max(id) + 1 is the number of classes.
    return count[0][0] + 1


# Module-level static-graph input placeholders, shared by CNN_net (words)
# and the loss/accuracy ops in start_train (label).
# 'word' is a lod-level-1 int64 token-id sequence; 'label' is an int64 class id.
words = fluid.layers.data(append_batch_size=True, name='word', shape=[1], dtype='int64', lod_level=1)
label = fluid.layers.data(name='label', shape=[1], dtype='int64')


# Start training
def start_train():
    """Train the text-CNN classifier and save an inference model.

    Reads train_list.txt and dict_txt.txt from ../dataset, trains on CPU,
    and writes the inference model to ../model/work/infer_model/.
    """
    # Create an executor (switch to CUDAPlace for GPU training).
    # place = fluid.CUDAPlace(0) # GPU
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)

    model_save_dir = '../model/work/infer_model/'
    data_root_path = "../dataset"
    # Batched reader over the training list (test reader kept for reference).
    train_reader_ = paddle.batch(reader=train_reader(data_root_path + '/train_list.txt'), batch_size=128)
    # test_reader_ = paddle.batch(reader=test_reader(data_root_path + '/test_list.txt'), batch_size=128)
    # Vocabulary size for the embedding layer.
    dict_dim = get_dict_len(data_root_path + '/dict_txt.txt')
    class_dim = get_faq_len_by_postGres()
    # class_dim = get_faq_len()

    # Build the classifier on the module-level input placeholders.
    model = CNN_net(words, dict_dim, class_dim)

    # Loss and accuracy metrics.
    cost = fluid.layers.cross_entropy(input=model, label=label)
    avg_cost = fluid.layers.mean(cost)
    acc = fluid.layers.accuracy(input=model, label=label)

    # Cloned test program (evaluation currently disabled).
    # test_program = fluid.default_main_program().clone(for_test=True)

    # Optimizer.
    optimizer = fluid.optimizer.AdadeltaOptimizer(learning_rate=0.002)
    opt = optimizer.minimize(avg_cost)

    # Initialize parameters.
    exe.run(fluid.default_startup_program())

    feeder = fluid.DataFeeder(place=place, feed_list=[words, label])

    # Accumulators for the training curve (see draw_train_process).
    all_train_iter = 0
    all_train_iters = []
    all_train_costs = []
    all_train_accs = []
    # NOTE(review): the epoch count equals the number of classes
    # (class_dim) — that coupling looks unintentional; confirm.
    for pass_id in range(class_dim):
        # Run one training pass.
        # train_cost = 0  # ty
        for batch_id, data in enumerate(train_reader_()):
            train_cost, train_acc = exe.run(program=fluid.default_main_program(),
                                            feed=feeder.feed(data),
                                            fetch_list=[avg_cost, acc])

            # 128 == batch_size above; counts samples seen so far.
            all_train_iter = all_train_iter + 128
            all_train_iters.append(all_train_iter)
            all_train_costs.append(train_cost[0])
            all_train_accs.append(train_acc[0])

            if batch_id % 10 == 0:
                print('Pass:%d, Batch:%d, Cost:%0.5f, Acc:%0.5f' % (pass_id, batch_id, train_cost[0], train_acc[0]))
        # Per-pass evaluation (currently disabled).
        # test_costs = []
        # test_accs = []
        # for batch_id, data in enumerate(test_reader_()):
        #     test_cost, test_acc = exe.run(program=test_program,
        #                                   feed=feeder.feed(data),
        #                                   fetch_list=[avg_cost, acc])
        #     test_costs.append(test_cost[0])
        #     test_accs.append(test_acc[0])
        # # Compute mean test loss and accuracy
        # test_cost = (sum(test_costs) / len(test_costs))
        # test_acc = (sum(test_accs) / len(test_accs))
        # print('Test:%d, Cost:%0.5f, ACC:%0.5f' % (pass_id, test_cost, test_acc))

    # Save the inference model.
    if not os.path.exists(model_save_dir):
        os.makedirs(model_save_dir)
    fluid.io.save_inference_model(model_save_dir,
                                  feeded_var_names=[words.name],
                                  target_vars=[model],
                                  executor=exe)

    print('训练模型保存完成！')


if __name__ == '__main__':
    # Script entry point: kick off training when executed directly.
    start_train()
