#!/usr/bin/env python
# -*- coding: utf-8 -*-

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import math
from src.manager.log_manager import LogManager

Logger = LogManager.get_logger(__name__)

# Work around the TF1/TF2 API mismatch: this module is written against the
# TensorFlow 1.x static-graph style, so disable eager execution and all
# v2 behavior before any graph is built.
tf.compat.v1.disable_eager_execution()
tf.compat.v1.disable_v2_behavior()


class NnManager:
    """
    Neural network manager.

    Small demos of forward propagation and mini-batch training written
    against the TensorFlow 1.x compatibility (static-graph) API; eager
    execution is disabled at module import time.
    """

    def __init__(self):
        pass

    def fpnn_simple(self):
        """
        Forward-propagation demo with hand-written weight matrices.

        Builds a 2 -> 3 -> 1 linear network as a static graph and logs the
        evaluated output. (The original logged the unevaluated tensor,
        which under disabled eager execution is just a graph node repr.)

        :return: None; the result is written to the log
        """
        # np.array works equally well here; np.mat is kept from the
        # original example.
        x = np.mat([[0.7, 0.9]])
        w1 = np.mat([[0.2, 0.1, 0.4], [0.3, -0.5, 0.2]])
        w2 = np.mat([[0.6], [0.1], [-0.2]])

        a = tf.matmul(x, w1)
        y = tf.matmul(a, w2)
        # Evaluate the graph so the numeric result is logged instead of an
        # unevaluated Tensor object.
        with tf.compat.v1.Session() as sess:
            Logger.info(sess.run(y))

    def fpnn_tensorflow(self, train_x, train_label,
                        input_dim=21, hidden_dim=3, learning_rate=0.01):
        """
        Train a small fully connected network (input_dim -> hidden_dim -> 1)
        with mini-batch updates using TensorFlow variables and placeholders.

        :param train_x: training features; assumed shape (n_samples, input_dim)
                        — TODO confirm against the caller
        :param train_label: training targets; assumed shape (n_samples, 1)
        :param input_dim: number of input features (default 21, matching the
                          originally hard-coded weight shape)
        :param hidden_dim: hidden-layer width (default 3)
        :param learning_rate: Adam learning rate (default 0.01)
        :return: None; progress and final weights are written to the log
        """
        # Mini-batch size for each training step.
        batch_size = 8

        # Number of training rows.
        dataset_size = len(train_x)

        # Weights drawn from a seeded normal distribution so repeated runs
        # produce identical results.
        w1 = tf.compat.v1.Variable(
            tf.compat.v1.random_normal([input_dim, hidden_dim], stddev=1, seed=1))
        w2 = tf.compat.v1.Variable(
            tf.compat.v1.random_normal([hidden_dim, 1], stddev=1, seed=1))

        # Bias terms, initialised to zero.
        b1 = tf.compat.v1.Variable(tf.compat.v1.constant(0.0, shape=[hidden_dim]))
        b2 = tf.compat.v1.Variable(tf.compat.v1.constant(0.0, shape=[1]))

        # Placeholders for features and targets; shape is left unspecified so
        # any batch size can be fed.
        x = tf.compat.v1.placeholder(tf.float32, name="x-input")
        y_ = tf.compat.v1.placeholder(tf.float32, name="y-input")

        # Forward propagation with ReLU activations.
        # NOTE(review): ReLU on the *output* layer clamps predictions to >= 0;
        # fine only if the targets are non-negative — confirm against the data.
        a = tf.nn.relu(tf.compat.v1.matmul(x, w1) + b1)
        y = tf.nn.relu(tf.compat.v1.matmul(a, w2) + b2)

        # Regression loss: mean squared error against the y_ placeholder.
        # BUGFIX: the loss previously compared y against the full train_label
        # array instead of y_, so the per-batch feed_dict targets were
        # silently ignored and the wrong quantity was minimized.
        loss = tf.compat.v1.reduce_mean(tf.compat.v1.square(y_ - y))

        # Back-propagation via the Adam optimizer.
        train_step = tf.compat.v1.train.AdamOptimizer(learning_rate).minimize(loss)

        with tf.compat.v1.Session() as sess:
            # Initialise all variables before any run() touches them.
            sess.run(tf.compat.v1.global_variables_initializer())
            Logger.info(sess.run(w1))
            Logger.info(sess.run(w2))

            # Number of training iterations.
            STEPS = 5000
            for i in range(STEPS):
                # Slide a batch_size window over the data, wrapping around.
                start = (i * batch_size) % dataset_size
                end = min(start + batch_size, dataset_size)
                sess.run(train_step,
                         feed_dict={x: train_x[start:end], y_: train_label[start:end]})
                if i % 1000 == 0:
                    total_loss = sess.run(loss, feed_dict={x: train_x, y_: train_label})
                    # Message fixed: the loss is MSE, not cross entropy.
                    Logger.info("After %d training step(s), loss on all data is %g"
                                % (i, total_loss))

            Logger.info(sess.run(w1))
            Logger.info(sess.run(w2))
            # The `with` block closes the session automatically; the explicit
            # sess.close() of the original was redundant and has been removed.

    def test_variable_and_tensor(self):
        """
        Demo of tensors vs variables.

        Logs a random-normal tensor, a Variable wrapping a random-normal
        tensor, and an all-zeros tensor. Under disabled eager execution these
        log graph nodes, not numeric values.

        :return: None
        """
        arr = tf.compat.v1.random_normal([2, 3], stddev=2)
        Logger.info(arr)
        weight = tf.Variable(tf.compat.v1.random_normal([2, 3], stddev=2))
        Logger.info(weight)
        zeros = tf.compat.v1.zeros([2, 3])
        Logger.info(zeros)
