import os
import random

import numpy as np
import tensorflow as tf

import GameMap
from cnn_gobang.config import GameConfig, GameData


# Deep Q-network (DQN) agent for the Gobang game.
class DQN:
    """Deep Q-network that scores Gobang board positions.

    The network is a 3-layer CNN (3x3 kernels, 1 -> 64 -> 128 -> 256
    channels) followed by global average pooling and two fully connected
    layers, producing a single Q-value per board. Training bootstraps
    targets from finished self-play games recorded in ``GameData``
    (see :meth:`train_once`); checkpoints are managed by a TF1 ``Saver``.
    """

    def __init__(self):
        # Flattened board size; the graph itself consumes the 2-D board,
        # this is kept as a public attribute for external use.
        self.n_input = GameConfig.map_size * GameConfig.map_size
        # A single scalar Q-value per board.
        self.n_output = 1
        self.current_q_step = 0
        # Running loss accumulator / step counter, reset at each report
        # (every 100 auto-play games, see train_once).
        self.avg_loss = 0
        self.train_times = 0
        # x: batch of boards; y: bootstrapped target Q-values.
        self.x = tf.placeholder("float", [None, GameConfig.map_size, GameConfig.map_size], name='x')
        self.y = tf.placeholder("float", [None, self.n_output], name='y')
        self.create_q_network()
        self.create_training_method()
        self.saver = tf.train.Saver()
        self.sess = tf.Session()
        # FIX: tf.initialize_all_variables() was deprecated in TF 0.12 and
        # removed in later TF1 releases; global_variables_initializer() is
        # the supported TF1 equivalent.
        self.sess.run(tf.global_variables_initializer())

    def create_q_network(self):
        """Build the CNN graph and store its output tensor in ``self.q_value``."""
        # Convolution kernels: 3x3, widening 1 -> 64 -> 128 -> 256 channels.
        wc1 = tf.Variable(tf.random_normal([3, 3, 1, 64], stddev=0.1), dtype=tf.float32, name='wc1')
        wc2 = tf.Variable(tf.random_normal([3, 3, 64, 128], stddev=0.1), dtype=tf.float32, name='wc2')
        wc3 = tf.Variable(tf.random_normal([3, 3, 128, 256], stddev=0.1), dtype=tf.float32, name='wc3')
        # Dense head: 256 (pooled features) -> 128 -> n_output.
        wd1 = tf.Variable(tf.random_normal([256, 128], stddev=0.1), dtype=tf.float32, name='wd1')
        wd2 = tf.Variable(tf.random_normal([128, self.n_output], stddev=0.1), dtype=tf.float32, name='wd2')

        bc1 = tf.Variable(tf.random_normal([64], stddev=0.1), dtype=tf.float32, name='bc1')
        bc2 = tf.Variable(tf.random_normal([128], stddev=0.1), dtype=tf.float32, name='bc2')
        bc3 = tf.Variable(tf.random_normal([256], stddev=0.1), dtype=tf.float32, name='bc3')
        bd1 = tf.Variable(tf.random_normal([128], stddev=0.1), dtype=tf.float32, name='bd1')
        bd2 = tf.Variable(tf.random_normal([self.n_output], stddev=0.1), dtype=tf.float32, name='bd2')

        weights = {
            'wc1': wc1,
            'wc2': wc2,
            'wc3': wc3,
            'wd1': wd1,
            'wd2': wd2
        }

        biases = {
            'bc1': bc1,
            'bc2': bc2,
            'bc3': bc3,
            'bd1': bd1,
            'bd2': bd2
        }

        self.q_value = self.conv_basic(self.x, weights, biases)

    def conv_basic(self, _input, _w, _b):
        """Forward pass: boards ``[batch, H, W]`` -> Q-values ``[batch, n_output]``.

        _input: batch of boards (reshaped to NHWC with one channel).
        _w, _b: weight/bias dicts produced by :meth:`create_q_network`.
        """
        # Add the single channel dimension expected by conv2d.
        _out = tf.reshape(_input, shape=[-1, GameConfig.map_size, GameConfig.map_size, 1])
        # Conv layer 1: conv -> bias -> ReLU -> 2x2 max pool.
        _out = tf.nn.conv2d(_out, _w['wc1'], strides=[1, 1, 1, 1], padding='SAME')
        _out = tf.nn.relu(tf.nn.bias_add(_out, _b['bc1']))
        _out = tf.nn.max_pool(_out, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        # Conv layer 2: same pattern.
        _out = tf.nn.conv2d(_out, _w['wc2'], strides=[1, 1, 1, 1], padding='SAME')
        _out = tf.nn.relu(tf.nn.bias_add(_out, _b['bc2']))
        _out = tf.nn.max_pool(_out, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        # Conv layer 3, then global average pooling over the spatial axes,
        # which makes the dense head independent of the board size.
        _out = tf.nn.conv2d(_out, _w['wc3'], strides=[1, 1, 1, 1], padding='SAME')
        _out = tf.nn.relu(tf.nn.bias_add(_out, _b['bc3']))
        _out = tf.reduce_mean(_out, [1, 2])
        # Fully connected layer 1.
        _out = tf.nn.relu(tf.add(tf.matmul(_out, _w['wd1']), _b['bd1']))
        # Fully connected layer 2 (linear output: a raw Q-value).
        _out = tf.add(tf.matmul(_out, _w['wd2']), _b['bd2'])
        return _out

    def create_training_method(self):
        """Define the loss (MSE between predicted and target Q) and Adam step."""
        self.cost = tf.reduce_mean(tf.squared_difference(self.q_value, self.y))
        self.optm = tf.train.AdamOptimizer(learning_rate=0.001, name='Adam').minimize(self.cost)

    def restore(self):
        """Restore the step-0 checkpoint into the session, if it exists.

        NOTE(review): only checkpoint '-0' is ever restored, while
        train_once saves under increasing global steps, and the existence
        check uses ``GameConfig.saver_path`` whereas saving uses
        ``GameConfig.saver_abs_path`` -- confirm the intended checkpoint
        layout before relying on resume-from-latest behavior.
        """
        if os.path.exists(GameConfig.saver_path + '-0.index'):
            self.saver.restore(self.sess, os.path.abspath(GameConfig.saver_path + '-0'))

    # Black represents the computer; when it is white's turn the board is
    # presumably color-inverted (by GameMap) so the net always evaluates
    # from black's perspective -- confirm in GameMap.
    def computer_play(self):
        """Choose the computer's next move.

        Returns ``(row, col, value)``: the selected position among the
        candidate moves supplied by GameMap, and the greedy maximum value
        (used later as the learning target). Returns ``(0, 0, 0)`` when
        there is no legal move.
        """
        boards = GameMap.getBoards()
        positions = GameMap.getPositions()
        if len(positions) == 0:
            return 0, 0, 0

        next_step = self.sess.run(self.q_value, feed_dict={self.x: boards})

        max_position = [0, 0]
        max_value = -1000  # true best value, fed back into training
        for i in range(len(positions)):
            # Tiny random jitter breaks ties when no move stands out.
            value = next_step[i] + random.randint(0, 10) / 1000
            if value > max_value:
                max_value = value
                max_position = positions[i]
        # Epsilon-greedy exploration (~4%) during automated self-play only.
        # The returned value intentionally stays the greedy maximum even
        # when a random move is substituted.
        rdm = random.randint(0, 100)
        if GameData.AutoPlayTimes > 0 and rdm > 95:
            step = random.randint(0, len(positions) - 1)
            max_position = positions[step]
        return max_position[0], max_position[1], max_value

    def train_once(self, winner):
        """Run one optimizer step over a finished game's records.

        winner: 1 or 2, the side that won; the per-side board/move/score
        histories are read from ``GameData``.

        Per-step target value = reward (+1 for the winning final move,
        -0.9 for the losing final move, otherwise 0) plus -0.9 times the
        opponent's next recorded value -- a negamax-style bootstrapped
        target Q.
        """
        board1 = np.array(GameData.mapRecords1)
        board2 = np.array(GameData.mapRecords2)
        step1 = np.array(GameData.stepRecords1)
        step2 = np.array(GameData.stepRecords2)
        score_record1 = np.array(GameData.scoreRecords1)
        score_record2 = np.array(GameData.scoreRecords2)
        board1 = np.reshape(board1, [-1, GameConfig.map_size, GameConfig.map_size])
        board2 = np.reshape(board2, [-1, GameConfig.map_size, GameConfig.map_size])
        step1 = np.reshape(step1, [-1, GameConfig.map_size, GameConfig.map_size])
        step2 = np.reshape(step2, [-1, GameConfig.map_size, GameConfig.map_size])

        score1 = []
        score2 = []

        # Apply each recorded move onto its board (the mover's stone is
        # written with black's code -- boards are stored mover-relative).
        board1 = (board1 * (1 - step1)) + step1 * GameConfig.black_code
        board2 = (board2 * (1 - step2)) + step2 * GameConfig.black_code
        # Build targets for side 1's positions from side 2's recorded values.
        for i in range(len(board1)):
            if i == len(score_record2):  # side 1 made one extra (final) move: it won
                score1.append([1.0])  # +1 reward for the winning move
                if winner == 2:
                    print('error step count!')  # records disagree with the reported winner
            else:
                score1.append([score_record2[i][0] * -0.9])
        if winner == 2:
            # Penalize the loser's last move.
            score1[len(score1) - 1][0] = -0.9
        # Build targets for side 2's positions from side 1's recorded values.
        for i in range(len(board2)):
            if i == len(score_record1) - 1:  # equal step counts: side 2 made the last move and won
                score2.append([1.0])
                if winner == 1:
                    print('error step count!')
            else:
                score2.append([score_record1[i + 1][0] * -0.9])
        if winner == 1:
            score2[len(score2) - 1][0] = -0.9
        # FIX: local variable was misspelled 'borders'; renamed to 'boards'.
        boards = np.concatenate([board1, board2], axis=0)
        scores = np.concatenate([score1, score2], axis=0)
        _, total_loss = self.sess.run([self.optm, self.cost], feed_dict={self.x: boards, self.y: scores})

        self.avg_loss += total_loss
        self.train_times += 1
        # Report the running average loss and checkpoint every 100 games.
        if GameData.AutoPlayTimes % 100 == 0:
            loss_avg = self.avg_loss / self.train_times
            print('train avg loss ' + str(loss_avg) + ' has times ' + str(GameData.AutoPlayTimes))
            self.avg_loss = 0
            self.train_times = 0
            if GameData.AutoPlayTimes == 0:
                self.saver.save(self.sess, GameConfig.saver_abs_path, global_step=0)
            else:
                self.saver.save(self.sess, GameConfig.saver_abs_path,
                                global_step=(GameData.AutoPlayTimes - 1) // 100)

    def play_width_human(self):
        """Start an interactive game: restore weights, wire callbacks, open the UI.

        NOTE(review): the name looks like a typo for ``play_with_human``;
        kept as-is because external callers may reference it.
        """
        self.restore()
        GameData.PlayWithComputer = self.computer_play
        GameData.TrainNet = self.train_once
        GameMap.ShowWind()
