import argparse
import ast
import copy
import datetime
import logging
import os
from logging import log

import numpy as np
from tensorboardX import SummaryWriter

from board import board
from model import Agent, saveAgent, loadAgent

# Alternative 6-cell tuple patterns (currently unused, kept for reference):
# PATTERNS = [
#     [0,1,2,3,4,5],
#     [4,5,6,7,8,9],
#     [0,1,2,4,5,6],
#     [4,5,6,8,9,10]
# ]

# Default n-tuple patterns as cell indices on the flattened board
# (presumably row-major 0..15 on a 4x4 grid -- the indices fit that layout).
PATTERNS = [
    [0, 1, 2, 3],     # top row
    [4, 5, 6, 7],     # second row
    [0, 1, 4, 5],     # top-left 2x2 square
    [1, 2, 5, 6],     # top-middle 2x2 square
    [5, 6, 9, 10],    # central 2x2 square
]

# TensorBoard run directory, timestamped so successive runs never collide.
logdir = os.path.join('./log', datetime.datetime.now().strftime('%Y%m%d_%H%M%S'))
os.makedirs(logdir, exist_ok=True)
writer = SummaryWriter(logdir)

# Root logger: emit everything at DEBUG and above to the console with timestamps.
logger = logging.getLogger()
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(
    logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s'))
logger.addHandler(stream_handler)
logger.setLevel(logging.DEBUG)

if __name__ == '__main__':
    # --- command-line configuration ------------------------------------------
    parser = argparse.ArgumentParser()
    parser.add_argument('--max_num', type=int, default=15, help='log2 of max number in the game')
    parser.add_argument('--num_epochs', type=int, default=10000)
    parser.add_argument('--show_interval', type=int, default=100)
    parser.add_argument('--save_interval', type=int, default=1000)
    parser.add_argument('--lr', type=float, default=0.1)
    parser.add_argument('--large_num_training', action='store_true')
    parser.add_argument('--large_num', type=int, default=10, help='start large number')
    parser.add_argument('--large_num_train_times', type=int, default=10, help='how many times to train for large numbers in each epoch')
    parser.add_argument('--save_path', type=str, default='./model')
    parser.add_argument('--load_ckpt', type=str)

    parser.add_argument('--patterns', type=str)
    args = parser.parse_args()
    logger.info(args)

    if not args.patterns:
        args.patterns = PATTERNS
    else:
        # SECURITY: parse the user-supplied pattern list with ast.literal_eval
        # instead of eval -- it accepts only Python literals such as
        # "[[0,1,2,3],[4,5,6,7]]" and cannot execute arbitrary code.
        args.patterns = ast.literal_eval(args.patterns)

    if not args.load_ckpt:
        agent = Agent(patterns=args.patterns, maxnum=args.max_num)
        logger.info('using patterns: {}'.format(args.patterns))
    else:
        agent = loadAgent(args.load_ckpt)
        logger.info('loaded ckpt from {}'.format(args.load_ckpt))

    os.makedirs(args.save_path, exist_ok=True)

    logger.info('start training!')
    for epoch in range(args.num_epochs):
        # One epoch == one episode: fresh 2048 board with two random tiles.
        score = 0.0
        game = board().popup().popup()
        records = []        # transition tuples, newest first (prepended below)

        large_number_flag = False        # whether the configured large tile has been reached this episode
        while True:
            # Greedy action: pick the move whose afterstate the agent values
            # highest.  ('evaulate' is the Agent API's spelling.)
            next_games = [game.up(), game.down(), game.left(), game.right()]
            action = np.argmax(agent.evaulate(next_games))
            next_game, reward = next_games[action]
            if game.end():
                break
            next_game_after = next_game.popup()
            score += reward
            # (state, action, reward, afterstate, next state) -- newest at index 0.
            records.insert(0, (game.tile, action, reward, next_game.tile, next_game_after.tile) )
            game = next_game_after

            if args.large_num_training and max(game.tile) > args.large_num:
                # A large tile appeared: learn from the episode so far, then
                # replay the rest of the game several times from this position
                # so the agent gets extra experience with large-tile states.
                agent.learn(records, args.lr)
                large_num_start_game = copy.deepcopy(game)
                large_num_start_score = score
                large_number_flag = True
                large_num_start_records = copy.deepcopy(records)
                for large_epoch in range(args.large_num_train_times):
                    # Restore the saved position and play one rollout to the end.
                    records = copy.deepcopy(large_num_start_records)
                    score = large_num_start_score
                    game = copy.deepcopy(large_num_start_game)
                    while True:
                        next_games = [game.up(), game.down(), game.left(), game.right()]
                        action = np.argmax(agent.evaulate(next_games))
                        next_game, reward = next_games[action]
                        if game.end():
                            break
                        next_game_after = next_game.popup()
                        score += reward
                        records.insert(0, (game.tile, action, reward, next_game.tile, next_game_after.tile))
                        game = next_game_after
                    agent.learn(records, args.lr)

                # Record metrics from the last large-number rollout only.
                agent.metrics.append((score, len(records), game.tile))
                writer.add_scalar('score', score, epoch)
                writer.add_scalar('game_steps', len(records), epoch)
                writer.add_scalar('board_sum', np.sum(game.tile), epoch)
                break   # leave this epoch's game loop

        if not args.large_num_training or not large_number_flag:
            # Normal episode (no large-number replay happened): learn and log once.
            agent.learn(records, args.lr)
            agent.metrics.append( (score, len(records), game.tile) )
            writer.add_scalar('score', score, epoch)
            writer.add_scalar('game_steps', len(records), epoch)
            writer.add_scalar('board_sum', np.sum(game.tile), epoch)

        if (epoch+1) % args.show_interval == 0:
            # ('showStattistic' is the Agent API's spelling.)
            agent.showStattistic(epoch+1, args.show_interval)
        
        if (epoch+1) % args.save_interval == 0:
            saveAgent(agent, os.path.join(args.save_path, 'epoch-%d.pkl' % (epoch + 1)))
