# 训练模块

import configparser
from src.tool import throw
import json
import src.constant as constant
from src.net import Server
from database.dbop import Database
import os
import src.global_config as gl
import src.tool as tool
from src.log import Log
from src.go import Go
from ai.data_center import Data_Center
from ai.data_set import MyDataSet
from database.dbop import Database
import torch.nn as nn
from torch.utils.data import TensorDataset
from torch.utils.data import DataLoader
import torch
import random
import numpy as np
import platform

os_type = platform.system()  # OS name ("Windows"/"Linux"/"Darwin"); currently informational only

# Import the project's own neural-network components
from model.loss import Loss
from model.policy import PolicyNet
from model.feature import Extractor
from model.value import ValueNet
from ai.engine import Honinbo

# Training metrics (loss / winner estimates / accuracy) are written here as JSON lines
log_fileName = './saved_model/overfitting.txt'

# Path of the program configuration file
filename = './config.ini'
# Read the program configuration
config = configparser.ConfigParser()
config.read(filename, encoding='utf-8')
# Publish config sections through the project's global-value registry
gl.set_value('model_path', config['model'])
gl.set_value('ai', config['ai'])
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("device = ", DEVICE)
# Mini-batch size
batch_size = int(tool.get_config_by_key(config['ai'], 'BATCH_SIZE'))
check_point = 0
go = Go()  # board-logic helper shared by collate_fn()/test() below

# Weight-file locations for the three networks
feature_path = tool.get_config_by_key(config['model'], 'feature')
policy_path = tool.get_config_by_key(config['model'], 'policy')
value_path = tool.get_config_by_key(config['model'], 'value')


# 获取数据集
def get_data_set() -> list:
    # 设置全局变量
    dbpath = os.path.dirname(os.path.realpath(__file__))
    gl.set_value('logpath', dbpath)
    # dbpath = os.path.join(dbpath, config['db']['filepath'])
    # gl.set_value('dbpath', dbpath)
    # 一定要在设置完全局变量再实例化
    old = os.path.join(dbpath, tool.get_config_by_key(config['db'], 'old'))
    current = os.path.join(dbpath, tool.get_config_by_key(config['db'], 'current'))
    ai = os.path.join(dbpath, tool.get_config_by_key(config['db'], 'ai'))
    Tom = os.path.join(dbpath, tool.get_config_by_key(config['db'], 'Tom'))

    # 获取数据集
    model = {
        "old": old,
        'current': current,
        'ai': ai,
        "Tom": Tom
    }
    gl.set_value('model', model)
    log = Log()

    data_center = Data_Center()

    all_data = data_center.get_all_data_set()
    return all_data


# 如何组成训练数据
def collate_fn(example: list) -> tuple:
    state = []
    winner_list = []
    probas_list = []

    for i in range(len(example)):
        single = example[i]

        goban, winner, parsed_sgf, string_list = go.parse_single_data(single)
        goban_len = len(goban)
        for j in range(goban_len - 1):
            # 随机取棋盘的一块值
            rand = j

            winner_list.append(winner)

            current_dict = parsed_sgf[rand]

            # 计算出下一手的位置
            next_dict = parsed_sgf[rand + 1]
            next_board = go.getEmptyBoard()
            next_x = next_dict['x']
            next_y = next_dict['y']
            next_board[next_x][next_y] = 1

            current_string = string_list[rand]
            color = current_dict['color']

            current_board = goban[rand]
            # 上一手是黑下 那么引擎要关注白的局面，反之也是
            if color == 'white':
                color_set_num = 0
                current_color = -1
                oppo_color = 1
            else:
                color_set_num = 1
                current_color = 1
                oppo_color = -1
            # myself_board, oppo_board, my_1, my_2, my_3, oppo_1, oppo_2, oppo_3 \
            #     = go.get_board_additional(board=goban[rand], string=current_string, color=color)
            my_last, oppo_last = go.get_last_board(goban, j, 1, 1)
            # 取了当前局面

            # 将原生的棋盘数据 抽成 将来落子方的棋盘 对手方的棋盘，其他位置全0处理
            myself_board, oppo_board = go.get_myself_and_oppo_board(current_board, current_color, oppo_color)

            color_board = go.get_current_color_board(color_set_num)
            #
            # my_res = [myself_board]
            # my_res.extend(my_last)
            # my_res.append(my_1)
            # my_res.append(my_2)
            # my_res.append(my_3)
            # oppo_res = [oppo_board]
            # oppo_res.extend(oppo_last)
            # oppo_res.append(oppo_1)
            # oppo_res.append(oppo_2)
            # oppo_res.append(oppo_3)
            #
            # my_res.extend(oppo_res)
            # my_res.append(color_board)

            # state.append(my_res)
            my = [myself_board]
            my.extend(my_last)
            oppo = [oppo_board]
            oppo.extend(oppo_last)
            my.extend(oppo)
            my.append(color_board)
            state.append(my)
            # 预测的结果 其他全是0 只有下一手的位置是1
            probas_list.append(next_board)

    return state, winner_list, probas_list


# 加载模型 或者重建
def load_model(inplane, outplane, out_plane_map, block) -> tuple:
    try:
        feature = torch.load(feature_path, map_location=DEVICE)
        policy = torch.load(policy_path, map_location=DEVICE)
        value = torch.load(value_path, map_location=DEVICE)
        print("从权重文件读取网络成功")
        return feature, policy, value

    except Exception as e:
        print("读取神经网络失败 原因 ->", e)
        exit(-1)


# 训练的主函数
def train(data_set: MyDataSet, time: int, test_data_set: list):
    # 获取学习率
    lr = float(tool.get_config_by_key(config['ai'], 'LR'))

    # 获取CNN的通道数 记得一定要转成int
    inplane = int(tool.get_config_by_key(config['ai'], 'inplane'))
    outplane = int(tool.get_config_by_key(config['ai'], 'outplane'))
    out_plane_map = int(tool.get_config_by_key(config['ai'], 'out_plane_map'))
    block = int(tool.get_config_by_key(config['ai'], 'BLOCK'))
    ADAM = int(tool.get_config_by_key(config['ai'], 'ADAM'))
    L2_REG = float(tool.get_config_by_key(config['ai'], 'L2_REG'))
    MOMENTUM = float(tool.get_config_by_key(config['ai'], 'MOMENTUM'))
    EPOCH = 10

    criterion = Loss()
    # 决定三个神经网络的出入通道
    feature, policy, value = load_model(inplane, outplane, out_plane_map, block)
    dataloader = DataLoader(data_set, batch_size=batch_size, collate_fn=collate_fn, shuffle=True, num_workers=5,
                            drop_last=True)

    # 把神经网络的参数合起来
    joint_params = list(feature.parameters()) + list(policy.parameters()) + list(value.parameters())
    if ADAM == 1:
        # optimizer = torch.optim.Adam(joint_params, lr=lr, weight_decay=L2_REG)
        # 先不用超参数
        optimizer = torch.optim.Adam(joint_params, lr=lr)
    else:
        optimizer = torch.optim.SGD(joint_params, lr=lr, weight_decay=L2_REG, momentum=MOMENTUM)

    # 开始训练
    print("共{}个epoch".format(EPOCH))
    epoch_loss = []
    for i in range(EPOCH):
        batch_loss = []
        epoch_winner_list = []
        for batch_idx, (state, winner_list, probas_list) in enumerate(dataloader):
            length = len(state)
            print("这批数据共{}步棋".format(length))
            batch_winner_list = []
            single_loss = []
            while length >= batch_size:
                try:
                    b_state = state[0:batch_size]
                    b_winner = winner_list[0:batch_size]
                    b_probas = probas_list[0:batch_size]
                    # 转张量
                    b_state = torch.tensor(b_state, dtype=torch.float, device=DEVICE)
                    b_winner = torch.tensor(b_winner, dtype=torch.float, device=DEVICE)
                    b_probas = torch.tensor(b_probas, dtype=torch.float, device=DEVICE)
                    b_probas = torch.reshape(b_probas, (batch_size, -1))

                    optimizer.zero_grad()

                    # 用提取器拿到特征，传给策略器和评价器
                    feature_maps = feature(b_state.clone().detach())
                    winner = value(feature_maps)
                    probas = policy(feature_maps)
                    loss = criterion(winner, b_winner, probas, b_probas)
                    loss.backward()
                    optimizer.step()
                    single_loss.append(float(loss))
                    state = state[batch_size:length]
                    winner_list = winner_list[batch_size:length]
                    probas_list = probas_list[batch_size:length]
                    length = length - batch_size
                    w_list = winner.tolist()
                    batch_winner_list.append(w_list[0])
                except Exception as e:
                    print("训练发生错误，原因为 ->", e)
            batch_loss.append(np.mean(single_loss))
            epoch_winner_list.append(np.mean(batch_winner_list))
            print(
                "当前epoch=[{}] 共[{}]个,index = [{}] 到[{}]结束训练 批次loss = {}，time= {}".format(i + 1, EPOCH, batch_idx, time,
                                                                                         np.mean(single_loss),
                                                                                         tool.getCurrentTimeFormat()))

        print("训练完一个epoch，开始测试")
        try:
            test(test_data_set)
        except Exception as e:
            print("测试出现错误->", e)
        print("Average backward pass ->", np.mean(batch_loss))
        epoch_loss.append(np.mean(batch_loss))
        print("epoch loss为 ", epoch_loss)
        fs = open(log_fileName, encoding='utf-8', mode='r+')
        try:
            fs.write(json.dumps({
                'type': 'loss',
                'data': batch_loss
            }))
            fs.write('\n')
            fs.write(json.dumps({
                'type': "epoch_winner",
                'data': epoch_winner_list
            }))

            fs.write('\n')
        finally:
            fs.close()


# 测试模块
def test(data_set):
    engine = Honinbo()

    # 精度
    accuracy = []

    for i in range(len(data_set)):
        sgf = data_set[i]['sgf']
        goban, winner, parsed_sgf, string_list = go.parse_sgf_2_network_data(sgf)
        current_goban = []

        # 正确的个数
        correct = 0

        # 最后一步棋不分析
        for j in range(len(goban) - 1):
            # 每个棋谱都进行一次分析
            current_goban.append(goban[j])
            # 给引擎赋值棋盘数据
            engine.board = goban[j]
            engine.string = string_list[j]
            engine.color = parsed_sgf[j]['color']

            state = engine.get_predict_data(current_goban)
            # 分析 然后获取合法的落子点
            probas, winner = engine.analyze(state)
            result = engine.transfer_analyze_to_list(probas, winner)
            legal = engine.get_legal_moves(result, 1)
            best = legal[0]
            sgf_choose = parsed_sgf[j + 1]
            if best['x'] == sgf_choose['x'] and best['y'] == sgf_choose['y']:
                correct = correct + 1

        correct_cent = float(correct) / len(goban)
        accuracy.append({
            'correct': correct,
            'num': len(goban),
            'percent': correct_cent,
        })
    print("accuracy", accuracy)
    fs = open(log_fileName, encoding='utf-8', mode='r+')
    try:
        fs.write(json.dumps({
            'type': 'accuracy',
            'data': accuracy
        }))
        fs.write('\n')
    finally:
        fs.close()
    print("保存数据成功")


if __name__ == '__main__':
    begin = tool.getCurrentTimeFormat()
    print("训练开始时间 ->", begin)
    data = get_data_set()
    # Take 10 batches' worth of games starting at offset 10000
    train_data = data[10000:10000 + batch_size * 10]
    # Truncate the metrics log before this run (mode 'w' empties the file)
    with open(log_fileName, encoding='utf-8', mode='w'):
        pass
    # Pick a small, disjoint slice as test data — a handful of games is
    # enough for the per-epoch accuracy check.
    test_data = data[batch_size * 105:batch_size * 105 + 5]
    # test(test_data)
    num_batches = len(train_data) // batch_size
    print("共{}个batch".format(num_batches))
    my_data_set = MyDataSet(train_data)
    train(my_data_set, num_batches, test_data)
    print("正常训练完毕")
    end = tool.getCurrentTimeFormat()

    print("训练开始时间 ->{},训练结束时间->{}".format(begin, end))
