import io
import os
import socket
from datetime import datetime
from functools import partial
import threading
import time
# import paddlex as pdx
import numpy as np
import pandas as pd
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from Dataset import MyDataset
from Seq2Seq import Seq2seq
from MyLoss import CrossEntropyCriterion
from Accuracy import myPerplexity
from Deploy import deploy
from paddle.io import Dataset
import paddlenlp
from paddlenlp.metrics import Perplexity


class model_train():
    """Trainer for the Seq2seq chat model on the "心理语料" corpus.

    Builds the dataset, dataloader, model, loss, perplexity metric and Adam
    optimizer, then best-effort restores a checkpoint from ``work/model``.
    Training runs either through the high-level ``paddle.Model`` API
    (``train``) or through a hand-written loop (``basetrain``).
    """

    def __init__(self, train_place='gpu'):
        """Set up the data pipeline, model, loss/metric and optimizer.

        Args:
            train_place: device string passed to ``paddle.set_device``
                ('gpu' or 'cpu').
        """
        super().__init__()
        dataname = "心理语料"
        dic_kw_path = "dataset/字向量kw.npy"
        dic_wk_path = "dataset/字向量wk.npy"
        paddle.set_device(train_place)
        # NOTE(review): "Batch_Szie" keeps its original (misspelled) name so
        # any external code that references this attribute keeps working.
        self.Batch_Szie = 32
        self.Epoch = 10
        self.train_dataset = MyDataset(dataname, dic_kw_path, dic_wk_path,
                                       batch_size=self.Batch_Szie)
        self.train_loader = paddle.io.DataLoader(self.train_dataset,
                                                 batch_size=self.Batch_Szie,
                                                 shuffle=True)
        self.seqmodel = Seq2seq(hidden_dim=256, num_layers=2, dropout=0.2)
        self.loss_fn = CrossEntropyCriterion()
        self.acc = myPerplexity()
        # Create the optimizer unconditionally (the original duplicated this
        # line in both the try and the except branch), then try to restore
        # model + optimizer state from the last checkpoint.
        self.optim = paddle.optimizer.Adam(parameters=self.seqmodel.parameters())
        try:
            layer_state_dict = paddle.load("work/model/final.pdparams")
            self.seqmodel.set_state_dict(layer_state_dict)
            self.optim.set_state_dict(paddle.load("work/model/final.pdopt"))
            print("加载成功")
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # still propagate; any load failure just trains from scratch.
        except Exception:
            print("加载失败")

    def train(self):
        """Train via the high-level ``paddle.Model`` API, logging to VisualDL."""
        self.loss_fn = CrossEntropyCriterion()
        model = paddle.Model(self.seqmodel)
        model.prepare(optimizer=self.optim,
                      loss=self.loss_fn, metrics=self.acc)
        callback = paddle.callbacks.VisualDL("log")

        # NOTE(review): shuffle=False here while the DataLoader in __init__
        # uses shuffle=True — confirm whether shuffling is wanted in this path.
        model.fit(self.train_dataset,
                  epochs=self.Epoch,
                  batch_size=self.Batch_Szie,
                  save_dir="work/model",
                  verbose=1,
                  callbacks=callback,
                  shuffle=False,
                  )

    def basetrain(self):
        """Hand-written training loop equivalent to ``Model.fit``.

        Each batch from the loader is expected to be a 7-tuple of
        (input ids, input lengths, input mask, decoder inputs,
        target labels, label lengths, label mask) — inferred from usage;
        confirm against ``MyDataset``.
        """
        model = self.seqmodel
        acc_fn = self.acc
        loss_fn = self.loss_fn
        optimer = self.optim
        model.train()
        optimer.clear_grad()
        allstep = 0
        tlen = len(self.train_loader)
        for epoch in range(self.Epoch):
            acc_fn.reset()
            for batchid, data in enumerate(self.train_loader):
                time_start = datetime.now()
                xdata, seq_len, maskd, labels, label, label_len, maskl = data[:7]
                # Forward pass, loss and metric — what Model.prepare wires up.
                predicts = model(xdata, seq_len, maskd, labels)
                loss = loss_fn(predicts, label, label_len, maskl)
                acc = acc_fn.getacc(predicts, label, label_len, maskl)
                # Backward pass, parameter update, gradient reset — the steps
                # Model.fit() normally performs internally.
                loss.backward()
                optimer.step()
                optimer.clear_grad()
                allstep += 1
                time_end = datetime.now()
                # BUGFIX: the original used ``.microseconds / 1000``, which is
                # only the sub-second component of the timedelta, so any step
                # longer than one second under-reported its duration (and
                # corrupted the ETA). total_seconds() covers the full interval.
                steptime = (time_end - time_start).total_seconds() * 1000
                self.ShowProcess(tlen, batchid + 1, epoch, self.Epoch,
                                 loss.detach().cpu().numpy(), acc, steptime)

    def ShowProcess(self, total, index, epoch, Epoch, loss, acc, steptime):
        """Print a single-line console progress bar with loss/accuracy/ETA.

        Only prints on every 10th step and on the final step of the epoch.

        Args:
            total: number of batches in the epoch.
            index: 1-based index of the current batch.
            epoch: 0-based current epoch index.
            Epoch: total number of epochs.
            loss: loss value to display.
            acc: accuracy value to display (formatted to 5 decimals).
            steptime: duration of the last step, in milliseconds.
        """
        isnext = '\n' if index == total else ''
        # 50-character bar; one '>' per completed 2% of the epoch.
        process_str = ''.join(
            '>' if index / total >= i / 50 else ' ' for i in range(50))
        process = '[' + process_str + '|' + '%.3f' % (index * 100 / total) + '%' + "]"
        # Remaining-time estimate extrapolated from the last step's duration.
        etaALL = int((total - index) * steptime / 1000)  # seconds
        etaS = int(etaALL % 60)
        etaH = int(etaALL / 3600)
        etaM = int((etaALL - etaH * 3600) / 60)
        ETA = str(etaH) + '时:' + str(etaM) + '分:' + str(etaS) + '秒'
        acc = '%.5f' % acc
        if index % 10 == 0 or index == total:
            # BUGFIX: flush=True so the '\r'-rewritten line shows immediately
            # (the original's flush=False could delay the bar indefinitely).
            print('\r', "Epoch:", '(', epoch + 1, '/', Epoch, ')', process, '',
                  "Step:", '(', index, '/', total, ')',
                  "损失值:{},准确率:{},单步耗时:{}ms,剩余时间:{}".format(loss, acc, steptime, ETA),
                  end=isnext, flush=True)
