from distutils.log import fatal
import keras
from keras import regularizers
from keras.models import Sequential
from setting import Setting
import data
import os
import numpy as np
import matplotlib.pyplot as plt
from keras import backend
from keras import layers
from keras import models
from keras.models import load_model
import tool
import time


def model(set, num="a0000", data_=None):
  """Build and train a bidirectional-LSTM classifier, then save plot + model.

  Trains one epoch per ``fit()`` call so training can abort early when the
  loss diverges to NaN.  On success, a loss/accuracy curve (.jpg) and the
  model (.h5) are saved under ``set.CHECKPOINTS_PATH`` next to this file,
  named from the hyperparameters.

  :param set: Setting object holding the hyperparameters (MAX_STEPS,
      FRONT_VOCAB_SIZE, LSTM_UNITS, DROPOUT_RATE, REGULARIZERS,
      BATCH_SIZE, EPOCHS, CHECKPOINTS_PATH).  NOTE: shadows builtin `set`.
  :param num: filename prefix for the saved model/plot files.
  :param data_: dict with "train_x", "train_y", "test_x", "test_y".
  :return: "" when the loss becomes NaN (training aborted); None otherwise.
  """
  # FIX: None sentinel instead of a shared mutable {} default argument.
  if data_ is None:
    data_ = {}

  # ---- build the network: Input -> BiLSTM -> Flatten -> Dense(relu) ----
  x_input = layers.Input((set.MAX_STEPS, set.FRONT_VOCAB_SIZE))
  x = layers.Bidirectional(layers.LSTM(set.LSTM_UNITS,
    return_sequences=True,
    use_bias=True, bias_initializer=keras.initializers.Zeros(),
    recurrent_dropout=set.DROPOUT_RATE,
    kernel_regularizer=regularizers.l2(set.REGULARIZERS),
    activity_regularizer=regularizers.l2(set.REGULARIZERS)))(x_input)
  x = layers.Flatten()(x)
  x = layers.Dense(set.FRONT_VOCAB_SIZE, activation='relu')(x)
  # Renamed local (was `model`) so it no longer shadows this function.
  net = models.Model([x_input], [x])
  # FIX: the model has exactly one output, so loss_weights must be a
  # single-element list; the original [1, 1, 1, 1, 1] does not match the
  # number of outputs and is rejected by Keras.
  net.compile(optimizer=keras.optimizers.Adam(), metrics=['accuracy'],
    loss=keras.losses.categorical_crossentropy,
    loss_weights=[1])

  # ---- unpack the data set ----
  train_x = data_["train_x"]
  train_y = data_["train_y"]
  test_x = data_["test_x"]
  test_y = data_["test_y"]

  # Directory containing this source file.
  path = os.path.split(os.path.realpath(__file__))[0]
  # FIX: makedirs(exist_ok=True) replaces the check-then-mkdir race.
  os.makedirs('{}/{}'.format(path, set.CHECKPOINTS_PATH), exist_ok=True)
  os.makedirs('{}/{}'.format(path, set.CHECKPOINTS_PATH + "_PNG"), exist_ok=True)

  # ---- train, one epoch per fit() so a NaN loss can abort early ----
  epoch = set.EPOCHS
  print('模型训练开始：{}_{}_{}_{}_{}_{}_{}'.format(num, epoch, set.BATCH_SIZE, set.MAX_STEPS,set.DROPOUT_RATE,set.LSTM_UNITS,set.REGULARIZERS))
  loss = []
  val_loss = []
  accuracy = []
  val_accuracy = []
  for i in range(epoch):
    H = net.fit(train_x, train_y, validation_data=(test_x, test_y), batch_size=set.BATCH_SIZE, epochs=1)
    if np.isnan(H.history["loss"][0]):
      # Training diverged: release the TF graph and signal failure.
      backend.clear_session()
      return ""
    loss.append(H.history["loss"][0])
    val_loss.append(H.history["val_loss"][0])
    accuracy.append(H.history["accuracy"][0])
    val_accuracy.append(H.history["val_accuracy"][0])

  # ---- plot the four training curves and persist plot + model ----
  x = np.arange(0, epoch)
  plt.plot(x, loss, label="loss")
  plt.plot(x, val_loss, label="val_loss")
  plt.plot(x, accuracy, label="accuracy")
  plt.plot(x, val_accuracy, label="val_accuracy")
  plt.title('epoch:{}, BATCH_SIZE:{}, MAX_STEPS:{}, DROPOUT_RATE:{}, LSTM_UNITS:{}'.format(epoch,set.BATCH_SIZE,set.MAX_STEPS,set.DROPOUT_RATE,set.LSTM_UNITS))
  plt.legend()
  plt.savefig('{}/{}/{}_{}_{}_{}_{}_{}_{}.jpg'.format(path,set.CHECKPOINTS_PATH+"_PNG", num, epoch, set.BATCH_SIZE, set.MAX_STEPS,set.DROPOUT_RATE,set.LSTM_UNITS,set.REGULARIZERS), bbox_inches = 'tight')
  # Save the full model (architecture + weights).
  net.save('{}/{}/{}_{}_{}_{}_{}_{}_{}.h5'.format(path,set.CHECKPOINTS_PATH, num, epoch, set.BATCH_SIZE, set.MAX_STEPS,set.DROPOUT_RATE,set.LSTM_UNITS,set.REGULARIZERS))
  # Release the TF session and the matplotlib figure before the next run.
  backend.clear_session()
  plt.close()

  print('模型训练完成：{}_{}_{}_{}_{}_{}_{}'.format(num, epoch, set.BATCH_SIZE, set.MAX_STEPS,set.DROPOUT_RATE,set.LSTM_UNITS,set.REGULARIZERS))

def predict(set, i0, i1, i2, i3, i4, i5, i6, data_=None):
  """Load a trained model named by its hyperparameter tuple and predict.

  :param set: Setting object (CHECKPOINTS_PATH is read).
  :param i0: filename prefix of the saved model (e.g. "a0000_").
  :param i1-i6: remaining filename fields (epochs, batch size, max steps,
      dropout rate, lstm units, l2 factor) joined with "_" plus ".h5".
  :param data_: dict with "predict_x" (model input batch) and "issueNo"
      (sequence of draw numbers; the last one is echoed back).
  :return: dict with "predictValue" (sampled numbers for the next draw)
      and "issueNo" (one-element slice holding the current draw number).
  """
  # FIX: None sentinel instead of a shared mutable {} default argument.
  if data_ is None:
    data_ = {}
  # Directory containing this source file.
  path = os.path.split(os.path.realpath(__file__))[0]
  # Load the whole saved model (architecture + weights).
  model = load_model('{}/{}/{}_{}_{}_{}_{}_{}_{}.h5'.format(path,set.CHECKPOINTS_PATH,i0,i1, i2, i3, i4, i5, i6))
  predict_x = data_["predict_x"]
  issueNo = data_["issueNo"]
  # Run inference.
  predicts = model.predict(predict_x, batch_size=1)
  # Free the TF graph now that we have the raw predictions.
  backend.clear_session()
  # tool.sample is applied to every prediction row but only the last row's
  # sample survives; the loop is kept as-is because tool.sample may draw
  # randomness per call — TODO confirm whether only predicts[-1] is intended.
  balls = None  # FIX: avoids a NameError when predicts is empty
  for row in predicts:
    balls = tool.sample(row)
  return {
    "predictValue":balls, # 下期要下注的预测值
    "issueNo":issueNo[-1:], # 当前期的期号
  }


def getFiles(path):
  """Return the names of the entries in directory *path* (os.listdir)."""
  return os.listdir(path)

if __name__ == '__main__':
  # Endless train-then-predict loop: (re)train a model when needed, then
  # poll for new draws and predict until the prediction budget runs out.
  set = Setting()  # NOTE: shadows the builtin `set` throughout this block
  one = True # True only on the first pass through the outer loop
  while True:
    # ---- training phase ----
    # Saved model filenames look like: a00001__20_32_64_0.3_16_0.1
    item = [20, 32, 16, 0.3, 64, 0.1] # EPOCHS, BATCH_SIZE, MAX_STEPS, DROPOUT_RATE, LSTM_UNITS, REGULARIZERS ("3000:5s" in the original was a timing note)
    set.setDef(
      EPOCHS_=item[0],
      BATCH_SIZE_=item[1],
      MAX_STEPS_=item[2],
      DROPOUT_RATE_=item[3],
      LSTM_UNITS_=item[4],
      REGULARIZERS_=item[5],
      CHECKPOINTS_PATH_= "predictValue__authentic",
      LOTTO_DOWNLOAD_URL_=3000)
    try:
      # Fetch and split the data set, then bundle the training portion.
      train_x, train_y, test_x, test_y, predict_x, predict_y = data.getDate(set)
      da = {"train_x":train_x,
          "train_y":train_y, 
          "test_x":test_x,
          "test_y":test_y}
      num = "a0000_"
      # model(set,num, data_=da)

      path = os.path.split(os.path.realpath(__file__))[0]
      if one: 
        one = False
        # First pass: skip training if the saved model already exists.
        if os.path.exists(path +"/" + set.CHECKPOINTS_PATH+"/a0000__20_32_64_0.3_16_0.1.h5"):
          print("模型文件已经存在")  # model file already exists
        else:
          print("模型文件不存在，重新训练")  # model file missing -> retrain
          model(set,num, data_=da)
      else:
        # Every later pass retrains unconditionally.
        model(set,num, data_=da)
          

    except Exception as e:
      print("训练 --- 错误")  # training error; log and fall through to prediction
      print(e)
    

    # ---- prediction phase ----
    cs = 60 # predictions remaining before retraining (original note said 200; the value used is 60)
    issueNoNew = 0 # most recently processed issue (draw) number
    set.setDef(
      CHECKPOINTS_PATH_= "predictValue__authentic",
      SavaDataFile_= "predictValue__authentic.json",
      LOTTO_DOWNLOAD_URL_= 100,
      )
    path = os.path.split(os.path.realpath(__file__))[0]
    # All saved model files found in the checkpoints directory.
    getFilesList = getFiles(path + "/" + set.CHECKPOINTS_PATH)
    while cs > 0: # predict until the budget runs out, then retrain
      try:
        da = data.authenticPredictData(set)
        issueNo = da["issueNo"][-1:][0]  # latest issue number in the feed
        if issueNoNew == issueNo:
          # No new draw yet; poll again after a short sleep.
          time.sleep(10)
          continue
        else:
          issueNoNew = issueNo
        cs = cs - 1
        # getFilesList = ["a00317__60_2_16_0.6_16.h5"]
        # Parse each model filename back into its hyperparameters and predict.
        for item in range(0, len(getFilesList)):
          item = getFilesList[item]
          item = item[:-3]  # strip the ".h5" extension
          sl = item.split("_")
          s = []
          for _ in sl:
            if _: s.append(_)  # drop the empty field produced by the "__"
          sl = s
          sl[0] = sl[0] + "_"  # restore the trailing "_" on the prefix
          for i in range(1, len(sl)):
            if i == 4:
              sl[i] = float(sl[i])  # dropout rate is a float
              continue
            if i == 6:
              sl[i] = float(sl[i])  # l2 regularizer factor is a float
              continue
            sl[i] = int(sl[i])  # all other fields are ints
          set.setDef(EPOCHS_=sl[1], BATCH_SIZE_=sl[2], MAX_STEPS_=sl[3], DROPOUT_RATE_=sl[4], LSTM_UNITS_=sl[5])
          predictRes = predict(set,sl[0],sl[1],sl[2],sl[3],sl[4],sl[5],sl[6],data_=da)
          predictRes["fileName"] = item
          tool.savaAuthenticData(set,predictRes)
        print("======================================================================================")
        print("=================================    预测完成    =====================================")
        print("当前期为：{},\t 下期 {} 预测值为：{},".format(predictRes["issueNo"],[(predictRes["issueNo"][0]+1)],predictRes["predictValue"]))
        print(tool.curr_time())
      except Exception as e:
        print("预测 --- 错误")  # prediction error; keep polling
        print(e)