# library
import json
import sys
import os
import tensorflow as tf
from tensorflow import keras
from keras import regularizers

#sys.path.append('icdsweb')

#from . import shareglob
from  shareglob import *
import local_craft_loader_out5
from craft_all_five import bone, x1one, x2one,x3one, x4one, x5one, x6one, x7one, x8one, x9one, x10one, x11one, x12one, y1one, y2one, y3one, y4one, y5one

# `textilenum` comes from the star import of `shareglob` above; it selects which
# pre-generated data module is bound to the common alias `craft_out5_data`.
print('textilenum in craft_keras*.py file = ', textilenum)

# Range-dispatched import: each branch covers one textile-count band.
# NOTE(review): the L30 branch accepts up to 40.5 while the matching save-path
# logic further down (run_network/make_plot) uses <=40. for the 35-40 band —
# values in (40.0, 40.5] are handled inconsistently; confirm the intended cutoff.
# NOTE(review): if textilenum is outside [9.0, 59.5] no branch runs and
# `craft_out5_data` is never bound, so make_plot() would raise NameError.
if(textilenum>=9. and textilenum<=11.):
   import  craft_out5_10 as craft_out5_data
elif(textilenum>11. and textilenum<=15.):
   import  craft_out5_10_15 as craft_out5_data
elif(textilenum>15. and textilenum<=19.5):
   import  craft_out5_15_20 as craft_out5_data
elif(textilenum>19.5 and textilenum<=24.):
   import  craft_out5_20_24 as craft_out5_data
elif(textilenum>24. and textilenum<=29.5):
   import  craft_out5_25_30 as craft_out5_data
elif(textilenum>29.5 and textilenum<=35.):
   import  craft_out5_30_35 as craft_out5_data
elif(textilenum>35. and textilenum<=40.5):
   import  craft_out5_35_40 as craft_out5_data
elif(textilenum>40. and textilenum<=59.5):
   import  craft_out5_40 as craft_out5_data
#from polls.prediction import craft_loader_all_five
#from polls.prediction import craft_all_five

import matplotlib.pyplot as plt
import numpy as np
import random


def main():
   """Entry point: train the network, then reload the saved weights and
   write out the prediction/plot data files."""
   run_network()
   make_plot()
                       
def multilayer_network():
    """Build the fully-connected regression model.

    Architecture: six sigmoid hidden layers (sizes n_hidden1..n_hidden6, all
    L2-regularized with coefficient `eta`) followed by a linear output layer
    of size `n_output`.  All sizes come from the star-imported shareglob
    globals; input width is `n_input`.
    """
    hidden_sizes = (n_hidden1, n_hidden2, n_hidden3, n_hidden4, n_hidden5, n_hidden6)

    model = keras.models.Sequential()
    # First hidden layer also declares the input shape.
    model.add(keras.layers.Dense(input_shape=(n_input,), units=hidden_sizes[0],
                                 kernel_regularizer=regularizers.l2(eta), activation='sigmoid'))
    # Remaining hidden layers share the same regularizer and activation.
    for width in hidden_sizes[1:]:
        model.add(keras.layers.Dense(width, kernel_regularizer=regularizers.l2(eta), activation='sigmoid'))
    # Linear output head (regression targets).
    model.add(keras.layers.Dense(n_output, activation=None))

    return model

def run_network():#filename):
 """Train the Keras model on the loaded textile data and persist results.

 Side effects:
 - writes training costs and per-epoch predictions as JSON to
   'icds-all-five-keras'
 - saves the trained model weights to a textilenum-dependent .hdf5 file

 Reads many module-level globals from the `shareglob` star import
 (n_input, n_output, batch_size, nepoch, display_step, textilenum, ...).
 """

 #Graph input

 # TF1-style graph/session scaffolding; the placeholders below are created
 # but never fed — the Keras model.compile/fit path is what actually trains.
 g1=tf.Graph()
 with g1.as_default():
   x = tf.placeholder(tf.float32, [None, n_input])
   y = tf.placeholder(tf.float32, [None, n_output])


   datar=1
   if datar ==1:
      # data source
      # NOTE(review): the top of the file imports `local_craft_loader_out5`,
      # not `craft_loader_out5`; this name presumably arrives via
      # `from shareglob import *` — verify, else this raises NameError.
      training_data, vali_data, test_data = craft_loader_out5.load_data_wrapper()

#   pred = multilayer_network(x)
   print('ok for now in nn.')
   #Define loss ad optimizer
   #cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
#   print('pred.graph  ={0}'.format(pred.graph))
#   print('y.graph     ={0}'.format(y.graph))

   # Mean-squared-error regression with Adam, on the 6-hidden-layer model.
   model = multilayer_network()
   model.compile(loss='mse', optimizer='adam')
   print('ok for now in cost')

   #Initialize the variables
   # NOTE(review): tf.initialize_all_variables is a deprecated TF1 API and
   # the Keras model manages its own variables; this init appears vestigial.
   init = tf.initialize_all_variables()

# config = tf.ConfigProto()
# config.gpu_options.per_process_gpu_memory_fraction = 0.1
## sess = tf.Session(config=config) as sess:
# with tf.Session(config=config) as sess:
 config = tf.ConfigProto()
 config.gpu_options.allow_growth = True
# cpu_num = int(os.environ.get('CPU_NUM',64))
# config = tf.ConfigProto(device_count={"CPU": cpu_num},
#                inter_op_parallelism_threads = cpu_num,
#                intra_op_parallelism_threads = cpu_num,
#                log_device_placement=True)

# with tf.Session(config=config) as sess:
 with tf.Session(config=config, graph=g1) as sess:
   sess.run(init)

   #predict_data=np.array([[[9.01],[55.7]],[[13.458],[107.1]],[[16.871],[169.83]],[[22.907],[187.]]])
   #predict_data=np.array([[[0.486],[22.4],[27.325]],[[0.5],[26.4],[33.558]],[[0.5],[30.2],[ 37.818]],[[0.4],[45.4],[39.847]]])
#   predict_data=np.array([[0.486,22.4,27.325, 0.5],[0.5,26.4,33.558, 0.5],[0.5,30.2, 37.818, 0.5],[0.4,45.4,39.847, 0.5]])
   # Three hand-picked 13-feature sample rows used to track predictions
   # across epochs (feature meanings not documented here — see data modules).
   yarnnum=True
   if yarnnum==True:
#      predict_data=np.array([[4.528,28.947,30.472, 0.5, 6.93, 1.78, 136.01, 0.89, 82.99, 6.7,  68.67, 8.87, 204.25, 87.9, 338, 0.993],
      predict_data=np.array([[4.095, 28.54, 29.95, 7.5,  1.45, 138.5, 0.9, 83.745, 6.27, 64.0, 9.90, 276.0, 0.1],
                             [4.350, 28.70, 30.07, 8.13, 1.41, 137.1, 0.9, 83.39, 6.47, 68.67, 9.07, 260.25,0.1],
                             [4.590, 28.89, 30.1,  8.88, 1.37, 136.6, 0.9, 82.99, 6.77, 73.31, 8.25, 244.55,0.1]],
                          dtype='f')

   print('shape=',predict_data.shape,predict_data[0][0])
   npred=len(predict_data)#predresu=[]
   # Min-max normalize each feature column using the training-set extrema
   # (bone, x1one..x11one) with small offsets; column 12 is left raw.
   for i in range(npred):
      predict_data[i][0]=(predict_data[i][0]-min(bone)+0.02)/(max(bone)-min(bone)+0.05)
      predict_data[i][1]=(predict_data[i][1]-min(x1one)+0.02)/(max(x1one)-min(x1one)+0.05)
      predict_data[i][2]=(predict_data[i][2]-min(x2one)+0.02)/(max(x2one)-min(x2one)+0.05)
      predict_data[i][3]=(predict_data[i][3]-min(x3one)+0.02)/(max(x3one)-min(x3one)+0.05)
      predict_data[i][4]=(predict_data[i][4]-min(x4one)+0.02)/(max(x4one)-min(x4one)+0.05)
      predict_data[i][5]=(predict_data[i][5]-min(x5one)+0.02)/(max(x5one)-min(x5one)+0.05)
      predict_data[i][6]=(predict_data[i][6]-min(x6one)+0.02)/(max(x6one)-min(x6one)+0.05)
      predict_data[i][7]=(predict_data[i][7]-min(x7one)+0.02)/(max(x7one)-min(x7one)+0.05)
      predict_data[i][8]=(predict_data[i][8]-min(x8one)+0.02)/(max(x8one)-min(x8one)+0.05)
      predict_data[i][9]=(predict_data[i][9]-min(x9one)+0.02)/(max(x9one)-min(x9one)+0.05)
      predict_data[i][10]=(predict_data[i][10]-min(x10one)+0.02)/(max(x10one)-min(x10one)+0.05)
      predict_data[i][11]=(predict_data[i][11]-min(x11one)+0.02)/(max(x11one)-min(x11one)+0.05)
      predict_data[i][12]=predict_data[i][12]#-min(x12one)+0.02)/(max(x12one)-min(x12one)+0.05)

   predva=np.array([[]])
   tocost=np.array([])

   # Infinite batch generator over (x, y) pairs for fit_generator.
   # NOTE(review): the `b_size` parameter is never used — the body reads the
   # global `batch_size` instead; harmless today (caller passes batch_size)
   # but misleading. Shuffles `training_data` in place once per call.
   def generator(training_data, b_size):
     total_batch = int(len(training_data)/batch_size)
     random.shuffle(training_data) # is this OK for now???

     while True:
      #Loop over all batches
      for i in range(total_batch):
         batch_x =[x for x,y in training_data[i*batch_size:(i+1)*batch_size]]
         batch_y =[y for x,y in training_data[i*batch_size:(i+1)*batch_size]]
         #yield (batch_x, batch_y)
         yield (np.array(batch_x), np.array(batch_y))

   # Same as `generator` but only yields the first half of the batches.
   # Here the parameter name shadows the global `batch_size` it uses.
   def vali_generator(vali_data, batch_size):
     total_batch = int(len(vali_data)/batch_size)
     random.shuffle(vali_data) # is this OK for now???

     while True:
      #Loop over all batches
      for i in range(int(total_batch/2)):
      #for i in range(total_batch):
         batch_x =[x for x,y in vali_data[i*batch_size:(i+1)*batch_size]]
         batch_y =[y for x,y in vali_data[i*batch_size:(i+1)*batch_size]]
         #yield (batch_x, batch_y)
         yield (np.array(batch_x), np.array(batch_y))

   # Warm-up: 3 epochs of generator-driven training with validation.
   model.fit_generator(generator(training_data, batch_size), validation_data=vali_generator(vali_data, batch_size),validation_steps=30,
                    steps_per_epoch=np.ceil(len(training_data)/batch_size),
                    epochs=3
)

#   model.fit_generator(generator(training_data, batch_size),
#					steps_per_epoch=np.ceil(len(training_data)/batch_size),
#					epochs=3
#)

   #Training cycle
   # Main loop: nepoch epochs of manual train_on_batch, tracking average cost
   # and appending predictions for the 3 tracked rows after each epoch.
   for epoch in range(nepoch):
      avg_cost = 0.
      total_batch = int(len(training_data)/batch_size)
      random.shuffle(training_data)
      #Loop over all batches
      for i in range(total_batch):
         batch_x =[x for x,y in training_data[i*batch_size:(i+1)*batch_size]]
         batch_y =[y for x,y in training_data[i*batch_size:(i+1)*batch_size]]
#         batch_x=[]
#         batch_y=[]
#         for (x,y) in training_data[i*batch_size:(i+1)*batch_size]:
#            batch_x.append(x)
#            batch_y.append(y)
         # Run optimization op (backprop) and cost op (to get loss value)
#         c =model.train_on_batch(batch_x, batch_y)  # only GPU works
         c =model.train_on_batch(np.array(batch_x), np.array(batch_y))  # both GPU and CPU work
         #_, c = sess.run([optimizer, cost], feed_dict={x: batch_x, y: batch_y})
         # Compute average loss
         avg_cost += c / total_batch
      tocost=np.append(tocost,avg_cost)
      # Display logs per epoch step
      if epoch % display_step == 0:
         print("Epoch:", '%04d' % (epoch+1), "cost=", \
             "{:.9f}".format(avg_cost))
#        print "Epoch:", '%04d' % (epoch+1), "cost=", \
#            "{:.9f}".format(avg_cost)

      #predva = sess.run([pred], feed_dict={x: [predict_data[0],predict_data[1]]})
      #predvatmp=sess.run(pred, feed_dict={x: predict_data})
      # predva grows by 3 rows (one per tracked sample) each epoch;
      # make_plot() later de-interleaves it with stride 3.
      predvatmp= model.predict(predict_data)
#      predvatmp=sess.run(pred, feed_dict={x: predict_data})
      if epoch>0:
         predva=np.concatenate((predva,predvatmp),axis=0)
      else:
         predva=predvatmp
#      print('predict value=',predva)

   print('tocost=',tocost)#[:].tolist()
   tocost=tocost.tolist()
   predva=predva.tolist()
#   print('predva=',predva[1])
#   f = open(filename, "w")
   # Persist costs + per-epoch predictions for make_plot() to reload.
   f = open('icds-all-five-keras', "w")
   json.dump({"prediction":
              [tocost, predva]},
             f)
   f.close()

#   filenamenn='weights-biases-out5-'+ str(textilenum) + '.hdf5'
#   print('filenamenn= ', filenamenn)
#   filepath=filenamenn

   # Weight-file name mirrors the data-module dispatch at the top of the file.
   # NOTE(review): the 35-40 branch here uses <=40. while the import dispatch
   # uses <=40.5; textilenum in (40.0, 40.5] saves to 'weights-biases-40.hdf5'
   # but trained on the 35-40 data module — confirm intended cutoff.
   if(textilenum>=9. and textilenum<=11.):
      filepath='weights-biases-out5-10.hdf5'
   elif(textilenum>11. and textilenum<=15.):
      filepath='weights-biases-out5-10-15.hdf5'
   elif(textilenum>15. and textilenum<=19.5):
      filepath='weights-biases-out5-15-20.hdf5'
   elif(textilenum>19.5 and textilenum<=24.):
      filepath='weights-biases-out5-20-24.hdf5'
   elif(textilenum>24. and textilenum<=29.5):
      filepath='weights-biases-out5-25-30.hdf5'
   elif(textilenum>29.5 and textilenum<=35.):
      filepath='weights-biases-out5-30-35.hdf5'
   elif(textilenum>35. and textilenum<=40.):
      filepath='weights-biases-out5-35-40.hdf5'
   elif(textilenum>40. and textilenum<=59.5):
      filepath='weights-biases-40.hdf5'

   model.save(filepath)

#   print("Optimization Finished!")
   # Test model
#   correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
#   # Calculate accuracy
#   accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
#   print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))
   #print "Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels})


def make_plot():#filename):
    """Reload the trained model weights, evaluate predictions and errors,
    and write the result files.

    Side effects:
    - reads weights from a textilenum-dependent .hdf5 file
    - reads per-epoch predictions from 'icds-all-five-keras'
    - writes per-sample predictions/errors to 'icds-all-test'
    - writes the final summary to 'results-all-keras.txt'
    """

    # NOTE(review): `shareglob` is not a bound name given the visible imports
    # (only `from shareglob import *`; `import shareglob` is commented out at
    # the top of the file). This line likely raises NameError unless the star
    # import happens to expose a `shareglob` attribute — probably intended
    # `servern = gservern`. Confirm before changing.
    servern=shareglob.gservern

    model=multilayer_network()

#    filenamenn='weights-biases-out5-'+ str(textilenum) + '.hdf5'
#    filepath='filenamenn'

    print('textilenum= ', textilenum)

    # Weight file selection mirrors run_network()'s save-path dispatch
    # (same 35-40 vs import-dispatch <=40.5 cutoff discrepancy applies).
    if(textilenum>=9. and textilenum<=11.):
       print('textilenum= ', textilenum)
       filepath='weights-biases-out5-10.hdf5'
    elif(textilenum>11. and textilenum<=15.):
       filepath='weights-biases-out5-10-15.hdf5'
    elif(textilenum>15. and textilenum<=19.5):
       filepath='weights-biases-out5-15-20.hdf5'
    elif(textilenum>19.5 and textilenum<=24.):
       filepath='weights-biases-out5-20-24.hdf5'
    elif(textilenum>24. and textilenum<=29.5):
       filepath='weights-biases-out5-25-30.hdf5'
    elif(textilenum>29.5 and textilenum<=35.):
       filepath='weights-biases-out5-30-35.hdf5'
    elif(textilenum>35. and textilenum<=40.):
       filepath='weights-biases-out5-35-40.hdf5'
    elif(textilenum>40. and textilenum<=59.5):
       filepath='weights-biases-40.hdf5'

    model.load_weights(filepath)
#    model.load_weights('weights-biases-out5.hdf5')

    # Sample rows to predict: first three match run_network()'s tracked rows,
    # plus one extra fourth row.
    inputway=1
    if(inputway==1):
#      predict_data=np.array([[0.486,22.4,27.325, 0.5],[0.5,26.4,33.558, 0.5],[0.5,30.2, 37.818, 0.5],[0.4,45.4,39.847, 0.5]],dtype='f')
      predict_data=np.array([[4.095, 28.54, 29.95, 7.5,  1.45, 138.5, 0.9, 83.745, 6.27, 64.0, 9.90, 276.0, 0.1],
                             [4.350, 28.70, 30.07, 8.13, 1.41, 137.1, 0.9, 83.39, 6.47, 68.67, 9.07, 260.25,0.1],
                             [4.590, 28.89, 30.1,  8.88, 1.37, 136.6, 0.9, 82.99, 6.77, 73.31, 8.25, 244.55,0.1],
                             [5.520, 29.11, 31.22, 6.75, 0.43, 142.11,0.93,83.74, 7.72, 75.94, 8.61, 207.89,0.1]],
                          dtype='f')
#     datas=['0.486', '22.4', '27.325', '0.5','26.4','33.558','0.5', '30.2', '37.818','0.4','45.4','39.847']
#     datas=[0.486, 22.4, 27.325, 0.5,26.4,33.558,0.5, 30.2, 37.818,0.4,45.4,39.847]

    print('shape=',predict_data.shape,predict_data[0][0])
    npred=len(predict_data)#predresu=[]
    # Same per-column min-max normalization as in run_network(); column 12 raw.
    for i in range(npred):
       predict_data[i][0]=(predict_data[i][0]-min(bone)+0.02)/(max(bone)-min(bone)+0.05)
       predict_data[i][1]=(predict_data[i][1]-min(x1one)+0.02)/(max(x1one)-min(x1one)+0.05)
       predict_data[i][2]=(predict_data[i][2]-min(x2one)+0.02)/(max(x2one)-min(x2one)+0.05)
       predict_data[i][3]=(predict_data[i][3]-min(x3one)+0.02)/(max(x3one)-min(x3one)+0.05)
       predict_data[i][4]=(predict_data[i][4]-min(x4one)+0.02)/(max(x4one)-min(x4one)+0.05)
       predict_data[i][5]=(predict_data[i][5]-min(x5one)+0.02)/(max(x5one)-min(x5one)+0.05)
       predict_data[i][6]=(predict_data[i][6]-min(x6one)+0.02)/(max(x6one)-min(x6one)+0.05)
       predict_data[i][7]=(predict_data[i][7]-min(x7one)+0.02)/(max(x7one)-min(x7one)+0.05)
       predict_data[i][8]=(predict_data[i][8]-min(x8one)+0.02)/(max(x8one)-min(x8one)+0.05)
       predict_data[i][9]=(predict_data[i][9]-min(x9one)+0.02)/(max(x9one)-min(x9one)+0.05)
       predict_data[i][10]=(predict_data[i][10]-min(x10one)+0.02)/(max(x10one)-min(x10one)+0.05)
       predict_data[i][11]=(predict_data[i][11]-min(x11one)+0.02)/(max(x11one)-min(x11one)+0.05)
       predict_data[i][12]=predict_data[i][12]# -min(x12one)+0.02)/(max(x12one)-min(x12one)+0.05)

#       predict_data[i][13]=(predict_data[i][13]-min(y3one)+0.005)/(max(y3one)-min(y3one)+0.01)
#       predict_data[i][14]=(predict_data[i][14]-min(y4one)+0.005)/(max(y4one)-min(y4one)+0.01)
#       predict_data[i][15]=(predict_data[i][15]-min(y5one)+0.005)/(max(y5one)-min(y5one)+0.01)
#    predva=np.array([[]])
#    tocost=np.array([])

    print('len(predict_data) = ', len(predict_data))
    print('predict_data[0] = ', predict_data[0])

    predvaout=model.predict(predict_data)
#    predvaout=multilayer_out(predict_data)

    # Denormalize each of the 5 outputs back to original units (inverse of
    # the y1one..y5one min-max scaling used when the targets were prepared).
    predtestpart=predvaout#model.predict(predict_data)
    predtest1=np.array(predtestpart)[:,0]*(max(y1one)-min(y1one)+0.01) +min(y1one)-0.005
    predtest2=np.array(predtestpart)[:,1]*(max(y2one)-min(y2one)+0.01) +min(y2one)-0.005
    predtest3=np.array(predtestpart)[:,2]*(max(y3one)-min(y3one)+0.01) +min(y3one)-0.005
    predtest4=np.array(predtestpart)[:,3]*(max(y4one)-min(y4one)+0.01) +min(y4one)-0.005
    predtest5=np.array(predtestpart)[:,4]*(max(y5one)-min(y5one)+0.01) +min(y5one)-0.005

    predtest1=[np.round(a,2) for a in predtest1]
    predtest2=[np.round(a,2) for a in predtest2]
    predtest3=[np.round(a,2) for a in predtest3]
    predtest4=[np.round(a,2) for a in predtest4]
    predtest5=[np.round(a,2) for a in predtest5]
    print('predtest1=',predtest1)
    print('predtest2=',predtest2)
    print('predtest3=',predtest3)
    print('predtest4=',predtest4)
    print('predtest5=',predtest5)


    # Predict over the full training-set inputs from the dispatched data module.
    predvad= model.predict(craft_out5_data.datatrsh)    #all the training data independent variable
#    print('output all test data= ', predtest)

#    print('predva=',sess.run(predva))
    large_predrd1=np.array(predvad)[:,0]*(max(y1one)-min(y1one)+0.01) +min(y1one)-0.005
    large_predrd2=np.array(predvad)[:,1]*(max(y2one)-min(y2one)+0.01) +min(y2one)-0.005
    large_predrd3=np.array(predvad)[:,2]*(max(y3one)-min(y3one)+0.01) +min(y3one)-0.005
    large_predrd4=np.array(predvad)[:,3]*(max(y4one)-min(y4one)+0.01) +min(y4one)-0.005
    large_predrd5=np.array(predvad)[:,4]*(max(y5one)-min(y5one)+0.01) +min(y5one)-0.005

#    large_predrd1=[np.round(a,2) for a in large_predrd1]
#    large_predrd2=[np.round(a,2) for a in large_predrd2]
    large_predrd1=[np.round(a,2).tolist() for a in large_predrd1]
    large_predrd2=[np.round(a,2).tolist() for a in large_predrd2]
    large_predrd3=[np.round(a,2).tolist() for a in large_predrd3]
    large_predrd4=[np.round(a,2).tolist() for a in large_predrd4]
    large_predrd5=[np.round(a,2).tolist() for a in large_predrd5]

#    print('large_predr1=',large_predrd1)
#    print('large_predr2=',large_predrd2)

    # Denormalize the ground-truth targets the same way for comparison.
    yarnnumori=np.array(craft_out5_data.datatrshy)[:,0]*(max(y1one)-min(y1one)+0.01) +min(y1one)-0.005
    yarnstrori=np.array(craft_out5_data.datatrshy)[:,1]*(max(y2one)-min(y2one)+0.01) +min(y2one)-0.005
    yarntw1ori=np.array(craft_out5_data.datatrshy)[:,2]*(max(y3one)-min(y3one)+0.01) +min(y3one)-0.005
    yarntw2ori=np.array(craft_out5_data.datatrshy)[:,3]*(max(y4one)-min(y4one)+0.01) +min(y4one)-0.005
    yarntw3ori=np.array(craft_out5_data.datatrshy)[:,4]*(max(y5one)-min(y5one)+0.01) +min(y5one)-0.005

#    yarnnumori=[np.round(a,2) for a in yarnnumori]
#    yarnstrori=[np.round(a,2) for a in yarnstrori]
    yarnnumori=[np.round(a,2).tolist() for a in yarnnumori]
    yarnstrori=[np.round(a,2).tolist() for a in yarnstrori]
    yarntw1ori=[np.round(a,2).tolist() for a in yarntw1ori]
    yarntw2ori=[np.round(a,2).tolist() for a in yarntw2ori]
    yarntw3ori=[np.round(a,2).tolist() for a in yarntw3ori]

    # Relative absolute error per sample for each of the 5 outputs.
    errnum=abs(np.array(large_predrd1)- np.array(yarnnumori))/np.array(yarnnumori)
    errstr=abs(np.array(large_predrd2)- np.array(yarnstrori))/np.array(yarnstrori)
    errtw1=abs(np.array(large_predrd3)- np.array(yarntw1ori))/np.array(yarntw1ori)
    errtw2=abs(np.array(large_predrd4)- np.array(yarntw2ori))/np.array(yarntw2ori)
    errtw3=abs(np.array(large_predrd5)- np.array(yarntw3ori))/np.array(yarntw3ori)

#    errnum=["%.3f%%" % (a*100) for a in errnum]
#    errstr=["%.3f%%" % (a*100) for a in errstr]
    errnum=[np.round(a,3).tolist() for a in errnum]
    errstr=[np.round(a,3).tolist() for a in errstr]
    errtw1=[np.round(a,3).tolist() for a in errtw1]
    errtw2=[np.round(a,3).tolist() for a in errtw2]
    errtw3=[np.round(a,3).tolist() for a in errtw3]

    # (prediction, ground truth, relative error) triples for the first two outputs.
    outyarnnum=list(zip(large_predrd1, yarnnumori, errnum))
    outyarnstr=list(zip(large_predrd2, yarnstrori, errstr))
#    print('outyarnnum =',outyarnnum)
#    print('outyarnstr =',outyarnstr)

    f = open('icds-all-test', 'w')
    json.dump({'yarnnum': [outyarnnum], 'yarnstr': [outyarnstr]}, f)
    f.close()


    print('error of yarn number max = ', max(errnum))
    print('error of yarn strength max = ', max(errstr))
    print('error of yarn twist max = ', max(errtw1))
    print('error of yarn twist max = ', max(errtw2))
    print('error of yarn twist max = ', max(errtw3))

    # Reload the per-epoch history run_network() wrote.
    # NOTE(review): both branches open the identical path; the servern switch
    # currently has no effect (likely a placeholder for a server-specific path).
    if(servern==1):
      f = open(r"icds-all-five-keras", "r")
    else:
      f = open(r"icds-all-five-keras", "r")
    results = json.load(f)
    f.close()
    tocost, predva = results[
        "prediction"]
    # Convert raw classification numbers to percentages, for plotting
    # NOTE(review): the copy below is a no-op (leftover from a scaling step?).
    tocost = [x for x in tocost]

    large_predr1=np.array(predva)[:,0]*(max(y1one)-min(y1one)+0.01) +min(y1one)-0.005
    large_predr2=np.array(predva)[:,1]*(max(y2one)-min(y2one)+0.01) +min(y2one)-0.005
    large_predr3=np.array(predva)[:,2]*(max(y3one)-min(y3one)+0.01) +min(y3one)-0.005
    large_predr4=np.array(predva)[:,3]*(max(y4one)-min(y4one)+0.01) +min(y4one)-0.005
    large_predr5=np.array(predva)[:,4]*(max(y5one)-min(y5one)+0.01) +min(y5one)-0.005

    # De-interleave the per-epoch rows with stride 3 (run_network appends 3
    # tracked-sample rows per epoch).
    # NOTE(review): starts 3 and 4 with step 3 overlap the stride-3 groups of
    # starts 0-2 — they look like leftovers from an earlier 5-row layout; the
    # summary below only uses series 1-3 per output. Confirm intent.
    large_pred11=[large_predr1[i] for i in range(0,len(large_predr1),3)]
    large_pred12=[large_predr1[i] for i in range(1,len(large_predr1),3)]
    large_pred13=[large_predr1[i] for i in range(2,len(large_predr1),3)]
    large_pred14=[large_predr1[i] for i in range(3,len(large_predr1),3)]
    large_pred15=[large_predr1[i] for i in range(4,len(large_predr1),3)]

    large_pred21=[large_predr2[i] for i in range(0,len(large_predr2),3)]
    large_pred22=[large_predr2[i] for i in range(1,len(large_predr2),3)]
    large_pred23=[large_predr2[i] for i in range(2,len(large_predr2),3)]
    large_pred24=[large_predr2[i] for i in range(3,len(large_predr2),3)]
    large_pred25=[large_predr2[i] for i in range(4,len(large_predr2),3)]

    large_pred31=[large_predr3[i] for i in range(0,len(large_predr3),3)]
    large_pred32=[large_predr3[i] for i in range(1,len(large_predr3),3)]
    large_pred33=[large_predr3[i] for i in range(2,len(large_predr3),3)]
    large_pred34=[large_predr3[i] for i in range(3,len(large_predr3),3)]
    large_pred35=[large_predr3[i] for i in range(4,len(large_predr3),3)]

    large_pred41=[large_predr4[i] for i in range(0,len(large_predr4),3)]
    large_pred42=[large_predr4[i] for i in range(1,len(large_predr4),3)]
    large_pred43=[large_predr4[i] for i in range(2,len(large_predr4),3)]
    large_pred44=[large_predr4[i] for i in range(3,len(large_predr4),3)]
    large_pred45=[large_predr4[i] for i in range(4,len(large_predr4),3)]

    large_pred51=[large_predr5[i] for i in range(0,len(large_predr5),3)]
    large_pred52=[large_predr5[i] for i in range(1,len(large_predr5),3)]
    large_pred53=[large_predr5[i] for i in range(2,len(large_predr5),3)]
    large_pred54=[large_predr5[i] for i in range(3,len(large_predr5),3)]
    large_pred55=[large_predr5[i] for i in range(4,len(large_predr5),3)]

    print('large_pred1 = ', round(large_pred11[-1],2), round(large_pred12[-1],2), round(large_pred13[-1],2), round(large_pred14[-1],2), round(large_pred15[-1],2))
    print('large_pred2 = ', round(large_pred21[-1],2), round(large_pred22[-1],2), round(large_pred23[-1],2), round(large_pred24[-1],2), round(large_pred25[-1],2))
    print('large_pred3 = ', round(large_pred31[-1],2), round(large_pred32[-1],2), round(large_pred33[-1],2), round(large_pred34[-1],2), round(large_pred35[-1],2))
    print('large_pred4 = ', round(large_pred41[-1],2), round(large_pred42[-1],2), round(large_pred43[-1],2), round(large_pred44[-1],2), round(large_pred45[-1],2))
    print('large_pred5 = ', round(large_pred51[-1],2), round(large_pred52[-1],2), round(large_pred53[-1],2), round(large_pred54[-1],2), round(large_pred55[-1],2))
#    print('predtest1=',predtest1)
#    print('predtest2=',predtest2)

    print('predtest1 ori=',[10.0, 10.0, 10.0, 10.0])
    print('predtest2 ori=',[15.8, 15.4, 15.8, 15.0])

    # Final-epoch predictions for the 3 tracked samples, outputs 1 and 2.
    salarge_predr1=[large_pred11[-1], large_pred12[-1], large_pred13[-1]]#, large_pred14[-1]]
    salarge_predr2=[large_pred21[-1], large_pred22[-1], large_pred23[-1]]#, large_pred24[-1]]
    salarge_predr1=[np.round(a,2) for a in salarge_predr1]
    salarge_predr2=[np.round(a,2) for a in salarge_predr2]

    print('len(large_pred21) = ',len(large_pred21))#=[np.round(a,2) for a in salarge_predr2]
#    salarge_predr1=[Decimal('a').quantize(Decimal('0.00')) for a in salarge_predr1]
#    salarge_predr2=[Decimal('a').quantize(Decimal('0.00')) for a in salarge_predr2]

#    salarge_predr1=salarge_predr1.tolist()#=[a.tolist() for a in salarge_predr1]
#    salarge_predr2=salarge_predr2.tolist()#=[a.tolist() for a in salarge_predr2]
    salarge_predr1=[a.tolist() for a in salarge_predr1]#=[a.tolist() for a in salarge_predr1]
    salarge_predr2=[a.tolist() for a in salarge_predr2]#=[a.tolist() for a in salarge_predr2]

    # NOTE(review): both branches open the same path (see earlier servern note).
    if(servern==1):
      f = open(r"results-all-keras.txt", 'w')
    else:
      f = open(r"results-all-keras.txt", 'w')

    # NOTE(review): 'fible strength' looks like a typo for 'fibre strength',
    # but it is a consumed JSON key — confirm downstream readers before renaming.
    json.dump({"fible strength":
               [salarge_predr1],
               "textile number": [salarge_predr2]},
              f)
    f.close()

def testpy():
    """Minimal smoke-test hook: print a fixed marker and return None."""
    print('OK')

if __name__ == "__main__":
	 main()
