#coding=utf-8

import os
import math
import Config as conf
import numpy as np
import scipy
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix, dok_matrix
import time
import pickle
import pymongo
from pymongo import MongoClient

from pyspark.sql import SparkSession
from pyspark.sql import Row
from pyspark.ml.feature import Word2Vec
from pyspark.ml.feature import HashingTF, IDF, Tokenizer
from PipeDataProcess import PipeUtil
import Util
import appUsage
import appAction

# Global education-level vocabulary shared by both training and prediction.
# The 1-based position in this list is the numeric grade feature (see
# ProcessOneFileData), so the order must stay fixed between runs; unseen
# grades are appended at runtime.
#gradeArray = ['高中','本科', '大专', '硕士', '初中及以下', '中专/技校', '博士及以上']
gradeArray = ['本科', '大专', '中专/技校', '博士及以上', '高中', '初中及以下', '硕士']
#gradeArray = []

def canConvertToInt(value):
    """Return True if value can be converted to an int, False otherwise.

    Fixes: the parameter no longer shadows the builtin `object`, and the
    bare `except:` is narrowed to the two exceptions int() actually raises
    for bad input (ValueError for unparsable strings, TypeError for
    non-numeric types such as None).
    """
    try:
        int(value)
        return True
    except (ValueError, TypeError):
        return False

# Load the contest_dataset_ad dictionary, keyed by "ad_id".
def getAd():
    """Return the ad lookup table: ad_id -> list of remaining columns."""
    ads = getDataDict('contest_dataset_ad/', 2)

    # Sanity-check a few known rows and the expected total row count.
    expected = {
        97133: ['340', '1535', '58448'],
        31172: ['340', '5290', '33747'],
        114749: ['174', '9081', '84809'],
    }
    for adId, info in expected.items():
        assert ads[adId] == info
    assert len(ads) == 116079

    return ads

# Load the contest_dataset_user_profile dictionary, keyed by "user_id".
def getUserProfile(useDb=False):
    """Return the user-profile lookup table (or trigger the DB load)."""
    profiles = getDataDict('contest_dataset_user_profile/', 0, useDb)

    checkStart = time.time()
    if not useDb:
        # Spot-check two known users; only possible with the in-memory dict.
        assert profiles[4564426] == ['4', '1', '中专/技校', '61', '36', '151', '\\N']
        assert profiles[6096011] == ['2', '1', '\\N', '48', '1164', '105', '\\N']
    print("userDict assert query cost: %s"%(time.time() - checkStart))

    return profiles

# Load the contest_dataset_app_category dictionary, keyed by "app_id".
def getAppCategory():
    """Return the app-category lookup table: app_id -> remaining columns."""
    categories = getDataDict('contest_dataset_app_category/', 0)

    # Spot-check one known app and the expected total (two source files).
    assert categories[18275] == ['695412,45479,85932,827188', '7', '37']
    assert len(categories) == 49082 + 51596

    return categories



def getDataDict(dataPath, keyIndex, useDb=False):
    """Load a whitespace-separated dataset into a dict (or into MongoDB).

    Args:
        dataPath: sub-directory under conf.ROOT_DATA_FILE_PATH with the files.
        keyIndex: column index used as the dictionary key (cast to int).
        useDb: when True, rows go into the MongoDB dmContest.userProfile
            collection instead of the in-memory dict, and the returned dict
            stays empty (the 'pythonData' pickle is then only a done-marker).

    A 'pythonData' pickle next to the data files caches the parsed dict so
    later runs skip the parse.

    Fixes over the original: the open file handle no longer shadows the
    `filePath` variable, and the final log line's typos ("getDateDict",
    "cos") are corrected.
    """
    start = time.time()
    adDataPath = conf.ROOT_DATA_FILE_PATH + dataPath
    serializationDataPath = adDataPath + 'pythonData'
    dataDict = {}
    if useDb:
        dbClient = MongoClient()
        db = dbClient.dmContest
        collection = db.userProfile
    if not os.path.exists(serializationDataPath):
        for parent, dirnames, filenames in os.walk(adDataPath):
            for filename in filenames:
                # Skip Spark/Finder bookkeeping files.
                if filename in ('_SUCCESS', '.DS_Store'):
                    continue

                filePath = os.path.join(parent, filename)
                print(filePath)
                with open(filePath, encoding='utf-8') as dataFile:
                    for line in dataFile:
                        tmp = line.split()
                        if not tmp:
                            continue
                        # Value = every column except the key column.
                        # NOTE(review): this drops every column whose value
                        # equals the key, not just the key column itself —
                        # confirm no other column can collide with the key.
                        value = [i for i in tmp if i != tmp[keyIndex]]
                        if useDb:
                            collection.insert_one({'k': tmp[keyIndex], 'v': value})
                        else:
                            dataDict[int(tmp[keyIndex])] = value
        if useDb:
            print("create index for database...")
            collection.create_index([('k', pymongo.ASCENDING)], unique=True)
        # Just a placeholder file when using the database.
        with open(serializationDataPath, 'wb') as serializationFile:
            pickle.dump(dataDict, serializationFile, True)
    else:
        if not useDb:
            with open(serializationDataPath, 'rb') as serializationFile:
                dataDict = pickle.load(serializationFile)

    print("getDataDict %s cost %s"%(dataPath, time.time() - start))
    return dataDict


# Load the final (preprocessed) training data from pickled '*.db' files.
def loadSampleData(subPath=''):
    """Concatenate x, y, userIds and clickTime lists from every '*.db'
    pickle under contest_dataset_label/<subPath>.

    Returns (xArray, yArray, userIdArray, clickTimeArray) — four parallel
    lists covering all files found.

    Fixes over the original: removed the unused `innerStart` timer and the
    dead pre-initializations, and normalized the inconsistent
    `userIdarrayPerFile` variable name.
    """
    start = time.time()
    xArray = []
    yArray = []
    userIdArray = []
    clickTimeArray = []
    labelRoot = conf.ROOT_DATA_FILE_PATH + 'contest_dataset_label/' + subPath
    for parent, dirnames, filenames in os.walk(labelRoot):
        for filename in filenames:
            # Only the pickled per-file results produced by prepareSampleData.
            if not filename.endswith('db'):
                continue

            trainFilePath = os.path.join(parent, filename)
            print(trainFilePath)

            with open(trainFilePath, 'rb') as trainFile:
                dataDict = pickle.load(trainFile)

            xArrayPerFile = dataDict['x']
            yArrayPerFile = dataDict['y']
            userIdArrayPerFile = dataDict['userIds']
            clickTimePerFile = dataDict['clickTime']

            print("fexture:%s x:%s y:%s userIds:%s clickTime:%s"%(len(xArrayPerFile[0]), len(xArrayPerFile), len(yArrayPerFile), len(userIdArrayPerFile), len(clickTimePerFile)))

            xArray += xArrayPerFile
            yArray += yArrayPerFile
            userIdArray += userIdArrayPerFile
            clickTimeArray += clickTimePerFile

    print("all files read complete cost: %s" % (time.time() - start))
    return xArray, yArray, userIdArray, clickTimeArray

# Read one raw sample file and turn each line into a numeric feature row.
def ProcessOneFileData(trainFilePath, adDict, userDict, appDict, useDb, collection, predicting=False):
    """Parse one whitespace-separated sample file into feature vectors.

    Per the indexing below: column 0 is the sample id, column 1 the label,
    column 6 the connection type ('WIFI'/'MOBILE'/other). After the first
    two columns are dropped, index 1 is ad_id and index 2 is user_id; both
    are replaced by looked-up ad/app and user features instead of being fed
    in directly.

    With predicting=False, rows whose user profile is malformed are dropped;
    with predicting=True they are kept with zero-filled user features.

    Returns five parallel lists:
        (x rows, y labels, sample ids, user ids, click times).
    """
    print(trainFilePath)
    xArrayPerFile = []
    yArrayPerFile = []
    idArrayPerFile = []
    userIdArrayPerFile = []
    clickTimePerFile = []

    # MongoDB lookup statistics (only meaningful when useDb is True).
    dbQueryCount = 0
    dbQueryCost = 0

    innerStart = time.time()

    with open(trainFilePath) as trainFile:
        for line in trainFile:
            tmp = line.split()

            if len(tmp) == 0:
                continue

            # Sample id (column 0).
            idArrayPerFile.append(tmp[0])

            # Convert connection type (column 6) to an int code:
            # WIFI=1, MOBILE=2, anything else=0.
            ct = tmp[6]
            if ct == 'WIFI':
                cti = 1
            elif ct == 'MOBILE':
                cti = 2
            else:
                cti = 0
            tmp[6] = cti

            # Label (column 1).
            y = int(tmp[1])

            # Feature columns: drop the id and label columns.
            tmp = tmp[2:]
            x = []

            # do not add ad_id and user_id into features, add ad info and user info instead
            ad_id_index = 1
            user_id_index = 2
            hasValidData = True

            shouldAddFeatures = True
            for i, s in enumerate(tmp):
                if i == ad_id_index and shouldAddFeatures:
                    adInfoArray = list(adDict[int(s)]) # make a copy
                    # do not add app_id into features, add app info instead
                    app_id_index = 2 # index after key removed
                    app_id = int(adInfoArray[app_id_index])

                    appInfoArray = list(appDict[app_id]) # make a copy
                    del appInfoArray[0] #app_description:  123,123,123 or 123
                    del adInfoArray[app_id_index]

                    adInfoArray.extend(appInfoArray)
                    # Keep only the purely numeric fields.
                    addFeatureArray = [int(i) for i in adInfoArray if canConvertToInt(i)]
                    assert len(addFeatureArray) == 4

                    x.extend(addFeatureArray)
                elif i == user_id_index and shouldAddFeatures:
                    userInfoArray = []
                    if not useDb:
                        userInfoArray = list(userDict[int(s)]) # make a copy
                    else:
                        # Per-row MongoDB lookup; timed for the summary below.
                        ss = time.time()
                        doc = collection.find_one({'k':s})
                        dbQueryCount += 1
                        dbQueryCost += time.time()-ss
                        userInfoArray = doc['v']
                    if len(userInfoArray) != 7:
                        print("bad user info for user_id: %s"%(s))
                        if not predicting:
                            # Training: drop the whole row.
                            hasValidData = False;
                            break
                        else:
                            # Predicting: keep the row with zeroed user features.
                            # NOTE(review): the placeholder grade is the int 0,
                            # which below fails the '\\N' check and gets appended
                            # to the global gradeArray — confirm this is intended.
                            userInfoArray = [0]*7
                            userIdArrayPerFile.append(s)
                            # tmp[0] is the first column after id/label —
                            # presumably the click time; confirm against the
                            # dataset layout.
                            clickTimePerFile.append(tmp[0])
                    else:
                        userIdArrayPerFile.append(s)
                        clickTimePerFile.append(tmp[0])
                    del userInfoArray[6] # app_installedList:  123,123,123 or 123

                    # Encode education level as a 1-based index into gradeArray
                    # (0 means missing); unseen grades extend the global list.
                    gradeStr = userInfoArray[2]
                    if gradeStr == '\\N':
                        #print("no gradeStr")
                        userInfoArray[2] = 0;
                    else:
                        if gradeStr not in gradeArray:
                            print(gradeStr)
                            gradeArray.append(gradeStr)
                        userInfoArray[2] = gradeArray.index(gradeStr) + 1

                    addFeatureArray = [int(i) for i in userInfoArray if canConvertToInt(i)]
                    assert len(addFeatureArray) == 6

                    x.extend(addFeatureArray)
                else:
                    # Plain numeric column: use as-is.
                    x.append(int(s))

            if hasValidData:
                # Per the asserts: 16 features with ad/app/user expansion,
                # 8 raw columns otherwise.
                if shouldAddFeatures:
                    assert len(x) == 16
                else:
                    assert len(x) == 8

                xArrayPerFile.append(x)
                yArrayPerFile.append(y)

    print("end read cost %s db queryCount: %s queryCost: %s" % (time.time() - innerStart, dbQueryCount, dbQueryCost))

    return xArrayPerFile, yArrayPerFile, idArrayPerFile, userIdArrayPerFile, clickTimePerFile

def batch_generator(X, y, sparseX, batch_size, samples_per_epoch, steps_per_epoch):
    """Endless generator of (dense feature batch, label batch) pairs.

    Rows are shuffled once up front; with prepareSparseData on, the shuffled
    sparse matrix is pre-sliced into conf.BATCH_SIZE-sized chunks that are
    densified lazily, one per yielded step. The short tail batch (when
    samples_per_epoch % batch_size != 0) is yielded at epoch end. X itself is
    currently unused (the dense concatenation lines are commented out).

    NOTE(review): the prepared chunks use conf.BATCH_SIZE while label slices
    use the batch_size argument — the two must be equal for features and
    labels to line up. Also, y is already shuffled, yet y_batch indexes it
    again with shuffle_index values while prepared feature chunks are taken
    sequentially — the feature/label alignment in the prepared path looks
    inconsistent; verify before relying on it. The per-epoch reshuffle is
    commented out, so every epoch repeats the same order.
    """
    start = time.time()
    number_of_batches = math.floor(samples_per_epoch/batch_size)
    counter=0
    # One-time shuffle applied to the features and labels.
    shuffle_index = np.arange(y.shape[0])
    np.random.shuffle(shuffle_index)
    #X =  X[shuffle_index, :]
    sparseX = sparseX[shuffle_index, :]
    y =  y[shuffle_index]

    prepareSparseData = True
    if prepareSparseData:
        # Pre-slice the shuffled sparse matrix into per-batch chunks so the
        # hot loop only has to densify one chunk per step.
        print("batch_generator pareparing ...")
        arrayLen = math.floor(sparseX.shape[0] / conf.BATCH_SIZE)
        if sparseX.shape[0] % conf.BATCH_SIZE != 0:
            arrayLen += 1

        featuresBatchArray = []
        for index in range(arrayLen):
            if index != arrayLen - 1:
                featuresBatchArray.append(sparseX[index*conf.BATCH_SIZE:(index+1)*conf.BATCH_SIZE, :])
            else:
                # Last chunk may be shorter than a full batch.
                featuresBatchArray.append(sparseX[index*conf.BATCH_SIZE:sparseX.shape[0], :])
        assert(len(featuresBatchArray) == steps_per_epoch)
        print("batch_generator prepare cost: %s"%(time.time() - start))

    while 1:
        loopStart = time.time()
        index_batch = shuffle_index[batch_size*counter:batch_size*(counter+1)]
        #X_batch = X[index_batch,:]
        start = time.time()
        sparseX_batch = []
        if prepareSparseData:
            sparseX_batch = featuresBatchArray[counter]
        else:
            sparseX_batch = sparseX[index_batch,:]
        #print("sparse index cost: %s"%(time.time() - start))
        start = time.time()
        # Densify only the current batch to keep peak memory bounded.
        sparseX_batch = sparseX_batch.toarray()
        #print("sparse to array cost: %s"%(time.time() - start))
        start = time.time()
        #mean = np.sum(sparseX_batch, axis=0) / len(sparseX_batch)
        #squareMean = np.sum(sparseX_batch**2, axis=0) / len(sparseX_batch)
        #sparseX_batch = (sparseX_batch - mean) / (squareMean + 1e-17)
        #print("normalize cost: %s"%(time.time() - start))
        y_batch = y[index_batch]
        counter += 1
        #yield(np.concatenate((X_batch, sparseX_batch), axis=1),y_batch)
        #print("batch_generator loop cost: %s"%(time.time() - loopStart))
        yield(sparseX_batch,y_batch)
        if (counter >= number_of_batches):
            # End of epoch: emit the short remainder batch, then restart.
            if samples_per_epoch%batch_size != 0:
                index_batch = shuffle_index[batch_size*counter:samples_per_epoch]
                #X_batch = X[index_batch,:]
                sparseX_batch = []
                if prepareSparseData:
                    sparseX_batch = featuresBatchArray[counter]
                else:
                    sparseX_batch = sparseX[index_batch,:]
                sparseX_batch = sparseX_batch.toarray()
                #mean = np.sum(sparseX_batch, axis=0) / len(sparseX_batch)
                #squareMean = np.sum(sparseX_batch**2, axis=0) / len(sparseX_batch)
                #sparseX_batch = (sparseX_batch - mean) / (squareMean + 1e-17)
                y_batch = y[index_batch]
                #yield(np.concatenate((X_batch, sparseX_batch), axis=1),y_batch)
                yield(sparseX_batch,y_batch)
            #np.random.shuffle(shuffle_index)
            counter=0

def sparseMatrixTest(sparseMatrix, denseMatrix):
    """Benchmark toarray() across sparse formats and slice sizes, then quit().

    For each (startIndex, row) window, converts the slice to csc/csr/lil/dok,
    times densification of each, then times concatenating the dense slice
    with the last densified result. Terminates the process afterwards.
    """
    for startIndex in (100000, 200000, 300000, 400000):
        for row in (512, 1024, 2048, 4096, 10240):
            print(" -------start:%s row:%s----- " %(startIndex, row))
            window = sparseMatrix[startIndex:startIndex+row,]
            converted = [
                ('csc', csc_matrix(window)),
                ('csr', csr_matrix(window)),
                ('lil', lil_matrix(window)),
                ('dok', dok_matrix(window)),
            ]

            dense = None
            for label, matrix in converted:
                tick = time.time()
                dense = matrix.toarray()
                print("%s %s toarray cost: %s"%(label, dense.shape, time.time() - tick))

            denseWindow = denseMatrix[startIndex:startIndex+row,]
            tick = time.time()
            np.concatenate((denseWindow, dense), axis=1)
            print("concatenate dense cost: %s"%(time.time() - tick))
    quit()

def prepareSampleData(useDb):
    """Convert every raw 'txt' label file into a pickled '<name>.db' file
    holding {'x', 'y', 'userIds', 'clickTime'} for fast reloading.

    A 'pythonData' placeholder file marks completion; when it exists the
    whole step is skipped. With useDb=True, user profiles are looked up in
    the MongoDB dmContest.userProfile collection instead of in memory.
    """
    collection = {}
    serializationPlaceholderPath = conf.ROOT_DATA_FILE_PATH + 'contest_dataset_label/pythonData'
    if not os.path.exists(serializationPlaceholderPath):
        # Lookup tables for ad, user and app features.
        adDict = getAd()
        userDict = getUserProfile(useDb)
        appDict = getAppCategory()

        if useDb:
            dbClient = MongoClient()
            db = dbClient.dmContest
            collection = db.userProfile

        #for parent, dirnames, filenames in os.walk(conf.ROOT_DATA_FILE_PATH + 'contest_dataset_label/date=11/'):
        for parent, dirnames, filenames in os.walk(conf.ROOT_DATA_FILE_PATH + 'contest_dataset_label/'):
            #print(filenames)
            for filename in filenames:
                # Only raw text inputs; skip previously generated files.
                if not filename.endswith('txt'):
                    continue

                trainFilePath = os.path.join(parent, filename)

                # Process and pickle the results for one raw file.
                serializationDataPath = trainFilePath + '.db'
                xArrayPerFile, yArrayPerFile, idArray, userIdArray, clickTimeArray = ProcessOneFileData(trainFilePath, adDict, userDict, appDict, useDb, collection, predicting=False)

                with open(serializationDataPath, 'wb') as serializationFile:
                    # truncate() is redundant here — mode 'wb' already empties
                    # the file — but kept to preserve behavior exactly.
                    serializationFile.truncate()
                    pickle.dump({'x':xArrayPerFile, 'y':yArrayPerFile, 'userIds': userIdArray, 'clickTime': clickTimeArray}, serializationFile, True)

        # Placeholder file marking the step complete.
        with open(serializationPlaceholderPath, 'wb') as serializationFile:
            pickle.dump({}, serializationFile, True)

def _appInstallBatchPath(dataIndex, predict):
    """Path for one pickled batch file of app-install features."""
    # Zero-pad to two digits so batch files sort in creation order.
    batchNum = str(dataIndex).zfill(2)
    if predict:
        return conf.ROOT_DATA_FILE_PATH + 'contest_testset/appInstallFeature' + batchNum + '.dataBatch'
    return conf.ROOT_DATA_FILE_PATH + 'contest_dataset_label/appInstallFeature' + batchNum + '.dataBatch'


def _dumpAppInstallBatch(features, dataIndex, predict, rowCount):
    """Pickle one batch of app-install features and log the dump."""
    serializationDataPath = _appInstallBatchPath(dataIndex, predict)
    with open(serializationDataPath, 'wb') as file:
        pickle.dump(features, file, True)
        # Log the real batch size (the original cleared the list first, so
        # it always logged 0).
        print("%s : %s dump %s" % (rowCount, len(features), serializationDataPath))


def prepareAppData(useDb=False, predict=False, adDict=None, userDict=None, appDict=None, userIdArray=None, clickTimeArray=None):
    """Build per-user "installed apps" documents and pickle them in batches
    of 1,000,000 rows as appInstallFeatureNN.dataBatch files.

    Each row is a 1-tuple holding a list of app-index strings (Word2Vec
    "words"); users with a malformed profile or no installs get [''].

    Args:
        useDb: kept for signature compatibility; not used in this step.
        predict: write batches under contest_testset/ instead of
            contest_dataset_label/, and derive ids from the raw test file.
        adDict/userDict/appDict: optional preloaded lookup tables.
        userIdArray/clickTimeArray: optional precomputed id/time lists
            (only honored when predict is True).

    An appInstallFeatures.data placeholder marks completion; when it exists
    the whole step is skipped.

    Fixes over the original: `is None` instead of `== None`, unused locals
    removed, duplicated batch-dump code extracted into helpers, and the
    batch-size log corrected (see _dumpAppInstallBatch).
    """
    serializationAppInstallPath = conf.ROOT_DATA_FILE_PATH + 'contest_dataset_label/appInstallFeatures.data'
    if predict:
        serializationAppInstallPath = conf.ROOT_DATA_FILE_PATH + 'contest_testset/appInstallFeatures.data'
    if os.path.exists(serializationAppInstallPath):
        return

    # Load any lookup table the caller did not supply.
    if adDict is None:
        adDict = getAd()
    if userDict is None:
        userDict = getUserProfile(False)
    if appDict is None:
        appDict = getAppCategory()

    if not predict:
        # Only the id/time lists are needed here; x and y are discarded.
        _, _, userIdArray, clickTimeArray = loadSampleData()
    elif userIdArray is None:
        collection = {}
        predictDataFilePath = conf.ROOT_DATA_FILE_PATH + 'contest_testset/test_all'
        _, _, _, userIdArray, clickTimeArray = ProcessOneFileData(predictDataFilePath, adDict, userDict, appDict, False, collection, predicting=True)

    print("appInstallFeatures.data no exsit, process...")

    # Map every known app_id to a dense index; that index (as a string) is
    # the Word2Vec token for the app.
    start = time.time()
    appIdDict = {}
    for i, appId in enumerate(appDict.keys()):
        appIdDict[appId] = i
    print("prepare app id dict cost: %s"%(time.time() - start))

    start = time.time()
    dataIndex = 0
    i = 0
    badUserInfoCount = 0
    emptyInstallCount = 0
    appInstalledFeatures = []
    for userId in userIdArray:
        userInfoArray = userDict[int(userId)]
        if len(userInfoArray) != 7:
            # Malformed profile: treat as having no installed apps.
            badUserInfoCount += 1
            app_installedListStr = ''
        else:
            app_installedListStr = userInfoArray[6]

        appInstalledFeature = []
        for appId in app_installedListStr.split(','):
            if canConvertToInt(appId):
                appInstalledFeature.append(str(appIdDict[int(appId)]))

        # Periodic progress log.
        if i % 10000 == 0:
            print("%s user: %s installed: %s cost: %s"%(i, userId, appInstalledFeature, time.time() - start))
            start = time.time()

        if len(appInstalledFeature) == 0:
            # Word2Vec needs a non-empty document; use a single empty token.
            appInstalledFeature.append('')
            emptyInstallCount += 1

        appInstalledFeatures.append((appInstalledFeature,))

        if len(appInstalledFeatures) == 1000000:
            _dumpAppInstallBatch(appInstalledFeatures, dataIndex, predict, i)
            appInstalledFeatures = []
            dataIndex += 1
        i += 1

    # Dump the final (possibly short) batch.
    _dumpAppInstallBatch(appInstalledFeatures, dataIndex, predict, i)
    appInstalledFeatures = []

    print("userIdArray len: %s"%(len(userIdArray)))
    print("userDict len: %s"%(len(userDict)))
    print("badUserInfoCount: %s"%(badUserInfoCount))
    print("emptyInstallCount: %s"%(emptyInstallCount))

    # Drop references to the big lookup tables before returning.
    appDict = {}
    userDict = {}

    # Placeholder marks the whole step as complete.
    with open(serializationAppInstallPath, 'wb') as file:
        pickle.dump({}, file, True)

def fitWord2vec(appInstallFeatures, spark, vectorSize):
    """Train a Spark Word2Vec model on app-id "documents" and return the
    resulting embedding per input row as an np.array.

    appInstallFeatures: rows shaped for createDataFrame with a single
    "text" column (list-of-token 1-tuples).
    """
    documentDF = spark.createDataFrame(appInstallFeatures, ["text"])
    # Drop the local reference so the raw feature list can be reclaimed.
    appInstallFeatures = []
    #documentDF = spark.createDataFrame(appInstalledFeatures[0:10000], ["text"])
    print("spark.createDataFrame done")

    start = time.time()
    estimator = Word2Vec(vectorSize=vectorSize, minCount=0, inputCol="text", outputCol="result")
    model = estimator.fit(documentDF)
    print("spark word2Vec.fit cost: %s"%(time.time() - start))

    start = time.time()
    transformed = model.transform(documentDF)
    print("spark model.transform cost: %s"%(time.time() - start))

    start = time.time()
    vectors = np.array([row[1] for row in transformed.collect()])
    print("spark model to vector cost : %s"%(time.time() - start))

    return vectors

def transformAppData(predict=False):
    """Run Word2Vec over every pickled '*.dataBatch' app-install file and
    pickle the resulting vector batches as '*.transformed' files.

    Skipped entirely once the 'appTransformedData' placeholder exists.

    NOTE(review): filenames arrive in os.walk order, which is arbitrary, and
    dataIndex resets per directory — output batch numbering may not match
    input batch numbering; confirm batches all live in one directory and the
    ordering is acceptable downstream.
    NOTE(review): training output goes to contest_dataset_label/ while
    predict output goes to contest_testset/install/ — the asymmetry appears
    to match loadTransformedAppData, but confirm.
    """
    serializationPlaceholderPath = conf.ROOT_DATA_FILE_PATH + 'contest_dataset_label/appTransformedData'
    if predict:
        serializationPlaceholderPath = conf.ROOT_DATA_FILE_PATH + 'contest_testset/appTransformedData'
    if not os.path.exists(serializationPlaceholderPath):

        # Read app-install data from the dataBatch files and embed each batch.
        appInstallFeaturesCount = 0
        spark = SparkSession.builder.appName('appName').master('local').config("spark.local.dir", conf.SPARK_TEMP_PATH).config("spark.driver.cores", conf.SPARK_CORE).config("spark.driver.maxResultSize","8g").config("spark.driver.memory","8g").config("spark.executor.memory","8g").getOrCreate()
        path = conf.ROOT_DATA_FILE_PATH + 'contest_dataset_label/'
        if predict:
            path = conf.ROOT_DATA_FILE_PATH + 'contest_testset/'
        for parent, dirnames, filenames in os.walk(path):
            dataIndex = 0
            for filename in filenames:
                innerStart = time.time()

                if not filename.endswith('dataBatch'):
                    continue

                filePath = os.path.join(parent, filename)
                print(filePath)

                # Load one 1M-row batch of app-install documents.
                appInstallFeaturesPer1m = []
                with open(filePath, 'rb') as file:
                    appInstallFeaturesPer1m = pickle.load(file)
                    appInstallFeaturesCount += len(appInstallFeaturesPer1m)
                    print("%s len: %s"%(filename, len(appInstallFeaturesPer1m)))
                    print(appInstallFeaturesPer1m[0])

                # Embed each document into a 6-dimensional vector.
                vectorArray = fitWord2vec(appInstallFeaturesPer1m, spark, 6)

                # Two-digit batch number keeps output files sortable.
                if dataIndex < 10:
                    batchNum = '0' + str(dataIndex)
                else:
                    batchNum = str(dataIndex)
                serializationDataPath = conf.ROOT_DATA_FILE_PATH + 'contest_dataset_label/appInstallFeature' + batchNum + '.transformed'
                if predict:
                    serializationDataPath = conf.ROOT_DATA_FILE_PATH + 'contest_testset/install/appInstallFeature' + batchNum + '.transformed'
                with open(serializationDataPath, 'wb') as file:
                    print(vectorArray[0])
                    pickle.dump(vectorArray, file, True)

                dataIndex += 1

        print("appInstallFeaturesCount: %s"%(appInstallFeaturesCount))

        # Placeholder marks the step as complete.
        with open(serializationPlaceholderPath, 'wb') as file:
            pickle.dump({}, file, True)


def loadTransformedAppData(predict=False):
    """Load and concatenate all pickled '*.transformed' Word2Vec batches
    into one np.array, one embedding row per sample.

    Fixes over the original: batch files are numbered (…Feature00,
    …Feature01, …) and rows must stay aligned with the sample order they
    were built from, but os.walk yields directory entries in arbitrary
    order — filenames are now sorted to guarantee batch order. The unused
    `innerStart` timer was removed.
    """
    print("loadTransformedAppData")
    appInstalledData = []
    path = conf.ROOT_DATA_FILE_PATH + 'contest_dataset_label/'
    if predict:
        path = conf.ROOT_DATA_FILE_PATH + 'contest_testset/install/'
    for parent, dirnames, filenames in os.walk(path):
        # Sorted so zero-padded batch numbers load in creation order.
        for filename in sorted(filenames):
            if not filename.endswith('transformed'):
                continue

            filePath = os.path.join(parent, filename)
            print(filePath)

            with open(filePath, 'rb') as file:
                appInstalledData.extend(pickle.load(file))

    return np.array(appInstalledData)


# Assemble the full labeled dataset and split it into train/test.
def getData(useDb=False):
    """Build the complete training matrix and split off a test set.

    Pipeline: base features from loadSampleData; app-install (Word2Vec),
    app-action and app-usage vectors plus newsfeed/shopping/query pipe
    vectors are concatenated column-wise; the matrix is normalized and the
    last conf.TEST_SIZE rows become the test split.

    Returns (xTrain, yTrain, xTest, yTest).
    """

    xArray, yArray, userIdArray, clickTimeArray = loadSampleData()

    # All four arrays must be row-aligned.
    dataSize = len(xArray)
    print("total data size: " + str(dataSize))
    assert dataSize > conf.TEST_SIZE
    assert len(xArray) == len(yArray) == len(userIdArray) == len(clickTimeArray)

    #xArray = np.array(xArray, dtype=np.int64)
    #yArray = np.array(yArray, dtype=np.int64)
    xArray = np.array(xArray, dtype=np.float64)
    yArray = np.array(yArray, dtype=np.float64)

    print("xArray.shape: %s "%(str(xArray.shape)))

    # in case some data is too big
    #xArray = xArray/1000

    # App-install features: build documents, embed with Word2Vec, append.
    prepareSampleData(useDb)
    prepareAppData(useDb)
    transformAppData()
    appInstalledData = loadTransformedAppData()
    print("appInstalledData.shape: %s"%(str(appInstalledData.shape)))
    assert len(xArray) == len(appInstalledData)
    xArray = np.concatenate((xArray, appInstalledData), axis=1)
    appInstalledData = []

    # App-action features.
    appActionData = appAction.loadTransformData()
    print("appActionData.shape: %s"%(str(appActionData.shape)))
    assert len(xArray) == len(appActionData)
    xArray = np.concatenate((xArray, appActionData), axis=1)
    appActionData = []
    # App-usage features.
    appUsageData = appUsage.loadTransformData()
    print("appUsageData.shape: %s"%(str(appUsageData.shape)))
    assert len(xArray) == len(appUsageData)
    xArray = np.concatenate((xArray, appUsageData), axis=1)
    appUsageData = []

    # Pipeline (pipe) data keyed by (userId, clickTime).
    xArray = np.concatenate((xArray, __getNewsFeed__(inputUserIdArray=userIdArray, inputClickTimeArray=clickTimeArray)), axis=1) # newsfeed
    xArray = np.concatenate((xArray, __getShopping__(inputUserIdArray=userIdArray, inputClickTimeArray=clickTimeArray)), axis=1) # shopping
    xArray = np.concatenate((xArray, __getQuery__(inputUserIdArray=userIdArray, inputClickTimeArray=clickTimeArray)), axis=1) # query


    # Normalize input.
    # NOTE(review): this divides by the mean of squares rather than the
    # standard deviation — confirm this normalization is intended.
    start = time.time()
    xMean = np.sum(xArray, axis = 0) / dataSize
    print("xMean.shape: %s value:%s"%(xMean.shape, xMean))
    xMeanSquare = np.sum(xArray**2, axis = 0) / dataSize
    print("xMeanSquare.shape: %s value:%s"%(xMeanSquare.shape, xMeanSquare))
    xArray = (xArray - xMean) / (xMeanSquare + 1e-17)


    # Split: last conf.TEST_SIZE rows are held out for testing.
    xTrain = xArray[:-conf.TEST_SIZE]
    yTrain = yArray[:-conf.TEST_SIZE]

    xTest = xArray[-conf.TEST_SIZE:]
    yTest = yArray[-conf.TEST_SIZE:]
    end = time.time()
    print("np.array operation cost: %s" % (end - start))
    print('train data shape: ' + str(xTrain.shape))

    return xTrain, yTrain, xTest, yTest



# Build (or load from cache) the prediction feature matrix and sample ids.
def getPredictData(forceReload=False, useDb=False):
    """Load or rebuild the prediction feature matrix and sample ids.

    Rebuilds from the raw test file when forceReload is set or either cached
    pickle (pythonData/idData) is missing; otherwise loads the caches.

    Returns (idArray, xArray).

    NOTE(review): features are normalized with the test set's own mean and
    mean-of-squares rather than the training statistics — confirm intended.
    """
    idArray = []
    xArray = []

    serializationDataPath = conf.ROOT_DATA_FILE_PATH + 'contest_testset/pythonData'
    serializationIdPath = conf.ROOT_DATA_FILE_PATH + 'contest_testset/idData'
    trainFilePath = conf.ROOT_DATA_FILE_PATH + 'contest_testset/test_all'
    if forceReload or not os.path.exists(serializationDataPath) or not os.path.exists(serializationIdPath):
        # Lookup tables for ad, user and app features.
        adDict = getAd()
        userDict = getUserProfile(useDb)
        appDict = getAppCategory()

        if useDb:
            dbClient = MongoClient()
            db = dbClient.dmContest
            collection = db.userProfile
        else:
            collection = {}

        # Parse the raw test file (predicting=True keeps malformed users).
        xArray, yArray, idArray, userIdArray, clickTimeArray = ProcessOneFileData(trainFilePath, adDict, userDict, appDict, useDb, collection, predicting=True)
        dataSize = len(xArray)
        idSize = len(idArray)
        assert dataSize == idSize
        print("dataSize: %s idSize: %s"%(dataSize, idSize))
        xArray = np.array(xArray, dtype=np.float64)


        # Pipeline (pipe) data keyed by (userId, clickTime).
        xArray = np.concatenate((xArray, __getNewsFeed__(isPredict=True, inputUserIdArray=userIdArray, inputClickTimeArray=clickTimeArray)), axis=1)  # newsfeed
        xArray = np.concatenate((xArray, __getShopping__(isPredict=True, inputUserIdArray=userIdArray, inputClickTimeArray=clickTimeArray)), axis=1)  # shopping
        xArray = np.concatenate((xArray, __getQuery__(isPredict=True, inputUserIdArray=userIdArray, inputClickTimeArray=clickTimeArray)), axis=1)  # query


        # App-action features.
        appActionData = appAction.loadTransformData(predict=True)
        print("appActionData.shape: %s"%(str(appActionData.shape)))
        assert len(xArray) == len(appActionData)
        xArray = np.concatenate((xArray, appActionData), axis=1)
        appActionData = []
        # App-usage features.
        appUsageData = appUsage.loadTransformData(predict=True)
        print("appUsageData.shape: %s"%(str(appUsageData.shape)))
        assert len(xArray) == len(appUsageData)
        xArray = np.concatenate((xArray, appUsageData), axis=1)
        appUsageData = []

        # App-install features (built for the test set).
        prepareAppData(useDb=False, predict=True, adDict=adDict,userDict=userDict,appDict=appDict,userIdArray=userIdArray, clickTimeArray=clickTimeArray )
        # Drop the big lookup tables before the Word2Vec step.
        adDict = []
        userDict = []
        appDict = []

        transformAppData(predict=True)
        appInstalledData = loadTransformedAppData(predict=True)
        print("appInstalledData.shape: %s"%(str(appInstalledData.shape)))
        assert len(xArray) == len(appInstalledData)
        xArray = np.concatenate((xArray, appInstalledData), axis=1)
        appInstalledData = []

        # Normalize input (same formula as getData).
        xMean = np.sum(xArray, axis = 0) / dataSize
        xMeanSquare = np.sum(xArray**2, axis = 0) / dataSize

        xArray = (xArray - xMean) / (xMeanSquare + 1e-17)


        # Cache the feature matrix.
        with open(serializationDataPath, 'wb') as file:
            pickle.dump(xArray, file, True)

        # Cache the sample ids.
        with open(serializationIdPath, 'wb') as idFile:
            pickle.dump(idArray, idFile, True)
    else:
        # Cached path: load previously pickled features and ids.
        with open(serializationDataPath, 'rb') as file:
            xArray = pickle.load(file)
        with open(serializationIdPath, 'rb') as idFile:
            idArray = pickle.load(idFile)

    return idArray, xArray

# Return the newsfeed pipe-data vectors as an np.array.
def __getNewsFeed__(isPredict = False, inputUserIdArray = None, inputClickTimeArray = None):
    """Fetch newsfeed pipeline vectors for the given user/click-time pairs.

    Defaults changed from mutable [] to None (mutable-default pitfall);
    calling with the arguments omitted still behaves like empty lists.
    """
    if inputUserIdArray is None:
        inputUserIdArray = []
    if inputClickTimeArray is None:
        inputClickTimeArray = []
    return __getPipeData__(conf.ROOT_DATA_FILE_PATH + 'contest_dataset_newsfeed/', isPredict, inputUserIdArray=inputUserIdArray, inputClickTimeArray=inputClickTimeArray)

# Return the shopping pipe-data vectors as an np.array.
def __getShopping__(isPredict = False, inputUserIdArray = None, inputClickTimeArray = None):
    """Fetch shopping pipeline vectors for the given user/click-time pairs.

    Defaults changed from mutable [] to None (mutable-default pitfall);
    calling with the arguments omitted still behaves like empty lists.
    """
    if inputUserIdArray is None:
        inputUserIdArray = []
    if inputClickTimeArray is None:
        inputClickTimeArray = []
    return __getPipeData__(conf.ROOT_DATA_FILE_PATH + 'contest_dataset_shopping/', isPredict, inputUserIdArray=inputUserIdArray, inputClickTimeArray=inputClickTimeArray)

# Return the query pipe-data vectors as an np.array.
def __getQuery__(isPredict = False, inputUserIdArray = None, inputClickTimeArray = None):
    """Fetch query pipeline vectors for the given user/click-time pairs.

    Defaults changed from mutable [] to None (mutable-default pitfall);
    calling with the arguments omitted still behaves like empty lists.
    """
    if inputUserIdArray is None:
        inputUserIdArray = []
    if inputClickTimeArray is None:
        inputClickTimeArray = []
    return __getPipeData__(conf.ROOT_DATA_FILE_PATH + 'contest_dataset_query/', isPredict, inputUserIdArray=inputUserIdArray, inputClickTimeArray=inputClickTimeArray)

# Get the vector array for a pipe/stream dataset, as an np.ndarray.
def __getPipeData__(rootPath, isPredict = False, inputUserIdArray = [], inputClickTimeArray = []):
    """Return one model vector per (userId, clickTime) sample as an np.ndarray.

    Vectors are computed in chunks through a Spark pipeline (PipeUtil) and
    cached on disk under rootPath ('br/' for training, 'tbr/' for prediction).
    When a 'complete' marker file exists, the cached chunk files (named 1, 2,
    ...) are loaded instead of recomputing.

    Args:
        rootPath: dataset root directory, e.g. .../contest_dataset_newsfeed/.
        isPredict: True selects the prediction cache dir ('tbr/'),
            False the training cache dir ('br/').
        inputUserIdArray: user ids, one per sample.
        inputClickTimeArray: click times aligned with inputUserIdArray.

    NOTE(review): the [] defaults are mutable objects shared across calls —
    harmless here only because they are never mutated in this function.
    """

    # Directory where the final vector files are saved
    if not isPredict:
        savedPath = rootPath + 'br/'
    else:
        savedPath = rootPath + 'tbr/'

    # Create the cache directory if it does not exist yet
    if not os.path.exists(savedPath):
        os.makedirs(savedPath)

    # Were the vectors already saved?
    isSaved = os.path.exists(savedPath + 'complete') # completion marker

    # Get userArray and clickTimeArray
    userIdArray, clickTimeArray = inputUserIdArray, inputClickTimeArray
    # if not isPredict:
    #     # _, _, userIdArray, clickTimeArray = loadSampleData()
    # else:
    #     userIdArray, clickTimeArray = inputUserIdArray, inputClickTimeArray


    assert len(userIdArray) == len(clickTimeArray)

    if isSaved:
        # Cache hit: concatenate chunk files 1, 2, ... until the first
        # missing index (chunk numbering below starts at 1).
        i = 1
        arrayList = []
        while True:
            path = savedPath + str(i)
            if os.path.exists(path):
                print('load vector for file:{}'.format(path))
                with open(path, 'rb') as f:
                    arrayList += pickle.load(f)
                i += 1
            else:
                break
        return np.array(arrayList)
    else:
        dataList = []   # full in-memory accumulation; chunk files are a cache
        tmpList = []    # raw token strings for the current chunk
        pipe = PipeUtil(rootPath)
        fileIndex = -1  # stays -1 at i == 0, so no flush on the first sample

        # Chunk size; shopping data is larger, so use bigger chunks
        step = 100000
        if rootPath.find('contest_dataset_shopping') != -1:
            step = 1000000

        for i in range(len(userIdArray)):
            if i != 0:
                fileIndex = i % step

            if fileIndex == 0:
                print('i:{}'.format(i))

            userId = userIdArray[i]
            clickTime = clickTimeArray[i]
            # NOTE(review): pipe.vector presumably returns a comma-separated
            # token string — confirm in PipeUtil.
            tmpList.append(pipe.vector(userId, clickTime, isPredict))

            # Flush every `step` samples, and at the final sample.
            # NOTE(review): at i == k*step the chunk includes sample i, so the
            # first chunk holds step+1 items — existing behavior, and what the
            # saved chunk files already contain.
            if fileIndex == 0 or i == len(userIdArray) - 1:
                print('create data frame')
                # NOTE(review): the comprehension reuses the name `i`; safe
                # only under Python 3 scoping — the later int(i / step) relies
                # on the outer loop `i` being untouched.
                testDF = pipe.spark.createDataFrame([(i.split(','),) for i in tmpList], ['content'])
                vecArray = []
                print('transform data frame')
                for row in pipe.model.transform(testDF).collect():
                    # Row is presumably (input tokens, vector) — verify
                    # against the pipeline model's output columns.
                    _, vector = row
                    vecArray.append(vector)
                dataList += vecArray
                tmpList = []

                # Save the chunk locally; chunks are numbered 1..K so the
                # cache-hit loader above can scan from 1.
                pathIndex = int(i / step)
                if i == len(userIdArray) - 1 and fileIndex != 0:
                    pathIndex += 1

                path = savedPath + str(pathIndex)
                comletePath = savedPath + 'complete'
                print('save vector for file:{}'.format(path))
                with open(path, 'wb') as f:
                    pickle.dump(vecArray, f, True)

                # Completion marker: written only after the last sample
                if i == len(userIdArray) - 1:
                    with open(comletePath, 'wb') as f:
                        pickle.dump('complete', f, True)

        npArray = np.array(dataList)

    return npArray

# Compute and persist vectors for a pipe/stream dataset (save-only variant).
def __preparePipeData__(rootPath, isPredict = False, inputUserIdArray = [], inputClickTimeArray = []):
    """Compute vectors for every (userId, clickTime) sample and save them.

    Save-only sibling of __getPipeData__: it writes the same numbered chunk
    files and 'complete' marker under rootPath ('br/' for training, 'tbr/'
    for prediction) but keeps nothing in memory and returns None. If the
    'complete' marker already exists, it does nothing.

    Args:
        rootPath: dataset root directory, e.g. .../contest_dataset_query/.
        isPredict: True selects the prediction cache dir ('tbr/'),
            False the training cache dir ('br/').
        inputUserIdArray: user ids, one per sample.
        inputClickTimeArray: click times aligned with inputUserIdArray.

    NOTE(review): the [] defaults are mutable objects shared across calls —
    harmless here only because they are never mutated in this function.
    """

    # Directory where the final vector files are saved
    if not isPredict:
        savedPath = rootPath + 'br/'
    else:
        savedPath = rootPath + 'tbr/'

    # Create the cache directory if it does not exist yet
    if not os.path.exists(savedPath):
        os.makedirs(savedPath)

    # Were the vectors already saved?
    isSaved = os.path.exists(savedPath + 'complete') # completion marker

    # Get userArray and clickTimeArray
    userIdArray, clickTimeArray = inputUserIdArray, inputClickTimeArray

    assert len(userIdArray) == len(clickTimeArray)

    if isSaved:
        # Already prepared — nothing to do (no loading in this variant).
        pass
    else:
        tmpList = []    # raw token strings for the current chunk
        pipe = PipeUtil(rootPath)
        fileIndex = -1  # stays -1 at i == 0, so no flush on the first sample

        # Chunk size; shopping data is larger, so use bigger chunks
        step = 100000
        if rootPath.find('contest_dataset_shopping') != -1:
            step = 1000000

        for i in range(len(userIdArray)):
            if i != 0:
                fileIndex = i % step

            if fileIndex == 0:
                print('i:{}'.format(i))

            userId = userIdArray[i]
            clickTime = clickTimeArray[i]
            # NOTE(review): pipe.vector presumably returns a comma-separated
            # token string — confirm in PipeUtil.
            tmpList.append(pipe.vector(userId, clickTime, isPredict))

            # Flush every `step` samples, and at the final sample.
            # NOTE(review): at i == k*step the chunk includes sample i, so the
            # first chunk holds step+1 items — existing behavior, consistent
            # with __getPipeData__'s chunking.
            if fileIndex == 0 or i == len(userIdArray) - 1:
                print('create data frame')
                # NOTE(review): the comprehension reuses the name `i`; safe
                # only under Python 3 scoping — the later int(i / step) relies
                # on the outer loop `i` being untouched.
                testDF = pipe.spark.createDataFrame([(i.split(','),) for i in tmpList], ['content'])
                vecArray = []
                print('transform data frame')
                for row in pipe.model.transform(testDF).collect():
                    # Row is presumably (input tokens, vector) — verify
                    # against the pipeline model's output columns.
                    _, vector = row
                    vecArray.append(vector)
                tmpList = []


                # Save the chunk locally; chunks are numbered 1..K to match
                # the loader in __getPipeData__'s cache-hit branch.
                pathIndex = int(i / step)
                if i == len(userIdArray) - 1 and fileIndex != 0:
                    pathIndex += 1

                path = savedPath + str(pathIndex)
                comletePath = savedPath + 'complete'
                print('save vector for file:{}'.format(path))
                with open(path, 'wb') as f:
                    pickle.dump(vecArray, f, True)
                vecArray = []  # release the chunk immediately (save-only)

                # Completion marker: written only after the last sample
                if i == len(userIdArray) - 1:
                    with open(comletePath, 'wb') as f:
                        pickle.dump('complete', f, True)

# Prepare (pre-compute and cache) the newsfeed vectors on disk.
def __prepareNewsFeed__(isPredict = False, inputUserIdArray = None, inputClickTimeArray = None):
    """Pre-compute and cache newsfeed vectors; returns None (save-only).

    Thin wrapper around __preparePipeData__ rooted at the newsfeed dir.

    Args:
        isPredict: True for the prediction set, False for training.
        inputUserIdArray: user ids, one per sample (defaults to empty list).
        inputClickTimeArray: click times aligned with inputUserIdArray.
    """
    # Fix: use None sentinels instead of shared mutable [] defaults;
    # normalize back to [] so downstream len()/indexing behaves as before.
    return __preparePipeData__(
        conf.ROOT_DATA_FILE_PATH + 'contest_dataset_newsfeed/',
        isPredict,
        inputUserIdArray=inputUserIdArray if inputUserIdArray is not None else [],
        inputClickTimeArray=inputClickTimeArray if inputClickTimeArray is not None else [])

# Prepare (pre-compute and cache) the query vectors on disk.
def __prepareQuery__(isPredict = False, inputUserIdArray = None, inputClickTimeArray = None):
    """Pre-compute and cache query vectors; returns None (save-only).

    Thin wrapper around __preparePipeData__ rooted at the query dir.

    Args:
        isPredict: True for the prediction set, False for training.
        inputUserIdArray: user ids, one per sample (defaults to empty list).
        inputClickTimeArray: click times aligned with inputUserIdArray.
    """
    # Fix: use None sentinels instead of shared mutable [] defaults;
    # normalize back to [] so downstream len()/indexing behaves as before.
    return __preparePipeData__(
        conf.ROOT_DATA_FILE_PATH + 'contest_dataset_query/',
        isPredict,
        inputUserIdArray=inputUserIdArray if inputUserIdArray is not None else [],
        inputClickTimeArray=inputClickTimeArray if inputClickTimeArray is not None else [])