#coding=utf-8

import os
import math
import Config as conf
import numpy as np
import time
import pickle

from pyspark.sql import SparkSession
from pyspark.sql import Row
from pyspark.ml.feature import Word2Vec
from pyspark.ml.feature import HashingTF, IDF, Tokenizer

#import DataProcessor as dp



# Root folders for the raw app-action training data and the contest test set.
# conf.ROOT_DATA_FILE_PATH comes from the project-level Config module.
dataPath = conf.ROOT_DATA_FILE_PATH + 'contest_dataset_app_actions/'
predictDataPath = conf.ROOT_DATA_FILE_PATH + 'contest_testset/'
# Empty pickle written once all per-day raw data has been serialized; its
# mere existence serves as a "preparation done" marker (see prepareData).
serializationPath = dataPath + 'pythonData'

def prepareData():
    """Parse the raw per-day app-action files and cache them as pickles.

    For each of the 30 daily folders (``date=01/`` .. ``date=30/``) under
    ``dataPath``, read every Spark-style ``part-*`` shard, group the
    whitespace-separated records by user id, and serialize the resulting
    dict to a ``pythonData`` pickle inside the same daily folder.  A final
    empty pickle at ``serializationPath`` acts as a completion marker so
    repeated calls are no-ops.
    """
    # The placeholder file doubles as a "done" flag: if it exists we assume
    # all 30 daily pickles were already written and skip everything.
    if os.path.exists(serializationPath):
        return

    for day in range(1, 31):
        dataDict = {}
        # Zero-padded folder name, e.g. 'date=07/'.
        subPath = 'date=%02d/' % day
        print(dataPath + subPath)
        for parent, dirnames, filenames in os.walk(dataPath + subPath):
            for filename in filenames:
                # Only the Spark output shards ('part-*') hold real records.
                if "part-" not in filename:
                    continue

                filePath = os.path.join(parent, filename)
                with open(filePath) as file:
                    for line in file:
                        # Each record: user_id app_id action_type action_time
                        tmp = line.split()
                        userId = tmp[0]
                        value = {"app_id": tmp[1], "action_type": tmp[2], "action_time": tmp[3]}
                        # Group all of a user's actions for the day together.
                        dataDict.setdefault(userId, []).append(value)

        # 'wb' already truncates, so no explicit truncate() is needed.
        # protocol=1 keeps the on-disk format identical to the original
        # positional `True` argument.
        with open(dataPath + subPath + 'pythonData', 'wb') as file:
            pickle.dump(dataDict, file, 1)

    # write placeholder / completion marker
    with open(serializationPath, 'wb') as file:
        pickle.dump({}, file, 1)



def loadRawData(day):
    """Load the cached per-day app-action dict written by prepareData().

    Parameters
    ----------
    day : str (or int)
        Day-of-month index; the on-disk folders are zero-padded
        (``date=07/``).

    Returns
    -------
    dict
        Mapping user_id -> list of {"app_id", "action_type", "action_time"}
        records, or an empty dict when the pickle is missing.
    """
    # Zero-pad to two digits to match the folder naming scheme; int() also
    # makes this robust to callers passing an int instead of a string.
    subPath = 'date=%02d/' % int(day)
    print("appUsage load: " + subPath)
    filePath = dataPath + subPath + 'pythonData'
    if not os.path.exists(filePath):
        print("%s does not exist" % (filePath))
        return {}
    # Reuse the already-built filePath instead of rebuilding the same string.
    with open(filePath, 'rb') as file:
        return pickle.load(file)

def transformData(predict=False):
    """Transform per-user app actions into word2vec feature vectors, one
    pickle per day.

    For every sample (user click), collects the app ids the user
    downloaded / installed / uninstalled in the three days up to the click,
    runs each action list through dp.fitWord2vec on a local Spark session,
    concatenates the three embeddings column-wise, and pickles the result
    to ``<day>.transformed``.  Samples are processed grouped by click
    date; a day's batch is flushed whenever the date index changes.

    NOTE(review): this function calls ``dp.*`` helpers, but the
    ``import DataProcessor as dp`` at the top of the file is commented
    out — as written this raises NameError.  Confirm the import before use.

    :param predict: when True, read the test-set samples and write under
        the test-set ``action/`` folder; feature days are shifted back by 3.
    """

    # Placeholder file marks that the whole transform already ran.
    placeHolderPath = dataPath + 'transformData'
    if predict:
        placeHolderPath = predictDataPath + 'action/transformData'

    if not os.path.exists(placeHolderPath):

        # Local Spark session used only by dp.fitWord2vec; memory limits
        # come from the project Config module.
        spark = SparkSession.builder.appName('appName').master('local').config("spark.local.dir", conf.SPARK_TEMP_PATH).config("spark.driver.cores", conf.SPARK_CORE).config("spark.driver.maxResultSize","8g").config("spark.driver.memory","8g").config("spark.executor.memory","8g").getOrCreate()

        print("appAction.transformData")
        if not predict:
            # Training: sample user ids + click times from the sample loader.
            xArray, yArray, userIdArray, clickTimeArray = dp.loadSampleData()
            #xArray, yArray, userIdArray, clickTimeArray = dp.loadSampleData(subPath='date=11/')
        else:
            # Prediction: rebuild the test-set samples from the raw file,
            # which needs the ad / user / app lookup dicts.
            adDict = dp.getAd()
            userDict = dp.getUserProfile(False)
            appDict = dp.getAppCategory()

            collection = {}
            predictDataFilePath = conf.ROOT_DATA_FILE_PATH + 'contest_testset/test_all'
            xArray, yArray, idArray, userIdArray, clickTimeArray = dp.ProcessOneFileData(predictDataFilePath, adDict, userDict, appDict, False, collection, predicting=True)

            # Release the large lookup dicts as soon as possible.
            adDict = []
            userDict = []
            appDict = []

        # Only userIdArray / clickTimeArray are needed below; drop the
        # feature arrays to free memory.
        xArray = []
        yArray = []

        # Disabled experiment: merge samples from additional days into the
        # id/click-time arrays.  Kept for reference.
        if False:
            xArray, yArray, userIdArray1, clickTimeArray1 = dp.loadSampleData(subPath='date=12/')
            xArray = []
            yArray = []

            xArray, yArray, userIdArray2, clickTimeArray2 = dp.loadSampleData(subPath='date=13/')
            xArray = []
            yArray = []

            userIdArray.extend(userIdArray1)
            userIdArray.extend(userIdArray2)

            clickTimeArray.extend(clickTimeArray1)
            clickTimeArray.extend(clickTimeArray2)

        # Map each app id to a dense integer index so the word2vec "words"
        # are small index strings rather than raw app ids.
        appDict = dp.getAppCategory()
        start = time.time()
        appIds = list(appDict.keys())
        appIdsLen = len(appIds)
        appIdDict = {}
        for i, id in enumerate(appIds):
            appIdDict[id] = i
        print("prepare app id dict cost: %s"%(time.time() - start) ) 


        # dataDicts holds the raw action dicts for the 3-day window
        # (click day, day-1, day-2) currently loaded.
        dataDicts = []
        dataDict = {}
        # data for samples in a day
        downloads = []
        installs = []
        uninstalls = []

        i = 0
        preDateIndex = ''
        noDataUserCount = 0
        userCountPerDay = 0
        # Assumes samples are grouped (sorted) by click date: a batch is
        # flushed whenever the 2-char date prefix of clickTime changes.
        for userId in userIdArray:
            clickTime = clickTimeArray[i]
            dateIndex = clickTime[0:2]
            # use previous day's data for predict
            if predict:
                dateIndex = str(int(dateIndex) -3)
            # a new folder
            if preDateIndex != dateIndex:
            
                # transform dataToTransform
                if i != 0:
                    # Flush the just-completed day: embed the three action
                    # lists and pickle the concatenated matrix.
                    print("fit date=" + str(preDateIndex))
                    serializationDataPath = dataPath + preDateIndex + '.transformed'
                    if predict:
                        serializationDataPath = conf.ROOT_DATA_FILE_PATH + 'contest_testset/action/' + preDateIndex + '.transformed'
                    if not os.path.exists(serializationDataPath):
                        print(installs[0])
                        # 3 = embedding size passed to Word2Vec.
                        installs = dp.fitWord2vec(installs, spark, 3)
                        downloads = dp.fitWord2vec(downloads, spark, 3)
                        uninstalls = dp.fitWord2vec(uninstalls, spark, 3)

                        print("installs: %s downloads: %s uninstalls: %s userCountPerDay: %s"%(len(installs),len(downloads),len(uninstalls),userCountPerDay))

                        installs = np.array(installs)
                        downloads = np.array(downloads)
                        uninstalls = np.array(uninstalls)

                        # One row per sample: [install | download | uninstall].
                        data = np.concatenate((installs, downloads), axis = 1)
                        data = np.concatenate((data, uninstalls), axis = 1)

                        print("data shape: %s"%(str(data.shape)))
                        with open(serializationDataPath, 'wb') as file:
                            print(data[0])
                            file.truncate()
                            pickle.dump(data, file, True)

                # clear dataToTransform
                print("%s - %s at date=%s"%(noDataUserCount, userCountPerDay, preDateIndex))
                downloads = []
                installs = []
                uninstalls = []
                noDataUserCount = 0
                userCountPerDay = 0
                dataDicts = []
                dataDict = {}
                # current day
                dataDict = loadRawData(dateIndex)
                print("%s user has app action data in date=%s"%(len(dataDict.keys()), dateIndex))
                dataDicts.append(dataDict)
                # yesterday
                dataDict = loadRawData( str(int(dateIndex)-1) )
                print("%s user has app action data in date=%s"%(len(dataDict.keys()), str(int(dateIndex)-1)))
                dataDicts.append(dataDict)
                # day before yesterday
                dataDict = loadRawData( str(int(dateIndex)-2) )
                print("%s user has app action data in date=%s"%(len(dataDict.keys()), str(int(dateIndex)-2)))
                dataDicts.append(dataDict)

                preDateIndex = dateIndex

            # data for one sample
            download = []
            install = []
            uninstall = []
            for dict in dataDicts:
                if str(userId) in dict:
                    #print("process data for user:%s"%(userId))
                    values = dict[str(userId)]
                    # Keep only actions that happened strictly before the
                    # click (string comparison on action_time vs clickTime).
                    # NOTE(review): under Python 2 this comprehension rebinds
                    # the outer counter `i`, which would corrupt the
                    # clickTimeArray indexing — confirm this runs on Python 3.
                    values = [i for i in values if i["action_time"] < clickTime]
 
                    for value in values:
                        appId = value["app_id"]
                        # NOTE(review): assumes appIdDict keys are ints (i.e.
                        # dp.getAppCategory() keys are ints) — verify.
                        appIdIndex = appIdDict[int(appId)]
                        action = value["action_type"]
                        if action == "INSTALLED":
                            install.append(str(appIdIndex))
                        if action == "UNINSTALLED":
                            uninstall.append(str(appIdIndex))
                        if action == "DOWNLOADED":
                            download.append(str(appIdIndex))

                else:
                    noDataUserCount += 1
                    #print("%s no data for user: %s total: %s date=%s"%(i, userId, noDataUserCount, dateIndex))

            # Word2Vec needs a non-empty token list: pad with ''.
            if len(download) == 0:
                download.append('')
            if len(install) == 0:
                install.append('')
            if len(uninstall) == 0:
                uninstall.append('')

            # Wrap as 1-tuples: the DataFrame row shape fitWord2vec expects.
            downloads.append( (download,) )
            installs.append( (install,))
            uninstalls.append( (uninstall,) )
        

            i += 1
            userCountPerDay += 1

        # the last day (the loop above only flushes on a date change, so the
        # final batch is written here)
        serializationDataPath = dataPath + preDateIndex + '.transformed'
        if predict:
            serializationDataPath = conf.ROOT_DATA_FILE_PATH + 'contest_testset/action/' + preDateIndex + '.transformed'
        if not os.path.exists(serializationDataPath):
            installs = dp.fitWord2vec(installs, spark, 3)
            downloads = dp.fitWord2vec(downloads, spark, 3)
            uninstalls = dp.fitWord2vec(uninstalls, spark, 3)

            print("installs: %s downloads: %s uninstalls: %s userCountPerDay: %s"%(len(installs),len(downloads),len(uninstalls),userCountPerDay))

            installs = np.array(installs)
            downloads = np.array(downloads)
            uninstalls = np.array(uninstalls)

            data = np.concatenate((installs, downloads), axis = 1)
            data = np.concatenate((data, uninstalls), axis = 1)

            print("data shape: %s"%(str(data.shape)))


        
            with open(serializationDataPath, 'wb') as file:
                print(data[0])
                file.truncate()
                pickle.dump(data, file, True)

        # write placeholder
        with open(placeHolderPath, 'wb') as file:
            pickle.dump({}, file, True)

    


def loadTransformData(predict=False):
    """Concatenate every ``*.transformed`` pickle into one numpy array.

    Walks the training data folder (or the test-set ``action/`` folder when
    *predict* is True), unpickles each per-day ``<day>.transformed`` file
    written by transformData(), and stacks all rows in directory-walk order.

    :param predict: read the test-set action folder instead of training data.
    :return: numpy.ndarray of all transformed feature rows.
    """
    print("appAction.loadTransformData")

    data = []
    path = dataPath
    if predict:
        path = predictDataPath + 'action/'

    for parent, dirnames, filenames in os.walk(path):
        for filename in filenames:
            # Only the per-day output files of transformData() are wanted.
            if not filename.endswith('transformed'):
                continue

            filePath = os.path.join(parent, filename)
            print(filePath)

            # Removed an unused `innerStart = time.time()` timer left over
            # from debugging; the loaded rows are extended directly.
            with open(filePath, 'rb') as file:
                data.extend(pickle.load(file))

    return np.array(data)


#transformData(predict=False)
#transformData(predict=True)