package com.shenlan.project

import com.clickhouse.jdbc.ClickHouseDataSource
import com.fasterxml.jackson.annotation.JsonIgnore
import org.apache.commons.beanutils.PropertyUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.FileSystem
import org.apache.hadoop.fs.Path
import java.io.File
import java.net.URI
import java.util.*

/**
 * 第一个参数：
 *      数据源类型： 0，基站1，卫星
 *
 * 第二个参数：
 *      原始文件路径，（挂载后的原始文件）
 *
 * 第三个参数：（原spark方法的第一个参数）
 *   0  历史ais报文转成位置tsv
 *   1  历史ais报文转成历史报文tsv
 *   2  卫星ais报文转成位置tsv
 *   3  卫星ais报文转成卫星报文tsv
 *   4  历史ais报文转成位置静态tsv
 *   5  历史ais报文输出5号报文的mmsi和定位类型
 *
 * 第四个参数：（原spark方法的第二个参数）
 *   0：年份 ；1： 月份
 *   (传入文件为路径为年，且该值为1，生成的数据没有按月份分的文件夹，数据直接在年文件夹下)
 *   (传入文件为路径为年月，且该值为1，生成的数据按月份分的文件夹。）（不管路径，该值为0，都是有月份的文件夹）
 *
 * 第五个参数：（不要，不用管，代码内部计算）。hdfs的路径（原spark方法的第三个参数）
 * 第六个参数：（不要，不用管，由第三个参数根据 spark方法中代码逻辑的 对应关系得到）报文/位置。
 *
 */


/**
 * Entry point: loads `config.properties` from the directory containing the
 * running jar, copies each key/value onto the matching [Config] property via
 * reflection, then runs the full upload pipeline and prints its JSON report.
 */
fun main() {
    // Resolve config.properties relative to the jar this class was loaded from.
    val jarPath = UploadService::class.java.protectionDomain.codeSource.location.toURI().path
    val jarDir = File(jarPath).parentFile.absolutePath
    val configFile = File(jarDir, "config.properties")

    val properties = Properties()
    // BUGFIX: close the reader after loading — the original leaked it.
    configFile.reader().use { reader -> properties.load(reader) }
    properties.forEach { entry ->
        // Property names in the file must match Config's property names exactly.
        PropertyUtils.setProperty(Config, entry.key as String, entry.value as String)
    }

    println(UploadService().fileUpload().apply {
        rawDataPath = Config.rawDataPath
    }.toJson())
}

/**
 * Global mutable configuration, populated at startup from `config.properties`
 * via reflection (see `main`). The defaults below apply when a key is absent.
 */
object Config {
    // Matches either path-separator style; hoisted so the setter does not rebuild it.
    private val pathSeparator = Regex("[\\\\/]")

    /** Local raw-data directory. Its last segment (yyyy or yyyyMM) determines [timeType]. */
    var rawDataPath: String = "/"
        set(value) {
            field = value
            // A 4-character trailing segment is a year directory ("0"), otherwise a month ("1").
            timeType = if (value.split(pathSeparator).last().length == 4) "0" else "1"
        }

    /** Data-source kind: "ais" = base-station history; anything else = satellite ("wxais"). */
    var dataSourceType: String = "ais"
    var hdfsIp = "192.168.1.41"
    var hdfsPort = "8020"

    /** Directory on the Spark server holding transform.main.jar and its lib/ folder. */
    var sparkJarPath = "/shenlan/spark"

    // NOTE(review): credentials below are hard-coded defaults committed to source —
    // consider supplying them exclusively via config.properties.
    var serverIp = "192.168.1.41"
    var serverUserName = "root"
    var serverUserPassword = "SHENLAN@2016"
    var sparkRunningParameter = "--num-executors 2 --executor-cores 4 --executor-memory 3G"

    /** Spark job code (see file header); assigning it derives [outType]. Code "4" leaves it untouched. */
    var sparkType = ""
        set(value) {
            field = value
            when (value) {
                "0", "2" -> outType = "position"
                "1", "3" -> outType = "datagram"
                "5" -> outType = "datagram5"
            }
        }

    var clickhouseUrl: String? = null
    var clickhouseUsername: String? = null
    var clickhousePassword: String? = null
    var clickhousePostfix: String? = null

    // Intermediate values produced while the pipeline runs.
    var hdfsPath: String = ""
    var outType: String = ""
    var timeType: String = ""

    // Test hooks; empty strings in production.
    var hdfsPathTop = ""
    var chPath = ""
}

/**
 * Aggregated pipeline report returned by [UploadService.fileUpload] and
 * serialized to JSON for the caller.
 */
class ReturnModel {
    /** Local raw-data directory the pipeline was run against. */
    var rawDataPath: String? = null

    /** HDFS upload stage result. */
    var hdfsFile = FileModel()

    /** Spark transform stage result. */
    var sparkFile = FileModel()

    /** ClickHouse import stage result. */
    var chFile = ChReturnModel()

    /** Overall outcome: a success summary with timings, or the first stage's error. */
    var result: String? = null
}

/**
 * A single file entry: its path plus its size, both in raw bytes and in a
 * human-readable unit.
 */
class FileInfoModel() {
    var filePath = ""

    /** Size in bytes (kept as a string); assigning it keeps [unitsSize] in sync. */
    var byteSize = "0"
        set(value) {
            field = value
            unitsSize = FileUtil.byteToHigher(value.toLong())
        }

    /** Human-readable size, derived whenever [byteSize] is assigned (empty until then). */
    var unitsSize = ""

    constructor(filePath: String, byteSize: String) : this() {
        this.filePath = filePath
        // Assign through the setter so unitsSize is populated as well.
        this.byteSize = byteSize
    }
}

/**
 * Result of a single pipeline stage (HDFS upload or Spark run): timing,
 * the produced files with their aggregated size, and an optional error.
 */
class FileModel {
    /** HDFS (or Spark output) directory this stage worked on. */
    var taskPath: String? = null

    var taskStartTime: Date? = null

    /** Setting the end time also derives [taskUseTime] when a start time is known. */
    var taskEndTime: Date? = null
        set(endTime) {
            val startTime = taskStartTime
            if (endTime != null && startTime != null) {
                taskUseTime = TimeUtil.mmToFormat(endTime.time - startTime.time)
            }
            field = endTime
        }

    /** Human-readable stage duration, derived in the [taskEndTime] setter. */
    var taskUseTime: String? = null

    /** Total size of all produced files, human readable. */
    var fileSizeTotal: String? = null

    var fileCount: Int? = null

    /** Per-file path/size details; excluded from the JSON report. */
    @JsonIgnore
    var fileList = mutableListOf<FileInfoModel>()

    var errorMessage: String? = null

    /** Stores [size] (bytes) into [fileSizeTotal] using a human-readable unit. */
    fun computeFileSizeTotal(size: Long) {
        fileSizeTotal = FileUtil.byteToHigher(size)
    }
}

/**
 * Result of the ClickHouse import stage: timing plus the table row counts
 * before and after the insert, used to verify rows were actually added.
 */
class ChReturnModel {
    var taskStartTime: Date? = null

    /** Setting the end time also derives [taskUseTime] when a start time is known. */
    var taskEndTime: Date? = null
        set(endTime) {
            val startTime = taskStartTime
            if (endTime != null && startTime != null) {
                taskUseTime = TimeUtil.mmToFormat(endTime.time - startTime.time)
            }
            field = endTime
        }

    /** Human-readable stage duration, derived in the [taskEndTime] setter. */
    var taskUseTime: String? = null

    /** Row count before the import. */
    var oldCount: Long? = null

    /** Row count after the import. */
    var newCount: Long? = null

    var errorMessage: String? = null
}

/**
 * Runs the three-stage pipeline: upload raw files to HDFS, run the Spark
 * transform over them, then import the Spark output into ClickHouse.
 * Each stage reports through its model's `errorMessage`; the pipeline stops
 * at the first stage that fails.
 */
class UploadService {

    /**
     * Executes all three stages in order and returns the aggregated report.
     * The first non-null stage error becomes [ReturnModel.result] and aborts
     * the remaining stages.
     */
    fun fileUpload(): ReturnModel {
        val returnModel = ReturnModel()

        returnModel.hdfsFile = hdfsFileUpload()
        returnModel.hdfsFile.errorMessage?.let { message ->
            returnModel.result = message
            return returnModel
        }

        returnModel.sparkFile = sparkFileUpload()
        returnModel.sparkFile.errorMessage?.let { message ->
            returnModel.result = message
            return returnModel
        }

        returnModel.chFile = chFileUpload()
        returnModel.chFile.errorMessage?.let { message ->
            returnModel.result = message
            return returnModel
        }

        returnModel.result = "成功。" +
                "hdfs上传耗时：${returnModel.hdfsFile.taskUseTime}，" +
                "spark运行耗时：${returnModel.sparkFile.taskUseTime}，" +
                "ch上传耗时：${returnModel.chFile.taskUseTime}。"
        return returnModel
    }

    /**
     * Uploads the local raw-data directory to HDFS under
     * `<hdfsPathTop>/(ais|wxais)/history[/<year>]/<dirName>`.
     *
     * The copy is skipped when every local file already exists on HDFS with an
     * identical size. After copying, local and remote sizes are compared; any
     * mismatch is recorded in [FileModel.errorMessage] so the pipeline aborts
     * instead of running Spark over incomplete data (previously this was only
     * logged, so the caller's error check could never fire).
     */
    fun hdfsFileUpload(): FileModel {
        val model = FileModel().apply { taskStartTime = Date() }

        val dirName = Config.rawDataPath.split(Regex("[/\\\\]")).last()
        val parentHdfsPath = buildString {
            append(Config.hdfsPathTop)
            append(
                when (Config.dataSourceType) {
                    "ais" -> "/ais/history"
                    else -> "/wxais/history"
                }
            )
            // Month-level uploads (yyyyMM) are nested inside their year directory.
            if (dirName.length > 4) append("/${dirName.substring(0, 4)}")
        }
        val hdfsPath = "$parentHdfsPath/$dirName"

        // Local file name -> size, used to detect an already-complete upload.
        val rawDataLengthMap = File(Config.rawDataPath).walk().filter { it.isFile }.associate { it.name to it.length() }
        val configuration = Configuration().apply {
            set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem")
        }
        FileSystem.get(URI("hdfs://${Config.hdfsIp}:${Config.hdfsPort}"), configuration).use { fileSystem ->
            if (fileSystem.exists(Path(hdfsPath))) {
                val existingSizes = collectHdfsFiles(fileSystem, hdfsPath, model)
                if (rawDataLengthMap == existingSizes) {
                    logger.info("hdfs文件已上传，跳过此步骤。")
                    return@use
                }
                // Sizes differ: re-upload. Drop the stale listing so files are not double-counted.
                model.fileList.clear()
            }

            logger.info("hdfs上传开始")
            fileSystem.mkdirs(Path(parentHdfsPath))
            fileSystem.copyFromLocalFile(Path(Config.rawDataPath), Path(parentHdfsPath))
            logger.info("hdfs上传结束")

            val uploadedSizes = collectHdfsFiles(fileSystem, hdfsPath, model)
            val difference = rawDataLengthMap.filter { (name, length) -> uploadedSizes[name] != length }
            if (difference.isNotEmpty()) {
                // BUGFIX: record the failure on the model so fileUpload() aborts the pipeline.
                model.errorMessage = "下面这些文件上传失败：\n" + objectMapper.writeValueAsString(difference)
                logger.error(model.errorMessage)
            }
        }
        if (model.errorMessage != null) return model

        model.apply {
            taskEndTime = Date()
            taskPath = hdfsPath
            // Safe on an empty list (the original .reduce would have thrown).
            computeFileSizeTotal(fileList.map { it.byteSize.toLong() }.sum())
            fileCount = fileList.size
        }
        Config.hdfsPath = hdfsPath

        return model
    }

    /**
     * Recursively lists [hdfsPath], appends each file to [model]'s fileList,
     * and returns a file-name -> byte-size map for comparison.
     */
    private fun collectHdfsFiles(fileSystem: FileSystem, hdfsPath: String, model: FileModel): Map<String, Long> {
        val sizes = HashMap<String, Long>()
        val listFiles = fileSystem.listFiles(Path(hdfsPath), true)
        while (listFiles.hasNext()) {
            val fileStatus = listFiles.next()
            model.fileList.add(FileInfoModel(fileStatus.path.toUri().path, fileStatus.len.toString()))
            sizes[fileStatus.path.name] = fileStatus.len
        }
        return sizes
    }

    /**
     * Runs the Spark transform remotely over SSH, then gathers the produced
     * output files. For a year-level run (timeType "0") each month directory
     * is checked for a `_SUCCESS` marker; a missing marker is reported in
     * [FileModel.errorMessage].
     */
    fun sparkFileUpload(): FileModel {
        val model = FileModel()
        // Spark writes next to the history data, swapping "history" for the output type.
        model.taskPath = Config.hdfsPath.replace("history", Config.outType)
        // Remove any output of a previous run before re-computing.
        ExecUtil.remoteExec("hdfs dfs -rm -r ${model.taskPath}", Config.serverIp, Config.serverUserName, Config.serverUserPassword)
        model.taskStartTime = Date()
        logger.info("spark计算开始")

        // The jars and transform.main.jar are relative paths, so the whole chain
        // must run from inside Config.sparkJarPath.
        val jars =
            "jars=./lib/series-reducer-0.2.0.jar,./lib/jts-core-1.18.2.jar,./lib/htrace-core-3.2.0-incubating.jar,./lib/tephra-api-0.14.0-incubating.jar,./lib/tephra-core-0.14.0-incubating.jar,./lib/twill-zookeeper-0.8.0.jar,./lib/twill-discovery-api-0.8.0.jar,./lib/disruptor-3.3.6.jar,./lib/antlr-runtime-3.5.2.jar,./lib/aiscollect_main.jar,./lib/kotlin-stdlib-jdk8-1.2.41.jar,./lib/kotlin-stdlib-1.2.41.jar,./lib/geohash-1.4.0.jar"
        var command = "cd ${Config.sparkJarPath} && $jars"
        if (Config.hdfsPath.isNotEmpty()) {
            command += " && spark-submit ${Config.sparkRunningParameter} --class SparkUtilKt  --jars  \$jars transform.main.jar ${Config.sparkType} ${Config.timeType} ${Config.hdfsPath}"
        }

        ExecUtil.remoteExec(command, Config.serverIp, Config.serverUserName, Config.serverUserPassword)

        model.taskEndTime = Date()
        logger.info("spark计算结束")

        if (Config.timeType == "0") {
            // Year-level run: taskPath holds one sub-directory per month.
            val listing = ExecUtil.remoteExecReturnLists(
                "hdfs dfs -ls ${model.taskPath}",
                Config.serverIp, Config.serverUserName, Config.serverUserPassword
            )
            listing.forEachIndexed { index, columns ->
                if (index != 0) { // first row is the ls header line
                    val monthPath = columns.last()
                    if (isSuccessFileMissing(monthPath)) {
                        model.errorMessage = "失败:${monthPath} 文件路径下没生成success文件"
                    }
                    model.fileList.addAll(getHdfsFileInfo(monthPath))
                }
            }
        } else {
            // Month-level run: the output files sit directly under taskPath.
            model.fileList.addAll(getHdfsFileInfo(model.taskPath!!))
        }

        model.computeFileSizeTotal(model.fileList.map { it.byteSize.toLong() }.sum())
        model.fileCount = model.fileList.size

        return model
    }

    /**
     * Parses `hdfs dfs -ls` output for [path] into path/size pairs
     * (columns 7 and 4 of each row); the first row is the ls header.
     */
    private fun getHdfsFileInfo(path: String): List<FileInfoModel> =
        ExecUtil.remoteExecReturnLists(
            "hdfs dfs -ls $path",
            Config.serverIp,
            Config.serverUserName,
            Config.serverUserPassword
        ).drop(1).map { columns -> FileInfoModel(columns[7], columns[4]) }

    /**
     * Returns true when Spark did NOT write a `_SUCCESS` marker under [path]:
     * an empty `hdfs dfs -ls` result means the marker file is absent.
     * (Renamed from the misleading `getSuccessFile`, which returned true on absence.)
     */
    private fun isSuccessFileMissing(path: String): Boolean {
        val output = ExecUtil.remoteExec(
            "hdfs dfs -ls ${path}/_SUCCESS",
            Config.serverIp,
            Config.serverUserName,
            Config.serverUserPassword
        )
        return output == ""
    }

    /**
     * Imports the Spark TSV output into ClickHouse through a temporary
     * HDFS-engine table, verifying afterwards that the row count grew.
     *
     * Refuses to run when the target period already holds a substantial row
     * count (>= 10000), treating that as an accidental duplicate import.
     */
    fun chFileUpload(): ChReturnModel {
        val tableStructure = if (Config.outType == "datagram") {
            "( date DateTime,type String,messageId String,mmsi String,message String,geohash String,lat Float32 ,lng Float32 )"
        } else {
            "(date DateTime,mmsi UInt32,code Int8,turn Int8,speed Float32,accuracy UInt8,geohash String,lat Float32 ,lng Float32 ,course Float32,heading UInt16)"
        }

        val dirName = Config.hdfsPath.split("/").last()
        // Year-level output is nested one directory deeper (per-month folders).
        val hdfsPath = Config.hdfsPath.replace("history", Config.outType) +
                if (Config.timeType == "0") "/*/*" else "/*"

        val targetTable = "ais${Config.chPath}_${Config.outType}_${Config.clickhousePostfix}"
        val tsvTable = "ais${Config.chPath}_${Config.outType}_tsv"

        // dirName is either "yyyy" or "yyyyMM"; a bare year defaults to month "01".
        val yearMonth = if (dirName.length == 4) dirName to "01" else dirName.substring(0, 4) to dirName.substring(4)
        val sqlCheckExist = if (Config.timeType == "0") {
            "SELECT COUNT(*) FROM $targetTable WHERE date BETWEEN " +
                    "'${yearMonth.first}-01-01 00:00:00' AND '${yearMonth.first.toInt() + 1}-01-01 00:00:00'"
        } else {
            val year = yearMonth.first.toInt()
            val month = yearMonth.second.toInt()
            // BUGFIX: December used to yield an invalid upper bound of 'yyyy-13-01';
            // roll over to January of the following year instead.
            val (upperYear, upperMonth) = if (month == 12) year + 1 to 1 else year to month + 1
            "SELECT COUNT(*) FROM $targetTable WHERE date BETWEEN " +
                    "'${yearMonth.first}-${yearMonth.second.padStart(2, '0')}-01 00:00:00' AND " +
                    "'$upperYear-${upperMonth.toString().padStart(2, '0')}-01 00:00:00'"
        }

        val model = ChReturnModel().apply { taskStartTime = Date() }

        logger.info("连接clickhouse")

        ClickHouseDataSource(Config.clickhouseUrl).getConnection(Config.clickhouseUsername, Config.clickhousePassword).use { connection ->
            connection.createStatement().use { statement ->
                val existedNumber = statement.executeQuery(sqlCheckExist).use { it.next(); it.getLong(1) }
                if (existedNumber >= 10000) {
                    model.errorMessage = "clickhouse数据已存在，请不要重复插入。"
                    logger.error(model.errorMessage)
                    return model
                }

                model.oldCount = statement.executeQuery("SELECT COUNT(*) FROM $targetTable")
                    .use { it.next(); it.getLong(1) }

                logger.info("clickhouse数据导入开始")

                // BUGFIX: DDL/INSERT statements go through execute(); executeQuery()
                // is reserved for result-returning SELECTs by the JDBC contract.
                statement.execute("drop table if exists $tsvTable")
                statement.execute("CREATE TABLE $tsvTable$tableStructure ENGINE = HDFS('hdfs://${Config.hdfsIp}:${Config.hdfsPort}$hdfsPath', 'TSV')")
                statement.execute("insert into $targetTable SELECT * FROM $tsvTable")
                statement.execute("drop table $tsvTable")

                logger.info("clickhouse数据导入结束")

                model.newCount = statement.executeQuery("SELECT COUNT(*) FROM $targetTable")
                    .use { it.next(); it.getLong(1) }
                model.taskEndTime = Date()

                if (model.oldCount == model.newCount) {
                    model.errorMessage = "数据插入失败"
                    logger.error(model.errorMessage)
                    return model
                }
            }
        }

        return model
    }
}
