#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

<#--# Kylin provides two configuration profiles: minimal and production (the default).-->
<#--# To switch to minimal, uncomment the properties below:-->
<#--#kylin.storage.columnar.spark-conf.spark.driver.memory=512m-->
<#--#kylin.storage.columnar.spark-conf.spark.executor.memory=512m-->
<#--#kylin.storage.columnar.spark-conf.spark.executor.memoryOverhead=512m-->
<#--#kylin.storage.columnar.spark-conf.spark.executor.extraJavaOptions=-Dhdp.version=current -Dlog4j.configurationFile=spark-executor-log4j.xml -Dlog4j.debug -Dkylin.hdfs.working.dir=${kylin.env.hdfs-working-dir} -Dkap.metadata.identifier=${kylin.metadata.url.identifier} -Dkap.spark.category=sparder -Dkap.spark.project=${job.project} -XX:MaxDirectMemorySize=512M-->
<#--#kylin.storage.columnar.spark-conf.spark.yarn.am.memory=512m-->
<#--#kylin.storage.columnar.spark-conf.spark.executor.cores=1-->
<#--#kylin.storage.columnar.spark-conf.spark.executor.instances=1-->
<#--#kylin.storage.columnar.spark-conf.spark.sql.adaptive.coalescePartitions.minPartitionNum=1-->
<#--#kylin.metadata.history-source-usage-unwrap-computed-column=true-->
<#--## Working folder in HDFS; preferably a fully qualified absolute path. Make sure the user has the right permissions on this directory-->
<#--#kylin.env.hdfs-working-dir=/kylin-->
<#--## The build engine writes data to this file system, which is the same as defaultFS in core-site.xml by default-->
<#--#kylin.env.engine-write-fs=-->
<#--## zookeeper is used for distributed locks, service discovery, leader election, etc.-->
<#--## example: kylin.env.zookeeper-connect-string=10.1.2.1:2181,10.1.2.2:2181,10.1.2.3:2181-->
<#--#kylin.env.zookeeper-connect-string=-->
<#--## settings for InfluxDB-->
<#--#kylin.influxdb.address=localhost:8086-->
<#--#kylin.influxdb.username=root-->
<#--#kylin.influxdb.password=root-->
<#--#kylin.influxdb.https.enabled=false-->
<#--## daily operations-->
<#--#kylin.garbage.storage.executable-survival-time-threshold=30d-->
<#--## ==================== SERVER & WEB ====================-->
<#--## Kylin server mode, valid values: [all, query, job]-->
<#--#kylin.server.mode=all-->
<#--#-->
<#--## KE server address; best to use an HA domain name in production. Used in yarn-cluster mode to report job info back to KE.-->
<#--## e.g. 'kylin.server.address=10.1.2.30:7070'-->
<#--#-->
<#--## Kylin server port-->
<#--#server.port=7070-->
<#--## Kylin https-->
<#--#kylin.server.https.port=7443-->
<#--#-->
<#--#kylin.server.https.keystore-type=JKS-->
<#--#kylin.server.https.keystore-file=${KYLIN_HOME}/server/.keystore-->
<#--#kylin.server.https.keystore-password=changeit-->
<#--#-->
<#--## Set true to enable CORS for all origins-->
<#--#kylin.server.cors.allow-all=false-->
<#--#-->
<#--## ==================== JOB EXECUTION ====================-->
<#--#-->
<#--#### Spark conf overrides for the build engine-->
<#--#kylin.engine.spark-conf.spark.yarn.submit.file.replication=1-->
<#--#kylin.engine.spark-conf.spark.yarn.queue=default-->
<#--#kylin.engine.spark-conf.spark.driver.memoryOverhead=256-->
<#--##kylin.engine.spark-conf.spark.driver.memory=512m-->
<#--#kylin.engine.spark-conf.spark.driver.cores=1-->
<#--#kylin.engine.spark-conf.spark.storage.memoryFraction=0.3-->
<#--#kylin.engine.spark-conf.spark.eventLog.enabled=true-->
<#--#kylin.engine.spark-conf.spark.port.maxRetries=128-->
<#--#kylin.engine.spark-conf.spark.history.fs.logDirectory=${kylin.env.hdfs-working-dir}/spark-history-->
<#--#kylin.engine.spark-conf.spark.eventLog.dir=${kylin.env.hdfs-working-dir}/spark-history-->
<#--#kylin.engine.spark-conf.spark.driver.extraJavaOptions=-Dfile.encoding=UTF-8 -Dhdp.version=current -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=${KYLIN_HOME}/logs-->
<#--#kylin.engine.spark-conf.spark.yarn.am.extraJavaOptions=-Dhdp.version=current -Dlog4j.configurationFile=spark-appmaster-log4j.xml-->
<#--#kylin.engine.spark-conf.spark.executor.extraJavaOptions=-Dfile.encoding=UTF-8 -Dhdp.version=current -Dlog4j.configurationFile=spark-executor-log4j.xml -Dkylin.hdfs.working.dir=${kylin.env.hdfs-working-dir} -Dkap.metadata.identifier=${kylin.metadata.url.identifier} -Dkap.spark.category=job -Dkap.spark.project=${job.project} -Dkap.spark.identifier=${job.id} -Dkap.spark.jobName=${job.stepId} -Duser.timezone=${user.timezone} -Dkap.spark.mountDir=${job.mountDir}-->
<#--#kylin.engine.spark-conf.spark.yarn.dist.files=${kylin.log.spark-executor-properties-file},${kylin.log.spark-appmaster-properties-file}-->
<#--#kylin.engine.spark-conf.spark.hadoop.yarn.timeline-service.enabled=false-->
<#--#kylin.engine.spark-conf.spark.hadoop.hive.exec.scratchdir=${kylin.env.hdfs-working-dir}/hive-scratch-->
<#--#kylin.engine.spark-conf.spark.hadoop.mapreduce.input.fileinputformat.input.dir.recursive=true-->
<#--#kylin.engine.spark-conf.spark.sql.adaptive.enabled=true-->
<#--#kylin.engine.spark-conf.spark.sql.hive.caseSensitiveInferenceMode=NEVER_INFER-->
<#--#kylin.engine.spark-conf.spark.sql.broadcastTimeout=999999-->
<#--#kylin.engine.spark-conf.spark.sql.cbo.enabled=true-->
<#--#kylin.engine.spark-conf.spark.sql.crossJoin.enabled=true-->
<#--#kylin.engine.spark-conf.spark.sql.sources.bucketing.enabled=false-->
<#--#kylin.engine.driver-memory-strategy=2,20,100-->
<#--#kylin.engine.spark-conf.spark.yarn.stagingDir=${kylin.env.hdfs-working-dir}-->
<#--#kylin.engine.spark-conf.spark.master=yarn-->
<#--#kylin.engine.spark-conf.spark.submit.deployMode=client-->
<#--#-->
<#--#kylin.engine.spark-conf.spark.sql.hive.metastore.version=1.2.2-->
<#--#kylin.engine.spark-conf.spark.sql.hive.metastore.jars=${KYLIN_HOME}/spark/hive_1_2_2/*-->
<#--#-->
<#--#kylin.engine.spark-conf.spark.sql.extensions=io.delta.sql.DeltaSparkSessionExtension-->
<#--#kylin.engine.spark-conf.spark.sql.catalog.spark_catalog=org.apache.spark.sql.delta.catalog.DeltaCatalog-->
<#--#-->
<#--#kylin.engine.spark-conf.spark.stage.maxConsecutiveAttempts=1-->
<#--#-->
<#--## Spark 3 legacy configs for the proleptic Gregorian calendar switch-->
<#--#kylin.engine.spark-conf.spark.sql.parquet.int96RebaseModeInWrite=LEGACY-->
<#--#kylin.engine.spark-conf.spark.sql.parquet.datetimeRebaseModeInWrite=LEGACY-->
<#--#kylin.engine.spark-conf.spark.sql.parquet.int96RebaseModeInRead=LEGACY-->
<#--#kylin.engine.spark-conf.spark.sql.parquet.datetimeRebaseModeInRead=LEGACY-->
<#--#kylin.engine.spark-conf.spark.sql.avro.datetimeRebaseModeInWrite=LEGACY-->
<#--#kylin.engine.spark-conf.spark.sql.avro.datetimeRebaseModeInRead=LEGACY-->
<#--#kylin.engine.spark-conf.spark.sql.legacy.timeParserPolicy=LEGACY-->
<#--#-->
<#--## Disable the Spark 3 dynamic partition pruning (DPP) feature-->
<#--#kylin.engine.spark-conf.spark.sql.optimizer.dynamicPartitionPruning.enabled=false-->
<#--#-->
<#--## ==================== QUERY SPARK CONTEXT & COLUMNAR STORAGE ====================-->
<#--#kylin.storage.quota-in-giga-bytes=10240-->
<#--#-->
<#--#kylin.storage.columnar.shard-size-mb=256-->
<#--## for any spark config entry in http://spark.apache.org/docs/latest/configuration.html#environment-variables, prefix it with "kylin.storage.columnar.spark-env" and append here-->
<#--#### path to hadoop conf; must be local on the server running kylin.sh. The value is set to ${kylin_hadoop_conf_dir}, which is filled in by the bootstrap scripts.-->
<#--#### In most cases the value can be auto-detected (e.g. /etc/hadoop/conf), so normal users do not need to worry about this config.-->
<#--#kylin.storage.columnar.spark-env.HADOOP_CONF_DIR=${kylin_hadoop_conf_dir}-->
<#--#-->
<#--## for any spark config entry in http://spark.apache.org/docs/latest/configuration.html#spark-properties, prefix it with "kylin.storage.columnar.spark-conf" and append here-->
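<#--## e.g. (illustrative value) to raise the network timeout of the query cluster:-->
<#--#kylin.storage.columnar.spark-conf.spark.network.timeout=600s-->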
<#--#-->
<#--#kylin.storage.columnar.spark-conf.spark.executor.extraJavaOptions=-Dhdp.version=current -Dlog4j.configurationFile=spark-executor-log4j.xml -Dkylin.hdfs.working.dir=${kylin.env.hdfs-working-dir} -Dkap.metadata.identifier=${kylin.metadata.url.identifier} -Dkap.spark.category=sparder -Dkap.spark.project=${job.project} -Dkap.spark.mountDir=${kylin.tool.mount-spark-log-dir} -XX:MaxDirectMemorySize=896M-->
<#--#kylin.storage.columnar.spark-conf.spark.yarn.am.extraJavaOptions=-Dhdp.version=current -Dlog4j.configurationFile=spark-appmaster-log4j.xml-->
<#--#kylin.storage.columnar.spark-conf.spark.driver.extraJavaOptions=-Dhdp.version=current-->
<#--##kylin.storage.columnar.spark-conf.spark.serializer=org.apache.spark.serializer.JavaSerializer-->
<#--#kylin.storage.columnar.spark-conf.spark.port.maxRetries=128-->
<#--#kylin.storage.columnar.spark-conf.spark.driver.memory=4096m-->
<#--## to avoid sparder driver OOM when decoding data (do not exceed the `spark.driver.memory` configuration)-->
<#--#kylin.storage.columnar.spark-conf.spark.sql.driver.maxCollectSize=3600m-->
<#--#kylin.storage.columnar.spark-conf.spark.executor.memory=12288m-->
<#--#kylin.storage.columnar.spark-conf.spark.executor.memoryOverhead=3072m-->
<#--#kylin.storage.columnar.spark-conf.spark.yarn.am.memory=1024m-->
<#--#kylin.storage.columnar.spark-conf.spark.executor.cores=5-->
<#--#kylin.storage.columnar.spark-conf.spark.executor.instances=4-->
<#--#kylin.storage.columnar.spark-conf.spark.task.maxFailures=1-->
<#--#kylin.storage.columnar.spark-conf.spark.ui.port=4041-->
<#--#kylin.storage.columnar.spark-conf.spark.locality.wait=0s-->
<#--#kylin.storage.columnar.spark-conf.spark.sql.dialect=hiveql-->
<#--## to reduce Spark query optimization time-->
<#--#kylin.storage.columnar.spark-conf.spark.sql.constraintPropagation.enabled=false-->
<#--#-->
<#--## to avoid OOM under high concurrency-->
<#--#kylin.storage.columnar.spark-conf.spark.ui.retainedStages=300-->
<#--#kylin.storage.columnar.spark-conf.spark.hadoop.yarn.timeline-service.enabled=false-->
<#--#kylin.storage.columnar.spark-conf.spark.hadoop.hive.exec.scratchdir=${kylin.env.hdfs-working-dir}/hive-scratch-->
<#--#kylin.storage.columnar.spark-conf.spark.hadoop.mapreduce.input.fileinputformat.input.dir.recursive=true-->
<#--#kylin.storage.columnar.spark-conf.hive.execution.engine=MR-->
<#--#kylin.storage.columnar.spark-conf.spark.sql.crossJoin.enabled=true-->
<#--#kylin.storage.columnar.spark-conf.spark.broadcast.autoClean.enabled=true-->
<#--#kylin.storage.columnar.spark-conf.spark.sql.adaptive.enabled=false-->
<#--#kylin.storage.columnar.spark-conf.spark.sql.objectHashAggregate.sortBased.fallbackThreshold=1-->
<#--#kylin.storage.columnar.spark-conf.spark.sql.hive.caseSensitiveInferenceMode=NEVER_INFER-->
<#--#kylin.storage.columnar.spark-conf.spark.sql.sources.bucketing.enabled=false-->
<#--#kylin.storage.columnar.spark-conf.spark.yarn.stagingDir=${kylin.env.hdfs-working-dir}-->
<#--#kylin.storage.columnar.spark-conf.spark.eventLog.enabled=true-->
<#--#kylin.storage.columnar.spark-conf.spark.history.fs.logDirectory=${kylin.env.hdfs-working-dir}/sparder-history-->
<#--#kylin.storage.columnar.spark-conf.spark.eventLog.dir=${kylin.env.hdfs-working-dir}/sparder-history-->
<#--#kylin.storage.columnar.spark-conf.spark.eventLog.rolling.enabled=true-->
<#--#kylin.storage.columnar.spark-conf.spark.eventLog.rolling.maxFileSize=100m-->
<#--#-->
<#--#kylin.storage.columnar.spark-conf.spark.sql.hive.metastore.version=1.2.2-->
<#--#kylin.storage.columnar.spark-conf.spark.sql.hive.metastore.jars=${KYLIN_HOME}/spark/hive_1_2_2/*-->
<#--#-->
<#--#kylin.storage.columnar.spark-conf.spark.sql.extensions=io.delta.sql.DeltaSparkSessionExtension-->
<#--#kylin.storage.columnar.spark-conf.spark.sql.catalog.spark_catalog=org.apache.spark.sql.delta.catalog.DeltaCatalog-->
<#--#-->
<#--## to avoid cartesian-partition OOM; set to -1 or empty to turn this off-->
<#--#kylin.storage.columnar.spark-conf.spark.sql.cartesianPartitionNumThreshold=-1-->
<#--#-->
<#--## disable the parquet column index to save the overhead of reading it,-->
<#--## as the column index is not used by the Spark vectorized reader for now-->
<#--#kylin.storage.columnar.spark-conf.parquet.filter.columnindex.enabled=false-->
<#--#-->
<#--## Spark 3 legacy configs for the proleptic Gregorian calendar switch-->
<#--#kylin.storage.columnar.spark-conf.spark.sql.parquet.int96RebaseModeInWrite=LEGACY-->
<#--#kylin.storage.columnar.spark-conf.spark.sql.parquet.datetimeRebaseModeInWrite=LEGACY-->
<#--#kylin.storage.columnar.spark-conf.spark.sql.parquet.int96RebaseModeInRead=LEGACY-->
<#--#kylin.storage.columnar.spark-conf.spark.sql.parquet.datetimeRebaseModeInRead=LEGACY-->
<#--#kylin.storage.columnar.spark-conf.spark.sql.avro.datetimeRebaseModeInWrite=LEGACY-->
<#--#kylin.storage.columnar.spark-conf.spark.sql.avro.datetimeRebaseModeInRead=LEGACY-->
<#--#kylin.storage.columnar.spark-conf.spark.sql.legacy.timeParserPolicy=LEGACY-->
<#--#-->
<#--#-->
<#--## Disable the Spark 3 dynamic partition pruning (DPP) feature-->
<#--#kylin.storage.columnar.spark-conf.spark.sql.optimizer.dynamicPartitionPruning.enabled=false-->
<#--#-->
<#--## ==================== JOB SCHEDULER ====================-->
<#--#-->
<#--## max job retries on error; the default 0 means no retry. The retry interval is in milliseconds.-->
<#--#kylin.job.retry=0-->
<#--#kylin.job.retry-interval=30000-->
<#--#-->
<#--## Max number of concurrently running jobs-->
<#--#kylin.job.max-concurrent-jobs=20-->
<#--#-->
<#--## If true, email notifications will be sent-->
<#--#kylin.job.notification-enabled=false-->
<#--#kylin.job.notification-mail-enable-starttls=true-->
<#--#kylin.job.notification-mail-host=smtp.office365.com-->
<#--#kylin.job.notification-mail-port=587-->
<#--#kylin.job.notification-mail-username=kylin@example.com-->
<#--#kylin.job.notification-mail-password=mypassword-->
<#--#kylin.job.notification-mail-sender=kylin@example.com-->
<#--#-->
<#--#kylin.env.max-keep-log-file-number=10-->
<#--#kylin.env.max-keep-log-file-threshold-mb=256-->
<#--### use crontab to check whether the log files (shell.stderr, shell.stdout and kylin.out) need to be rotated; the default runs at minute 33 of every hour-->
<#--#kylin.env.log-rotate-check-cron=33 * * * *-->
<#--### whether to enable automatic log rotation-->
<#--#kylin.env.log-rotate-enabled=true-->
<#--#-->
<#--## ==================== QUERY ====================-->
<#--#-->
<#--#kylin.query.pushdown-enabled=true-->
<#--#kylin.query.pushdown.runner-class-name=org.apache.kylin.query.pushdown.PushDownRunnerSparkImpl-->
<#--#kylin.query.pushdown.partition-check.runner-class-name=org.apache.kylin.query.pushdown.PushDownRunnerSparkImpl-->
<#--#kylin.query.escape-default-keyword=false-->
<#--#kylin.query.cache-enabled=true-->
<#--#kylin.query.force-limit=-1-->
<#--#kylin.query.replace-count-column-with-count-star=false-->
<#--#kylin.query.use-tableindex-answer-non-raw-query=false-->
<#--#kylin.query.query-with-execute-as=false-->
<#--#kylin.query.query-history-download-max-size=100000-->
<#--#kylin.query.query-history-download-batch-size=20000-->
<#--#kylin.query.engine.spark-scheduler-mode=FAIR-->
<#--#kylin.query.realization.chooser.thread-core-num=5-->
<#--#kylin.query.realization.chooser.thread-max-num=50-->
<#--#kylin.query.realization.chooser.cache-enabled=false-->
<#--#kylin.query.join-match-optimization-enabled=false-->
<#--#kylin.query.convert-count-distinct-expression-enabled=false-->
<#--#kylin.query.memory-limit-during-collect-mb=5400-->
<#--#kylin.query.cartesian-partition-num-threshold-factor=100-->
<#--#kylin.query.dataframe-cache-enabled=true-->
<#--#-->
<#--## Spark context canary to monitor the health of the Spark context-->
<#--#kylin.canary.sqlcontext-enabled=false-->
<#--#-->
<#--## ==================== ASYNC QUERY ====================-->
<#--#-->
<#--#kylin.query.async.result-retain-days=7d-->
<#--#-->
<#--#kylin.query.unique-async-query-yarn-queue-enabled=false-->
<#--##kylin.query.async-query.submit-hadoop-conf-dir=-->
<#--#kylin.query.async-query.spark-conf.spark.yarn.queue=default-->
<#--#kylin.query.async-query.spark-conf.spark.executor.extraJavaOptions=-Dhdp.version=current -Dlog4j.configurationFile=spark-executor-log4j.xml -Dkylin.hdfs.working.dir=${kylin.env.hdfs-working-dir} -Dkap.metadata.identifier=${kylin.metadata.url.identifier} -Dkap.spark.category=sparder -Dkap.spark.project=${job.project} -Dkap.spark.mountDir=${kylin.tool.mount-spark-log-dir} -XX:MaxDirectMemorySize=896M-->
<#--#kylin.query.async-query.spark-conf.spark.yarn.am.extraJavaOptions=-Dhdp.version=current-->
<#--#kylin.query.async-query.spark-conf.spark.driver.extraJavaOptions=-Dhdp.version=current-->
<#--#kylin.query.async-query.spark-conf.spark.port.maxRetries=128-->
<#--#kylin.query.async-query.spark-conf.spark.driver.memory=4096m-->
<#--#kylin.query.async-query.spark-conf.spark.sql.driver.maxCollectSize=3600m-->
<#--#kylin.query.async-query.spark-conf.spark.executor.memory=12288m-->
<#--#kylin.query.async-query.spark-conf.spark.executor.memoryOverhead=3072m-->
<#--#kylin.query.async-query.spark-conf.spark.yarn.am.memory=1024m-->
<#--#kylin.query.async-query.spark-conf.spark.executor.cores=5-->
<#--#kylin.query.async-query.spark-conf.spark.executor.instances=4-->
<#--#kylin.query.async-query.spark-conf.spark.task.maxFailures=1-->
<#--#kylin.query.async-query.spark-conf.spark.ui.port=4041-->
<#--#kylin.query.async-query.spark-conf.spark.locality.wait=0s-->
<#--#kylin.query.async-query.spark-conf.spark.sql.dialect=hiveql-->
<#--#kylin.query.async-query.spark-conf.spark.sql.constraintPropagation.enabled=false-->
<#--#kylin.query.async-query.spark-conf.spark.ui.retainedStages=300-->
<#--#kylin.query.async-query.spark-conf.spark.hadoop.yarn.timeline-service.enabled=false-->
<#--#kylin.query.async-query.spark-conf.spark.hadoop.hive.exec.scratchdir=${kylin.env.hdfs-working-dir}/hive-scratch-->
<#--#kylin.query.async-query.spark-conf.spark.hadoop.mapreduce.input.fileinputformat.input.dir.recursive=true-->
<#--#kylin.query.async-query.spark-conf.hive.execution.engine=MR-->
<#--#kylin.query.async-query.spark-conf.spark.sql.crossJoin.enabled=true-->
<#--#kylin.query.async-query.spark-conf.spark.broadcast.autoClean.enabled=true-->
<#--#kylin.query.async-query.spark-conf.spark.sql.objectHashAggregate.sortBased.fallbackThreshold=1-->
<#--#kylin.query.async-query.spark-conf.spark.sql.hive.caseSensitiveInferenceMode=NEVER_INFER-->
<#--#kylin.query.async-query.spark-conf.spark.sql.sources.bucketing.enabled=false-->
<#--#kylin.query.async-query.spark-conf.spark.yarn.stagingDir=${kylin.env.hdfs-working-dir}-->
<#--#kylin.query.async-query.spark-conf.spark.eventLog.enabled=true-->
<#--#kylin.query.async-query.spark-conf.spark.history.fs.logDirectory=${kylin.env.hdfs-working-dir}/sparder-history-->
<#--#kylin.query.async-query.spark-conf.spark.eventLog.dir=${kylin.env.hdfs-working-dir}/sparder-history-->
<#--#kylin.query.async-query.spark-conf.spark.eventLog.rolling.enabled=true-->
<#--#kylin.query.async-query.spark-conf.spark.eventLog.rolling.maxFileSize=100m-->
<#--#kylin.query.async-query.spark-conf.spark.sql.hive.metastore.version=1.2.2-->
<#--#kylin.query.async-query.spark-conf.spark.sql.hive.metastore.jars=${KYLIN_HOME}/spark/hive_1_2_2/*-->
<#--#kylin.query.async-query.spark-conf.spark.sql.cartesianPartitionNumThreshold=-1-->
<#--#kylin.query.async-query.spark-conf.parquet.filter.columnindex.enabled=false-->
<#--#kylin.query.async-query.spark-conf.spark.master=yarn-->
<#--#kylin.query.async-query.spark-conf.spark.submit.deployMode=client-->
<#--#-->
<#--#-->
<#--## ==================== SEGMENT PRUNER ====================-->
<#--#-->
<#--## Enable query filtering by dimension range-->
<#--#kylin.storage.columnar.dimension-range-filter-enabled=true-->
<#--#-->
<#--## ==================== QUERY SECURITY ====================-->
<#--#-->
<#--## Enable/disable ACL checks for cube queries-->
<#--#kylin.query.security.acl-tcr-enabled=true-->
<#--#-->
<#--## ==================== SYS SECURITY ====================-->
<#--#-->
<#--## Spring security profile, options: testing, ldap, saml-->
<#--## with "testing" profile, user can use pre-defined name/pwd like KYLIN/ADMIN to login-->
<#--#kylin.security.profile=testing-->
<#--#-->
<#--## Default roles and admin roles in LDAP, for ldap and saml-->
<#--#kylin.security.acl.default-role=ROLE_ANALYST,ROLE_MODELER-->
<#--#kylin.security.acl.admin-role=ROLE_ADMIN-->
<#--#-->
<#--## LDAP authentication configuration-->
<#--#kylin.security.ldap.connection-server=ldap://ldap_server:389-->
<#--#kylin.security.ldap.connection-username=-->
<#--#kylin.security.ldap.connection-password=-->
<#--#-->
<#--## LDAP user account directory-->
<#--#kylin.security.ldap.user-search-base=-->
<#--#kylin.security.ldap.user-search-pattern=-->
<#--#kylin.security.ldap.user-group-search-base=-->
<#--#-->
<#--## LDAP service account directory-->
<#--#kylin.security.ldap.service-search-base=${kylin.security.ldap.user-search-base}-->
<#--#kylin.security.ldap.service-search-pattern=${kylin.security.ldap.user-search-pattern}-->
<#--#kylin.security.ldap.service-group-search-base=${kylin.security.ldap.user-group-search-base}-->
<#--#-->
<#--### LDAP filter ##-->
<#--## used for searching all users-->
<#--#kylin.security.ldap.user-search-filter=(objectClass=person)-->
<#--## used for searching all group members-->
<#--#kylin.security.ldap.user-group-search-filter=(|(member={0})(memberUid={1}))-->
<#--## used for searching all groups-->
<#--#kylin.security.ldap.group-search-filter=(|(objectClass=groupOfNames)(objectClass=group))-->
<#--## used for searching all users in specific group-->
<#--#kylin.security.ldap.group-member-search-filter=(&(cn={0})(objectClass=groupOfNames))-->
<#--#-->
<#--### LDAP attribute ##-->
<#--## used for getting the user's identifier (e.g. username)-->
<#--#kylin.security.ldap.user-identifier-attr=cn-->
<#--## used for getting the group's identifier-->
<#--#kylin.security.ldap.group-identifier-attr=cn-->
<#--## used for identifying a group's members-->
<#--#kylin.security.ldap.group-member-attr=member-->
<#--#-->
<#--### SAML configurations for SSO-->
<#--## SAML IDP metadata file location-->
<#--#kylin.security.saml.metadata-file=classpath:sso_metadata.xml-->
<#--#kylin.security.saml.metadata-entity-base-url=https://hostname/kylin-->
<#--#kylin.security.saml.context-scheme=https-->
<#--#kylin.security.saml.context-server-name=hostname-->
<#--#kylin.security.saml.context-server-port=443-->
<#--#kylin.security.saml.context-path=/kylin-->
<#--#-->
<#--### CUSTOM UserService default impl-->
<#--#kylin.security.custom.user-service-class-name=-->
<#--### CUSTOM UserGroupService default impl-->
<#--#kylin.security.custom.user-group-service-class-name=-->
<#--### CUSTOM Authentication provider default impl-->
<#--#kylin.security.custom.authentication-provider-class-name=-->
<#--### CUSTOM logout success handler default impl-->
<#--#kylin.security.custom.logout-success-handler=org.springframework.security.web.authentication.logout.SimpleUrlLogoutSuccessHandler-->
<#--#-->
<#--### Row ACL filter limit-->
<#--#kylin.security.acl.row-filter-limit-threshold=100-->
<#--#-->
<#--## ==================== Metrics ====================-->
<#--#kylin.metrics.influx-db=KYLIN_METRICS-->
<#--#kylin.metrics.daily-influx-db=KYLIN_METRICS_DAILY-->
<#--#kylin.metrics.influx-rpc-service-bind-address=127.0.0.1:8088-->
<#--#-->
<#--## ==================== Circuit Breaker ====================-->
<#--#kylin.circuit-breaker.enabled=true-->
<#--#kylin.circuit-breaker.threshold.project=100-->
<#--#kylin.circuit-breaker.threshold.model=100-->
<#--#kylin.circuit-breaker.threshold.query-result-row-count=2000000-->
<#--#-->
<#--## ==================== Kerberos ====================-->
<#--#kylin.kerberos.enabled=false-->
<#--### valid values: Standard, FI (FusionInsight)-->
<#--#kylin.kerberos.platform=Standard-->
<#--#kylin.kerberos.principal=-->
<#--#kylin.kerberos.keytab=-->
<#--#-->
<#--#kylin.kerberos.cache=kap_kerberos.cache-->
<#--#kylin.kerberos.krb5-conf=krb5.conf-->
<#--#kylin.kerberos.ticket-refresh-interval-minutes=720-->
<#--#kylin.kerberos.zookeeper-server-principal=zookeeper/hadoop-->
<#--#-->
<#--## ==================== Cache =======================-->
<#--#kylin.cache.config=classpath:ehcache.xml-->
<#--#kylin.cache.redis.enabled=false-->
<#--#kylin.cache.redis.cluster-enabled=false-->
<#--#kylin.cache.redis.hosts=localhost:6379-->
<#--#kylin.cache.redis.password=-->
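<#--## expire-time-unit uses Redis SET unit flags: EX = seconds, PX = milliseconds-->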
<#--#kylin.cache.redis.expire-time-unit=EX-->
<#--#kylin.cache.redis.expire-time=86400-->
<#--#kylin.cache.redis.exception-expire-time=600-->
<#--#kylin.cache.redis.connection-timeout=2000-->
<#--#kylin.cache.redis.so-timeout=2000-->
<#--#kylin.cache.redis.max-attempts=20-->
<#--#kylin.cache.redis.max-total=8-->
<#--#kylin.cache.redis.max-idle=8-->
<#--#kylin.cache.redis.min-idle=0-->
<#--#kylin.cache.redis.batch-count=100000-->
<#--#kylin.cache.redis.max-wait=300000-->
<#--#-->
<#--## ==================== Diagnosis ====================-->
<#--##kylin.diag.package.timeout-seconds=3600-->
<#--##kylin.diag.extraction.start-time-days=3-->
<#--#-->
<#--### Allow users to export query results-->
<#--#kylin.web.export-allow-admin=true-->
<#--#kylin.web.export-allow-other=true-->
<#--#-->
<#--### metastore health check-->
<#--#kylin.health.metastore-warning-response-ms=300-->
<#--#kylin.health.metastore-error-response-ms=1000-->
<#--#-->
<#--### The unit is seconds, so 3600 equals one hour-->
<#--#kylin.source.load-hive-tablename-interval-seconds=3600-->
<#--#kylin.source.load-hive-tablename-enabled=true-->
<#--#-->
<#--#-->
<#--### acl-->
<#--#kylin.security.allow-project-admin-grant-acl=true-->
<#--#-->
<#--### ha-->
<#--#kylin.server.leader-race.enabled=true-->
<#--#kylin.server.leader-race.heart-beat-timeout=60-->
<#--#kylin.server.leader-race.heart-beat-interval=30-->
<#--#-->
<#--### jstack-->
<#--#kylin.task.jstack-dump-enabled=true-->
<#--#kylin.task.jstack-dump-interval-minutes=10-->
<#--#kylin.task.jstack-dump-log-files-max-count=20-->
<#--#-->
<#--#-->
<#--#kylin.web.session.secure-random-create-enabled=false-->
<#--#kylin.web.session.jdbc-encode-enabled=false-->
<#--#-->
<#--## user password encoding algorithm-->
<#--#kylin.security.user-password-encoder=org.apache.kylin.rest.security.CachedBCryptPasswordEncoder-->
<#--#-->
<#--## model-->
<#--#kylin.model.recommendation-page-size=500-->
<#--#kylin.model.dimension-measure-name.max-length=300-->
<#--#-->
<#--## recommend-->
<#--#kylin.model.suggest-model-sql-limit=200-->
<#--#-->
<#--## KG (Kylin Guardian)-->
<#--#kylin.guardian.enabled=false-->
<#--#-->
<#--## Tool-->
<#--#kylin.tool.mount-spark-log-dir=-->
<#--#kylin.tool.clean-diag-tmp-file=false-->
<#--#kylin.tool.spark-log-extractor=org.apache.kylin.tool.YarnSparkLogExtractor-->
<#--#kylin.lightning.cluster-id=0-->
<#--#kylin.lightning.workspace-id=0-->
<#--#kylin.lightning.server.zookeeper-node=/kylin/management-->
<#--#-->
<#--#kylin.favorite.import-sql-max-size=1000-->
<#--#kylin.model.measure-name-check-enabled=true-->
<#--#-->
<#--## ==================== Streaming ====================-->
<#--#kylin.streaming.spark-conf.spark.sql.hive.metastore.version=1.2.2-->
<#--#kylin.streaming.spark-conf.spark.sql.hive.metastore.jars=${KYLIN_HOME}/spark/hive_1_2_2/*-->
<#--#kylin.streaming.spark-conf.spark.driver.extraJavaOptions=-Dfile.encoding=UTF-8 -Dhdp.version=current -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=${KYLIN_HOME}/logs-->
<#--#kylin.streaming.spark-conf.spark.executor.extraJavaOptions=-Dfile.encoding=UTF-8 -Dhdp.version=current -Dkylin.hdfs.working.dir=${kylin.env.hdfs-working-dir} -Dkap.metadata.identifier=${kylin.metadata.url.identifier} -Dkap.spark.category=streaming_job-->

<#if dependencies.ZOOKEEPER??>
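    <#-- Build the ZooKeeper quorum from the ZOOKEEPER dependency: one "host:port"
         entry per ZOOKEEPER_SERVER role, joined below for kylin.env.zookeeper-connect-string -->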
    <#assign zookeeper=dependencies.ZOOKEEPER quorum=[]>
    <#list zookeeper.serviceRoles['ZOOKEEPER_SERVER'] as role>
        <#assign quorum += [role.hostname + ":" + zookeeper.conf["zookeeper.client.port"]]>
    </#list>

</#if>

server.port=${conf['kylin.ui.port']}
kylin.query.init-sparder-async=false

kylin.env.apache-hadoop-conf-dir=/home/hadoop/apache-kylin/conf
kylin.env.apache-hive-conf-dir=/home/hadoop/apache-kylin/conf
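# Metadata is stored in an RDBMS via JDBC; the connection fields below are filled
# in from the service configuration. An illustrative resolved value (hypothetical
# MySQL host and credentials):
# kylin.metadata.url=kylin@jdbc,driverClassName=com.mysql.jdbc.Driver,url=jdbc:mysql://metastore-host:3306/kylin,username=kylin,password=...,maxTotal=50,maxIdle=8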
kylin.metadata.url=kylin@jdbc,driverClassName=${conf['ConnectionDriverName']},url=${conf['ConnectionURL']},username=${conf['ConnectionUserName']},password=${conf['ConnectionPassword']},maxTotal=50,maxIdle=8
<#if quorum??>
kylin.env.zookeeper-connect-string=${quorum?join(",")}
</#if>
kylin.env.hdfs-working-dir=${conf['kylin.hdfs.work.dir']}

# Query
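# Small-footprint settings for the query (sparder) Spark context, close to the
# "minimal" profile values commented out above.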
kylin.storage.columnar.spark-conf.spark.driver.memory=512M
kylin.storage.columnar.spark-conf.spark.driver.memoryOverhead=512M
kylin.storage.columnar.spark-conf.spark.executor.cores=1
kylin.storage.columnar.spark-conf.spark.executor.instances=1
kylin.storage.columnar.spark-conf.spark.executor.memory=1024M
kylin.storage.columnar.spark-conf.spark.executor.memoryOverhead=512M

# Build
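# driver-memory-strategy lists thresholds (presumably cuboid/layout counts) at
# which the build driver's memory is stepped up from its base value; compare the
# commented default of 2,20,100 above.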
kylin.engine.driver-memory-strategy=2,500
kylin.engine.spark-conf.spark.driver.memory=512M
kylin.engine.spark-conf.spark.driver.memoryOverhead=512M
kylin.engine.spark-conf.spark.executor.cores=1
kylin.engine.spark-conf.spark.executor.instances=1
kylin.engine.spark-conf.spark.executor.memory=1024M
kylin.engine.spark-conf.spark.executor.memoryOverhead=512M

# Async Query
kylin.query.async-query.spark-conf.spark.executor.cores=1
kylin.query.async-query.spark-conf.spark.driver.memory=1024M
kylin.query.async-query.spark-conf.spark.executor.memory=1024M
kylin.query.async-query.spark-conf.spark.executor.instances=1
kylin.query.async-query.spark-conf.spark.executor.memoryOverhead=512M

# Other
kylin.metadata.random-admin-password.enabled=false
kylin.query.engine.push-down.enable-prepare-statement-with-params=true
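# Calcite's FUN property selects built-in operator tables; "standard,oracle" adds
# Oracle-style functions (e.g. NVL, DECODE) to the standard set.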
kylin.query.calcite.extras-props.FUN=standard,oracle
kylin.circuit-breaker.threshold.project=500
kylin.engine.resource-request-over-limit-proportion=3.0