in streampark-common/src/main/scala/org/apache/streampark/common/conf/ConfigConst.scala [83:201]
/** Config-key builders: the optional prefix is prepended verbatim to the base key. */
def KEY_APP_CONF(prefix: String = null): String = s"${Option(prefix).getOrElse("")}conf"
def KEY_FLINK_CONF(prefix: String = null): String = s"${Option(prefix).getOrElse("")}flink.conf"
def KEY_APP_NAME(prefix: String = null): String = s"${Option(prefix).getOrElse("")}app.name"
def KEY_FLINK_SQL(prefix: String = null): String = s"${Option(prefix).getOrElse("")}sql"
def KEY_FLINK_PARALLELISM(prefix: String = null): String =
s"${Option(prefix).getOrElse("")}parallelism.default"
val KEY_FLINK_OPTION_PREFIX = "flink.option."
val KEY_FLINK_PROPERTY_PREFIX = "flink.property."
val KEY_FLINK_TABLE_PREFIX = "flink.table."
val KEY_APP_PREFIX = "app."
val KEY_SQL_PREFIX = "sql."
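// Sketch of how such prefixes are typically consumed (hypothetical helper,
// not defined in this file): select keys by prefix and strip it, so that
// "flink.property.state.backend" becomes "state.backend":
//   def extractByPrefix(props: Map[String, String], prefix: String): Map[String, String] =
//     props.collect { case (k, v) if k.startsWith(prefix) => k.drop(prefix.length) -> v }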
val KEY_FLINK_APP_NAME = "pipeline.name"
val KEY_YARN_APP_ID = "yarn.application.id"
val KEY_YARN_APP_NAME = "yarn.application.name"
val KEY_YARN_APP_QUEUE = "yarn.application.queue"
val KEY_YARN_APP_NODE_LABEL = "yarn.application.node-label"
val KEY_K8S_IMAGE_PULL_POLICY = "kubernetes.container.image.pull-policy"
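// Illustrative values (assumed, not from this file) for the deployment keys above:
//   Map(
//     KEY_YARN_APP_QUEUE        -> "default",
//     KEY_YARN_APP_NODE_LABEL   -> "gpu",
//     KEY_K8S_IMAGE_PULL_POLICY -> "Always")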
// --- Flink Table config keys ---
val KEY_FLINK_TABLE_PLANNER = "flink.table.planner"
val KEY_FLINK_TABLE_MODE = "flink.table.mode"
val KEY_FLINK_TABLE_CATALOG = "flink.table.catalog"
val KEY_FLINK_TABLE_DATABASE = "flink.table.database"
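// Illustrative values (assumed, not from this file):
//   flink.table.planner = blink | old
//   flink.table.mode    = streaming | batch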
/** Kafka connector config keys */
val KAFKA_SINK_PREFIX = "kafka.sink."
val KAFKA_SOURCE_PREFIX = "kafka.source."
val KEY_KAFKA_TOPIC = "topic"
val KEY_KAFKA_SEMANTIC = "semantic"
val KEY_KAFKA_PATTERN = "pattern"
val KEY_KAFKA_START_FROM = "start.from"
val KEY_KAFKA_START_FROM_OFFSET = "offset"
val KEY_KAFKA_START_FROM_TIMESTAMP = "timestamp"
val KEY_ALIAS = "alias"
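// Illustrative composition (not part of the original file): full keys are the
// connector prefix plus a short key, e.g.
//   KAFKA_SOURCE_PREFIX + KEY_KAFKA_TOPIC  // "kafka.source.topic"
//   KAFKA_SINK_PREFIX + KEY_KAFKA_SEMANTIC // "kafka.sink.semantic"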
/** JDBC config keys */
val KEY_JDBC_PREFIX = "jdbc."
val KEY_JDBC_DRIVER = "driverClassName"
val KEY_JDBC_URL = "jdbcUrl"
val KEY_JDBC_USER = "username"
val KEY_JDBC_PASSWORD = "password"
val KEY_JDBC_INSERT_BATCH = "batch.size"
val DEFAULT_JDBC_INSERT_BATCH = 1
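// Usage sketch (assumed, mirroring HikariCP property names): a "jdbc." block is
// flattened into connection properties keyed by the constants above, e.g.
//   Map(
//     KEY_JDBC_DRIVER   -> "com.mysql.cj.jdbc.Driver",
//     KEY_JDBC_URL      -> "jdbc:mysql://localhost:3306/test",
//     KEY_JDBC_USER     -> "root",
//     KEY_JDBC_PASSWORD -> "******")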
/** MongoDB config key prefix */
val MONGO_PREFIX = "mongodb."
/** HBase config keys */
val HBASE_PREFIX = "hbase."
val KEY_HBASE_COMMIT_BATCH = "hbase.commit.batch"
val KEY_HBASE_WRITE_SIZE = "hbase.client.write.size"
val DEFAULT_HBASE_COMMIT_BATCH = 1000
val KEY_HBASE_AUTH_USER = "hbase.auth.user"
val DEFAULT_HBASE_WRITE_SIZE: Int = 1024 * 1024 * 10 // 10 MB
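// Sketch (hypothetical caller, not in this file): the defaults back the tunables above:
//   val batch = props.get(KEY_HBASE_COMMIT_BATCH).map(_.toInt).getOrElse(DEFAULT_HBASE_COMMIT_BATCH)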
/** InfluxDB config key prefix */
val INFLUX_PREFIX = "influx."
/** Flink application and memory config keys */
val KEY_FLINK_APPLICATION_MAIN_CLASS = "$internal.application.main"
val KEY_FLINK_JM_PROCESS_MEMORY = "jobmanager.memory.process.size"
val KEY_FLINK_TM_PROCESS_MEMORY = "taskmanager.memory.process.size"
val STREAMPARK_FLINKSQL_CLIENT_CLASS = "org.apache.streampark.flink.cli.SqlClient"
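// Illustrative values (assumed, not from this file): both memory keys take
// Flink memory-size strings, e.g.
//   Map(
//     KEY_FLINK_JM_PROCESS_MEMORY -> "1600m",
//     KEY_FLINK_TM_PROCESS_MEMORY -> "2g")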
def printLogo(info: String): Unit = {
// scalastyle:off println
println("\n")
println(" _____ __ __ ")
println(" / ___// /_________ ____ _____ ___ ____ ____ ______/ /__ ")
println(" \\__ \\/ __/ ___/ _ \\/ __ `/ __ `__ \\/ __ \\ __ `/ ___/ //_/")
println(" ___/ / /_/ / / __/ /_/ / / / / / / /_/ / /_/ / / / ,< ")
println(" /____/\\__/_/ \\___/\\__,_/_/ /_/ /_/ ____/\\__,_/_/ /_/|_| ")
println(" /_/ \n\n")
println(" Version: 2.2.0-SNAPSHOT ")
println(" WebSite: https://streampark.apache.org ")
println(" GitHub : https://github.com/apache/incubator-streampark ")
println(s" Info : $info ")
println(s" Time : ${LocalDateTime.now} \n\n")
// scalastyle:on println
}
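// Example call (illustrative; $jobName is a hypothetical variable):
//   printLogo(s"flink job $jobName submitted")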