static initializer

in ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java [2856:3298]


  static {

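    // Register each category of updaters so that callers can iterate over
    // allUpdaters when they need to consult every registered updater.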
    allUpdaters.add(singleHostTopologyUpdaters);
    allUpdaters.add(multiHostTopologyUpdaters);
    allUpdaters.add(dbHostTopologyUpdaters);
    allUpdaters.add(mPropertyUpdaters);
    allUpdaters.add(nonTopologyUpdaters);

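    // One updater map per config type; each map is populated further down and
    // registered with the appropriate updater category.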
    Map<String, PropertyUpdater> amsSiteMap = new HashMap<>();
    Map<String, PropertyUpdater> druidCommon = new HashMap<>();
    Map<String, PropertyUpdater> hdfsSiteMap = new HashMap<>();
    Map<String, PropertyUpdater> mapredSiteMap = new HashMap<>();
    Map<String, PropertyUpdater> coreSiteMap = new HashMap<>();
    Map<String, PropertyUpdater> hbaseSiteMap = new HashMap<>();
    Map<String, PropertyUpdater> yarnSiteMap = new HashMap<>();
    Map<String, PropertyUpdater> hiveSiteMap = new HashMap<>();
    Map<String, PropertyUpdater> hiveSiteNonTopologyMap = new HashMap<>();
    Map<String, PropertyUpdater> hiveEnvOriginalValueMap = new HashMap<>();
    Map<String, PropertyUpdater> oozieSiteOriginalValueMap = new HashMap<>();
    Map<String, PropertyUpdater> oozieSiteMap = new HashMap<>();
    Map<String, PropertyUpdater> stormSiteMap = new HashMap<>();
    Map<String, PropertyUpdater> stormSiteNonTopologyMap = new HashMap<>();
    Map<String, PropertyUpdater> accumuloSiteMap = new HashMap<>();
    Map<String, PropertyUpdater> falconStartupPropertiesMap = new HashMap<>();
    Map<String, PropertyUpdater> kafkaBrokerMap = new HashMap<>();
    Map<String, PropertyUpdater> kafkaBrokerNonTopologyMap = new HashMap<>();
    Map<String, PropertyUpdater> atlasPropsMap = new HashMap<>();
    Map<String, PropertyUpdater> mapredEnvMap = new HashMap<>();
    Map<String, PropertyUpdater> mHadoopEnvMap = new HashMap<>();
    Map<String, PropertyUpdater> shHadoopEnvMap = new HashMap<>();
    Map<String, PropertyUpdater> clusterEnvMap = new HashMap<>();
    Map<String, PropertyUpdater> hbaseEnvMap = new HashMap<>();
    Map<String, PropertyUpdater> hiveEnvMap = new HashMap<>();
    Map<String, PropertyUpdater> hiveInteractiveEnvMap = new HashMap<>();
    Map<String, PropertyUpdater> hiveInteractiveSiteMap = new HashMap<>();
    Map<String, PropertyUpdater> oozieEnvMap = new HashMap<>();
    Map<String, PropertyUpdater> oozieEnvHeapSizeMap = new HashMap<>();
    Map<String, PropertyUpdater> multiWebhcatSiteMap = new HashMap<>();
    Map<String, PropertyUpdater> multiHbaseSiteMap = new HashMap<>();
    Map<String, PropertyUpdater> livy2Conf = new HashMap<>();
    Map<String, PropertyUpdater> multiStormSiteMap = new HashMap<>();
    Map<String, PropertyUpdater> multiCoreSiteMap = new HashMap<>();
    Map<String, PropertyUpdater> multiHdfsSiteMap = new HashMap<>();
    Map<String, PropertyUpdater> multiHiveSiteMap = new HashMap<>();
    Map<String, PropertyUpdater> multiKafkaBrokerMap = new HashMap<>();
    Map<String, PropertyUpdater> multiYarnSiteMap = new HashMap<>();
    Map<String, PropertyUpdater> multiOozieSiteMap = new HashMap<>();
    Map<String, PropertyUpdater> multiAccumuloSiteMap = new HashMap<>();
    Map<String, PropertyUpdater> multiRangerKmsSiteMap = new HashMap<>();
    Map<String, PropertyUpdater> dbHiveSiteMap = new HashMap<>();
    Map<String, PropertyUpdater> rangerAdminPropsMap = new HashMap<>();
    Map<String, PropertyUpdater> rangerEnvPropsMap = new HashMap<>();
    Map<String, PropertyUpdater> rangerYarnAuditPropsMap = new HashMap<>();
    Map<String, PropertyUpdater> rangerHdfsAuditPropsMap = new HashMap<>();
    Map<String, PropertyUpdater> rangerHbaseAuditPropsMap = new HashMap<>();
    Map<String, PropertyUpdater> rangerHiveAuditPropsMap = new HashMap<>();
    Map<String, PropertyUpdater> rangerKnoxAuditPropsMap = new HashMap<>();
    Map<String, PropertyUpdater> rangerKafkaAuditPropsMap = new HashMap<>();
    Map<String, PropertyUpdater> rangerStormAuditPropsMap = new HashMap<>();
    Map<String, PropertyUpdater> rangerAtlasAuditPropsMap = new HashMap<>();
    Map<String, PropertyUpdater> hawqSiteMap = new HashMap<>();
    Map<String, PropertyUpdater> zookeeperEnvMap = new HashMap<>();

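    // Config types whose listed properties resolve to the host of a single component.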
    singleHostTopologyUpdaters.put("ams-site", amsSiteMap);
    singleHostTopologyUpdaters.put("druid-common", druidCommon);
    singleHostTopologyUpdaters.put("hdfs-site", hdfsSiteMap);
    singleHostTopologyUpdaters.put("mapred-site", mapredSiteMap);
    singleHostTopologyUpdaters.put("core-site", coreSiteMap);
    singleHostTopologyUpdaters.put("hbase-site", hbaseSiteMap);
    singleHostTopologyUpdaters.put("yarn-site", yarnSiteMap);
    singleHostTopologyUpdaters.put("hive-site", hiveSiteMap);
    singleHostTopologyUpdaters.put("hive-interactive-env", hiveInteractiveEnvMap);
    singleHostTopologyUpdaters.put("storm-site", stormSiteMap);
    singleHostTopologyUpdaters.put("accumulo-site", accumuloSiteMap);
    singleHostTopologyUpdaters.put("falcon-startup.properties", falconStartupPropertiesMap);
    singleHostTopologyUpdaters.put("hive-env", hiveEnvMap);
    singleHostTopologyUpdaters.put("oozie-env", oozieEnvMap);
    singleHostTopologyUpdaters.put("kafka-broker", kafkaBrokerMap);
    singleHostTopologyUpdaters.put("admin-properties", rangerAdminPropsMap);
    singleHostTopologyUpdaters.put("ranger-env", rangerEnvPropsMap);
    singleHostTopologyUpdaters.put("ranger-yarn-audit", rangerYarnAuditPropsMap);
    singleHostTopologyUpdaters.put("ranger-hdfs-audit", rangerHdfsAuditPropsMap);
    singleHostTopologyUpdaters.put("ranger-hbase-audit", rangerHbaseAuditPropsMap);
    singleHostTopologyUpdaters.put("ranger-hive-audit", rangerHiveAuditPropsMap);
    singleHostTopologyUpdaters.put("ranger-knox-audit", rangerKnoxAuditPropsMap);
    singleHostTopologyUpdaters.put("ranger-kafka-audit", rangerKafkaAuditPropsMap);
    singleHostTopologyUpdaters.put("ranger-storm-audit", rangerStormAuditPropsMap);
    singleHostTopologyUpdaters.put("ranger-atlas-audit", rangerAtlasAuditPropsMap);
    singleHostTopologyUpdaters.put(HADOOP_ENV_CONFIG_TYPE_NAME, shHadoopEnvMap);
    singleHostTopologyUpdaters.put(CLUSTER_ENV_CONFIG_TYPE_NAME, clusterEnvMap);

    singleHostTopologyUpdaters.put("hawq-site", hawqSiteMap);
    singleHostTopologyUpdaters.put("zookeeper-env", zookeeperEnvMap);


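    // Config types with unit-sensitive values such as heap sizes
    // (see addUnitPropertyUpdaters() near the end of this block).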
    mPropertyUpdaters.put(HADOOP_ENV_CONFIG_TYPE_NAME, mHadoopEnvMap);
    mPropertyUpdaters.put("hbase-env", hbaseEnvMap);
    mPropertyUpdaters.put("mapred-env", mapredEnvMap);
    mPropertyUpdaters.put("oozie-env", oozieEnvHeapSizeMap);

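    // Config types whose listed properties contain a delimited list of component hosts.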
    multiHostTopologyUpdaters.put("webhcat-site", multiWebhcatSiteMap);
    multiHostTopologyUpdaters.put("hbase-site", multiHbaseSiteMap);
    multiHostTopologyUpdaters.put("storm-site", multiStormSiteMap);
    multiHostTopologyUpdaters.put("core-site", multiCoreSiteMap);
    multiHostTopologyUpdaters.put("hdfs-site", multiHdfsSiteMap);
    multiHostTopologyUpdaters.put("hive-site", multiHiveSiteMap);
    multiHostTopologyUpdaters.put("hive-interactive-site", hiveInteractiveSiteMap);
    multiHostTopologyUpdaters.put("kafka-broker", multiKafkaBrokerMap);
    multiHostTopologyUpdaters.put("yarn-site", multiYarnSiteMap);
    multiHostTopologyUpdaters.put("oozie-site", multiOozieSiteMap);
    multiHostTopologyUpdaters.put("accumulo-site", multiAccumuloSiteMap);
    multiHostTopologyUpdaters.put("kms-site", multiRangerKmsSiteMap);
    multiHostTopologyUpdaters.put("application-properties", atlasPropsMap);
    multiHostTopologyUpdaters.put("livy2-conf", livy2Conf);

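    // Properties that point at a database host, resolved via DBTopologyUpdater.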
    dbHostTopologyUpdaters.put("hive-site", dbHiveSiteMap);

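    // Updaters that rewrite values without resolving any host topology.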
    nonTopologyUpdaters.put("hive-site", hiveSiteNonTopologyMap);
    nonTopologyUpdaters.put("kafka-broker", kafkaBrokerNonTopologyMap);
    nonTopologyUpdaters.put("storm-site", stormSiteNonTopologyMap);

    //todo: Need to change updaters back to being static
    //todo: will need to pass ClusterTopology in as necessary


    // NAMENODE
    hdfsSiteMap.put("dfs.http.address", new OptionalSingleHostTopologyUpdater("NAMENODE"));
    hdfsSiteMap.put("dfs.https.address", new OptionalSingleHostTopologyUpdater("NAMENODE"));
    coreSiteMap.put("fs.default.name", new OptionalSingleHostTopologyUpdater("NAMENODE"));
    hdfsSiteMap.put("dfs.namenode.http-address", new OptionalSingleHostTopologyUpdater("NAMENODE"));
    hdfsSiteMap.put("dfs.namenode.https-address", new OptionalSingleHostTopologyUpdater("NAMENODE"));
    hdfsSiteMap.put("dfs.namenode.rpc-address", new OptionalSingleHostTopologyUpdater("NAMENODE"));
    coreSiteMap.put("fs.defaultFS", new OptionalSingleHostTopologyUpdater("NAMENODE"));
    hbaseSiteMap.put("hbase.rootdir", new OptionalSingleHostTopologyUpdater("NAMENODE"));
    accumuloSiteMap.put("instance.volumes", new SingleHostTopologyUpdater("NAMENODE"));
    // HDFS shared.edits JournalNode Quorum URL uses semi-colons as separators
    multiHdfsSiteMap.put("dfs.namenode.shared.edits.dir", new MultipleHostTopologyUpdater("JOURNALNODE", ';', false, false, true));
    multiHdfsSiteMap.put("dfs.encryption.key.provider.uri", new MultipleHostTopologyUpdater("RANGER_KMS_SERVER", ';', false, false, false));
    // Explicit initial primary/secondary node assignment in HA
    clusterEnvMap.put(HDFS_ACTIVE_NAMENODE_PROPERTY_NAME, new SingleHostTopologyUpdater("NAMENODE"));
    clusterEnvMap.put(HDFS_STANDBY_NAMENODE_PROPERTY_NAME, new SingleHostTopologyUpdater("NAMENODE"));

    // SECONDARY_NAMENODE
    hdfsSiteMap.put("dfs.secondary.http.address", new OptionalSingleHostTopologyUpdater("SECONDARY_NAMENODE"));
    hdfsSiteMap.put("dfs.namenode.secondary.http-address", new OptionalSingleHostTopologyUpdater("SECONDARY_NAMENODE"));

    // JOBTRACKER
    mapredSiteMap.put("mapred.job.tracker", new SingleHostTopologyUpdater("JOBTRACKER"));
    mapredSiteMap.put("mapred.job.tracker.http.address", new SingleHostTopologyUpdater("JOBTRACKER"));
    mapredSiteMap.put("mapreduce.history.server.http.address", new SingleHostTopologyUpdater("JOBTRACKER"));
    mapredSiteMap.put("mapreduce.job.hdfs-servers", new SingleHostTopologyUpdater("NAMENODE"));


    // HISTORYSERVER
    yarnSiteMap.put("yarn.log.server.url", new OptionalSingleHostTopologyUpdater("HISTORYSERVER"));
    mapredSiteMap.put("mapreduce.jobhistory.webapp.address", new OptionalSingleHostTopologyUpdater("HISTORYSERVER"));
    mapredSiteMap.put("mapreduce.jobhistory.address", new OptionalSingleHostTopologyUpdater("HISTORYSERVER"));

    // RESOURCEMANAGER
    yarnSiteMap.put("yarn.resourcemanager.hostname", new OptionalSingleHostTopologyUpdater("RESOURCEMANAGER"));
    yarnSiteMap.put("yarn.resourcemanager.resource-tracker.address", new OptionalSingleHostTopologyUpdater("RESOURCEMANAGER"));
    yarnSiteMap.put("yarn.resourcemanager.webapp.address", new OptionalSingleHostTopologyUpdater("RESOURCEMANAGER"));
    yarnSiteMap.put("yarn.resourcemanager.scheduler.address", new OptionalSingleHostTopologyUpdater("RESOURCEMANAGER"));
    yarnSiteMap.put("yarn.resourcemanager.address", new OptionalSingleHostTopologyUpdater("RESOURCEMANAGER"));
    yarnSiteMap.put("yarn.resourcemanager.admin.address", new OptionalSingleHostTopologyUpdater("RESOURCEMANAGER"));
    yarnSiteMap.put("yarn.resourcemanager.webapp.https.address", new OptionalSingleHostTopologyUpdater("RESOURCEMANAGER"));

    // APP_TIMELINE_SERVER
    yarnSiteMap.put("yarn.timeline-service.address", new OptionalSingleHostTopologyUpdater("APP_TIMELINE_SERVER"));
    yarnSiteMap.put("yarn.timeline-service.webapp.address", new OptionalSingleHostTopologyUpdater("APP_TIMELINE_SERVER"));
    yarnSiteMap.put("yarn.timeline-service.webapp.https.address", new OptionalSingleHostTopologyUpdater("APP_TIMELINE_SERVER"));
    yarnSiteMap.put("yarn.log.server.web-service.url", new OptionalSingleHostTopologyUpdater("APP_TIMELINE_SERVER"));

    // TIMELINE_READER
    yarnSiteMap.put("yarn.timeline-service.reader.webapp.address", new MultipleHostTopologyUpdater("TIMELINE_READER"));
    yarnSiteMap.put("yarn.timeline-service.reader.webapp.https.address", new MultipleHostTopologyUpdater("TIMELINE_READER"));

    // HIVE_SERVER
    hiveSiteMap.put("hive.server2.authentication.ldap.url", new SingleHostTopologyUpdater("HIVE_SERVER2"));
    multiHiveSiteMap.put("hive.metastore.uris", new MultipleHostTopologyUpdater("HIVE_METASTORE", ',', true, true, true));
    dbHiveSiteMap.put("javax.jdo.option.ConnectionURL",
      new DBTopologyUpdater("MYSQL_SERVER", "hive-env", "hive_database"));
    multiCoreSiteMap.put("hadoop.proxyuser.hive.hosts", new MultipleHostTopologyUpdater("HIVE_SERVER"));
    multiCoreSiteMap.put("hadoop.proxyuser.HTTP.hosts", new MultipleHostTopologyUpdater("WEBHCAT_SERVER"));
    multiCoreSiteMap.put("hadoop.proxyuser.hcat.hosts", new MultipleHostTopologyUpdater("WEBHCAT_SERVER"));
    multiCoreSiteMap.put("hadoop.proxyuser.yarn.hosts", new MultipleHostTopologyUpdater("RESOURCEMANAGER"));
    multiCoreSiteMap.put("hadoop.security.key.provider.path", new MultipleHostTopologyUpdater("RANGER_KMS_SERVER", ';', false, false, true));
    multiWebhcatSiteMap.put("templeton.hive.properties", new TempletonHivePropertyUpdater());
    multiHiveSiteMap.put("hive.zookeeper.quorum", new MultipleHostTopologyUpdater("ZOOKEEPER_SERVER"));
    multiHiveSiteMap.put("hive.cluster.delegation.token.store.zookeeper.connectString", new MultipleHostTopologyUpdater("ZOOKEEPER_SERVER"));

    // HIVE Interactive Server
    hiveInteractiveEnvMap.put("hive_server_interactive_host", new SingleHostTopologyUpdater("HIVE_SERVER_INTERACTIVE"));
    hiveInteractiveSiteMap.put("hive.llap.zk.sm.connectionString", new MultipleHostTopologyUpdater("ZOOKEEPER_SERVER"));

    // HIVE Atlas integration
    hiveSiteNonTopologyMap.put("hive.exec.post.hooks", new NonTopologyUpdater() {
      @Override
      public String updateForClusterCreate(String propertyName,
                                           String origValue,
                                           Map<String, Map<String, String>> properties,
                                           ClusterTopology topology) {
        String atlasHookClass = "org.apache.atlas.hive.hook.HiveHook";
        String[] hiveHooks = origValue.split(",");

        List<String> hiveHooksClean = new ArrayList<>();
        for (String hiveHook : hiveHooks) {
          if (!StringUtils.isBlank(hiveHook)) {
            hiveHooksClean.add(hiveHook.trim());
          }
        }

        boolean isAtlasInCluster = topology.getBlueprint().getServices().contains("ATLAS");
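        // note: this assumes a hive-env config type is present in the supplied properties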
        boolean isAtlasHiveHookEnabled = Boolean.parseBoolean(properties.get("hive-env").get("hive.atlas.hook"));

        // Append atlas hook if not already present.
        if (isAtlasInCluster || isAtlasHiveHookEnabled) {
          if (!hiveHooksClean.contains(atlasHookClass)) {
            hiveHooksClean.add(atlasHookClass);
          }
        } else {
          // Remove the atlas hook since Atlas service is not present.
          while (hiveHooksClean.contains(atlasHookClass)) {
            hiveHooksClean.remove(atlasHookClass);
          }
        }

        if (!hiveHooksClean.isEmpty()) {
          return StringUtils.join(hiveHooksClean, ",");
        } else {
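          // a single space, rather than an empty string, keeps the property defined
          // while effectively clearing it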
          return " ";
        }
      }
    });

    // TODO AMBARI-17782, remove this property from hive-site only in HDP 2.5 and higher.
    hiveSiteNonTopologyMap.put("atlas.cluster.name", new NonTopologyUpdater() {
      @Override
      public String updateForClusterCreate(String propertyName,
                                           String origValue,
                                           Map<String, Map<String, String>> properties,
                                           ClusterTopology topology) {

        if (topology.getBlueprint().getServices().contains("ATLAS")) {
          // if the original value is not set, or is the default "primary", use the cluster id
          if (origValue == null || origValue.trim().isEmpty() || origValue.equals("primary")) {
            // use the cluster id, because the cluster name may change
            return String.valueOf(topology.getClusterId());
          } else {
            // if explicitly set by user, don't override
            return origValue;
          }
        } else {
          return origValue;
        }
      }

      @Override
      public String updateForBlueprintExport(String propertyName,
                                             String origValue,
                                             Map<String, Map<String, String>> properties,
                                             ClusterTopology topology) {

        // if the value is the cluster id, then update to primary
        if (origValue.equals(String.valueOf(topology.getClusterId()))) {
          return "primary";
        }
        return origValue;
      }
    });

    // TODO AMBARI-17782, remove this property only from HDP 2.5 and higher.
    hiveSiteMap.put("atlas.rest.address", new SingleHostTopologyUpdater("ATLAS_SERVER") {
      @Override
      public String updateForClusterCreate(String propertyName,
                                           String origValue,
                                           Map<String, Map<String, String>> properties,
                                           ClusterTopology topology) {
        if (topology.getBlueprint().getServices().contains("ATLAS")) {
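          // assumes at least one host is assigned to ATLAS_SERVER when the service is present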
          String host = topology.getHostAssignmentsForComponent("ATLAS_SERVER").iterator().next();

          boolean tlsEnabled = Boolean.parseBoolean(properties.get("application-properties").get("atlas.enableTLS"));
          String scheme;
          String port;
          if (tlsEnabled) {
            scheme = "https";
            port = properties.get("application-properties").get("atlas.server.https.port");
          } else {
            scheme = "http";
            port = properties.get("application-properties").get("atlas.server.http.port");
          }

          return String.format("%s://%s:%s", scheme, host, port);
        }
        return origValue;
      }
    });


    // OOZIE_SERVER
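    // fetch (or lazily create) the oozie-site single-host updater map before adding to it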
    Map<String, PropertyUpdater> oozieStringPropertyUpdaterMap = singleHostTopologyUpdaters.get("oozie-site");
    if (oozieStringPropertyUpdaterMap == null) {
      oozieStringPropertyUpdaterMap = new HashMap<>();
    }
    oozieStringPropertyUpdaterMap.put("oozie.base.url", new SingleHostTopologyUpdater("OOZIE_SERVER"));
    singleHostTopologyUpdaters.put("oozie-site", oozieStringPropertyUpdaterMap);

    multiCoreSiteMap.put("hadoop.proxyuser.oozie.hosts", new MultipleHostTopologyUpdater("OOZIE_SERVER"));

    // ZOOKEEPER_SERVER
    multiHbaseSiteMap.put("hbase.zookeeper.quorum", new MultipleHostTopologyUpdater("ZOOKEEPER_SERVER"));
    multiWebhcatSiteMap.put("templeton.zookeeper.hosts", new MultipleHostTopologyUpdater("ZOOKEEPER_SERVER"));
    multiCoreSiteMap.put("ha.zookeeper.quorum", new MultipleHostTopologyUpdater("ZOOKEEPER_SERVER"));
    multiYarnSiteMap.put("hadoop.registry.zk.quorum", new MultipleHostTopologyUpdater("ZOOKEEPER_SERVER"));
    multiYarnSiteMap.put("yarn.resourcemanager.zk-address", new MultipleHostTopologyUpdater("ZOOKEEPER_SERVER"));
    multiKafkaBrokerMap.put("zookeeper.connect", new MultipleHostTopologyUpdater("ZOOKEEPER_SERVER"));
    multiAccumuloSiteMap.put("instance.zookeeper.host", new MultipleHostTopologyUpdater("ZOOKEEPER_SERVER"));

    // STORM
    stormSiteMap.put("nimbus.host", new SingleHostTopologyUpdater("NIMBUS"));
    stormSiteMap.put("nimbus_hosts", new SingleHostTopologyUpdater("NIMBUS"));
    stormSiteMap.put("drpc_server_host", new SingleHostTopologyUpdater("DRPC_SERVER"));
    stormSiteMap.put("drpc.servers", new SingleHostTopologyUpdater("DRPC_SERVER"));
    stormSiteMap.put("storm_ui_server_host", new SingleHostTopologyUpdater("STORM_UI_SERVER"));
    stormSiteMap.put("worker.childopts", new OptionalSingleHostTopologyUpdater("GANGLIA_SERVER"));
    stormSiteMap.put("supervisor.childopts", new OptionalSingleHostTopologyUpdater("GANGLIA_SERVER"));
    stormSiteMap.put("nimbus.childopts", new OptionalSingleHostTopologyUpdater("GANGLIA_SERVER"));
    // Storm AMS integration
    stormSiteNonTopologyMap.put("metrics.reporter.register", new NonTopologyUpdater() {
      @Override
      public String updateForClusterCreate(String propertyName,
                                           String origValue,
                                           Map<String, Map<String, String>> properties,
                                           ClusterTopology topology) {

        if (topology.getBlueprint().getServices().contains("AMBARI_METRICS")) {
          final String amsReporterClass = "org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter";
          if (origValue == null || origValue.isEmpty()) {
            return amsReporterClass;
          }
        }
        return origValue;
      }
    });

    multiStormSiteMap.put("supervisor_hosts",
      new YamlMultiValuePropertyDecorator(new MultipleHostTopologyUpdater("SUPERVISOR")));
    multiStormSiteMap.put("storm.zookeeper.servers",
      new YamlMultiValuePropertyDecorator(new MultipleHostTopologyUpdater("ZOOKEEPER_SERVER")));
    multiStormSiteMap.put("nimbus.seeds",
      new YamlMultiValuePropertyDecorator(new MultipleHostTopologyUpdater("NIMBUS"), YamlMultiValuePropertyDecorator.FlowStyle.PLAIN));


    // FALCON
    falconStartupPropertiesMap.put("*.broker.url", new SingleHostTopologyUpdater("FALCON_SERVER"));

    // KAFKA
    kafkaBrokerMap.put("kafka.ganglia.metrics.host", new OptionalSingleHostTopologyUpdater("GANGLIA_SERVER"));
    // KAFKA AMS integration
    kafkaBrokerNonTopologyMap.put("kafka.metrics.reporters", new NonTopologyUpdater() {
      @Override
      public String updateForClusterCreate(String propertyName,
                                           String origValue,
                                           Map<String, Map<String, String>> properties,
                                           ClusterTopology topology) {

        if (topology.getBlueprint().getServices().contains("AMBARI_METRICS")) {
          final String amsReporterClass = "org.apache.hadoop.metrics2.sink.kafka.KafkaTimelineMetricsReporter";
          if (origValue == null || origValue.isEmpty()) {
            return amsReporterClass;
          } else if (!origValue.contains(amsReporterClass)) {
            return String.format("%s,%s", origValue, amsReporterClass);
          }
        }
        return origValue;
      }
    });

    // KNOX
    multiCoreSiteMap.put("hadoop.proxyuser.knox.hosts", new MultipleHostTopologyUpdater("KNOX_GATEWAY"));
    multiWebhcatSiteMap.put("webhcat.proxyuser.knox.hosts", new MultipleHostTopologyUpdater("KNOX_GATEWAY"));
    multiOozieSiteMap.put("hadoop.proxyuser.knox.hosts", new MultipleHostTopologyUpdater("KNOX_GATEWAY"));
    multiOozieSiteMap.put("oozie.service.ProxyUserService.proxyuser.knox.hosts", new MultipleHostTopologyUpdater("KNOX_GATEWAY"));

    // ATLAS
    atlasPropsMap.put("atlas.server.bind.address", new MultipleHostTopologyUpdater("ATLAS_SERVER"));
    atlasPropsMap.put("atlas.rest.address", new MultipleHostTopologyUpdater("ATLAS_SERVER", ',', true, true, true));
    atlasPropsMap.put("atlas.kafka.bootstrap.servers", new MultipleHostTopologyUpdater("KAFKA_BROKER"));
    atlasPropsMap.put("atlas.kafka.zookeeper.connect", new MultipleHostTopologyUpdater("ZOOKEEPER_SERVER"));
    atlasPropsMap.put("atlas.graph.index.search.solr.zookeeper-url", new MultipleHostTopologyUpdater("ZOOKEEPER_SERVER", ',', false, true, true));
    atlasPropsMap.put("atlas.graph.storage.hostname", new MultipleHostTopologyUpdater("ZOOKEEPER_SERVER"));
    atlasPropsMap.put("atlas.audit.hbase.zookeeper.quorum", new MultipleHostTopologyUpdater("ZOOKEEPER_SERVER"));

    // RANGER_ADMIN
    rangerAdminPropsMap.put("policymgr_external_url", new SingleHostTopologyUpdater("RANGER_ADMIN"));

    // RANGER ENV
    List<Map<String, PropertyUpdater>> configsWithRangerHdfsAuditDirProperty = ImmutableList.of(
      rangerEnvPropsMap,
      rangerYarnAuditPropsMap,
      rangerHdfsAuditPropsMap,
      rangerHbaseAuditPropsMap,
      rangerHiveAuditPropsMap,
      rangerKnoxAuditPropsMap,
      rangerKafkaAuditPropsMap,
      rangerStormAuditPropsMap,
      rangerAtlasAuditPropsMap
    );
    for (Map<String, PropertyUpdater> rangerAuditPropsMap: configsWithRangerHdfsAuditDirProperty) {
      rangerAuditPropsMap.put("xasecure.audit.destination.hdfs.dir", new OptionalSingleHostTopologyUpdater("NAMENODE"));
      // the same property updater must be used here as for fs.defaultFS in core-site
    }

    // RANGER KMS
    multiRangerKmsSiteMap.put("hadoop.kms.authentication.signer.secret.provider.zookeeper.connection.string",
      new MultipleHostTopologyUpdater("ZOOKEEPER_SERVER"));
    // Originally required due to AMBARI-4933. The default values in the stack are now
    // correct, so these updaters no longer seem to be required, but they are kept in
    // case an existing blueprint still contains an old value.
    addUnitPropertyUpdaters();

    hawqSiteMap.put("hawq_master_address_host", new SingleHostTopologyUpdater("HAWQMASTER"));
    hawqSiteMap.put("hawq_standby_address_host", new SingleHostTopologyUpdater("HAWQSTANDBY"));
    hawqSiteMap.put("hawq_dfs_url", new SingleHostTopologyUpdater("NAMENODE"));

    // AMS
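    // the override below makes the collector web app bind to all interfaces
    // (BIND_ALL_IP_ADDRESS) rather than to a single resolved host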
    amsSiteMap.put("timeline.metrics.service.webapp.address", new SingleHostTopologyUpdater("METRICS_COLLECTOR") {
      @Override
      public String updateForClusterCreate(String propertyName, String origValue, Map<String, Map<String, String>> properties, ClusterTopology topology) {
        if (!origValue.startsWith(BIND_ALL_IP_ADDRESS)) {
          return origValue.replace(origValue.split(":")[0], BIND_ALL_IP_ADDRESS);
        } else {
          return origValue;
        }
      }
    });

    // DRUID
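    // HostGroupUpdater.INSTANCE resolves %HOSTGROUP::name% tokens embedded in the value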
    druidCommon.put("metastore_hostname", HostGroupUpdater.INSTANCE);
    druidCommon.put("druid.metadata.storage.connector.connectURI", HostGroupUpdater.INSTANCE);
    druidCommon.put("druid.zk.service.host", new MultipleHostTopologyUpdater("ZOOKEEPER_SERVER"));

    livy2Conf.put("livy.server.recovery.state-store.url", new MultipleHostTopologyUpdater("ZOOKEEPER_SERVER"));
  }
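
For orientation, here is a minimal sketch of how a registry like this is typically consumed. It is not part of BlueprintConfigurationProcessor: the helper name resolveProperty and the standalone framing are assumptions for illustration, though the updateForClusterCreate signature matches the overrides shown above.

  // Hypothetical helper, for illustration only (not in the Ambari sources):
  // given one of the category maps built above (e.g. singleHostTopologyUpdaters),
  // find the updater registered for a (configType, propertyName) pair and let it
  // rewrite the property value for cluster creation.
  static String resolveProperty(Map<String, Map<String, PropertyUpdater>> updaters,
                                String configType,
                                String propertyName,
                                String origValue,
                                Map<String, Map<String, String>> properties,
                                ClusterTopology topology) {
    Map<String, PropertyUpdater> typeUpdaters = updaters.get(configType);
    if (typeUpdaters == null || !typeUpdaters.containsKey(propertyName)) {
      return origValue; // nothing registered for this property; leave it untouched
    }
    // e.g. the OptionalSingleHostTopologyUpdater("NAMENODE") registered for
    // fs.defaultFS would substitute the NAMENODE host into the value here
    return typeUpdaters.get(propertyName)
        .updateForClusterCreate(propertyName, origValue, properties, topology);
  }

Under these assumptions, resolving core-site's fs.defaultFS would amount to calling resolveProperty(singleHostTopologyUpdaters, "core-site", "fs.defaultFS", value, properties, topology).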