public void validateRequiredProperties()

in ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintValidatorImpl.java [102:206]
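
Validates the blueprint's cluster-level configuration and the components declared in each host group: it rejects LZO compression settings when the GPL license has not been accepted, rejects unresolved secret references, and enforces component-specific constraints (MYSQL_SERVER vs. an existing Hive database, NameNode HA host-group mapping, and SQL Anywhere support for Hive and Oozie on HDP).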


  public void validateRequiredProperties() throws InvalidTopologyException, GPLLicenseNotAcceptedException {

    // we don't want to include default stack properties, so we can't just use the host group's
    // full properties; normalize a null result to an empty map so the checks below don't NPE
    Map<String, Map<String, String>> clusterConfigurations = blueprint.getConfiguration().getProperties();
    if (clusterConfigurations == null) {
      clusterConfigurations = new HashMap<>();
    }

    // we need real passwords, not secret references
    if (!clusterConfigurations.isEmpty()) {

      // need to reject blueprints that have LZO enabled if the Ambari Server hasn't been configured for it
      boolean gplEnabled = configuration.getGplLicenseAccepted();

      StringBuilder errorMessage = new StringBuilder();
      boolean containsSecretReferences = false;
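      // single pass over all cluster config types: fail fast on LZO-without-GPL, and collect
      // every secret reference so all offending properties are reported together afterwards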
      for (Map.Entry<String, Map<String, String>> configEntry : clusterConfigurations.entrySet()) {
        String configType = configEntry.getKey();
        if (configEntry.getValue() != null) {
          for (Map.Entry<String, String> propertyEntry : configEntry.getValue().entrySet()) {
            String propertyName = propertyEntry.getKey();
            String propertyValue = propertyEntry.getValue();
            if (propertyValue != null) {
              if (!gplEnabled && configType.equals("core-site")
                  && (propertyName.equals(LZO_CODEC_CLASS_PROPERTY_NAME) || propertyName.equals(CODEC_CLASSES_PROPERTY_NAME))
                  && propertyValue.contains(LZO_CODEC_CLASS)) {
                throw new GPLLicenseNotAcceptedException("Your Ambari server has not been configured to download LZO GPL software. " +
                    "Please refer to the documentation to configure Ambari before proceeding.");
              }
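              // secret references (placeholder values recognized by SecretReference.isSecret(),
              // e.g. a "SECRET:config-type:version:property" token) must be replaced with real
              // passwords before the blueprint is submitted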
              if (SecretReference.isSecret(propertyValue)) {
                errorMessage.append("  Config:").append(configType)
                    .append(" Property:").append(propertyName).append("\n");
                containsSecretReferences = true;
              }
            }
          }
        }
      }
      if (containsSecretReferences) {
        throw new InvalidTopologyException("Secret references are not allowed in blueprints; " +
          "replace the following properties with real passwords:\n" + errorMessage);
      }
    }


    // per-host-group checks: validate component-specific constraints for each declared component
    for (HostGroup hostGroup : blueprint.getHostGroups().values()) {
      // merged view of cluster-level and host-group-level config; note that putAll replaces the
      // whole inner map per config type, so host-group config fully shadows cluster config of that type
      Map<String, Map<String, String>> operationalConfiguration = new HashMap<>(clusterConfigurations);
      operationalConfiguration.putAll(hostGroup.getConfiguration().getProperties());
      for (String component : hostGroup.getComponentNames()) {
        // reject blueprints that declare a MYSQL_SERVER component while hive-env points Hive at an existing database
        if (component.equals("MYSQL_SERVER")) {
          Map<String, String> hiveEnvConfig = clusterConfigurations.get("hive-env");
          if (hiveEnvConfig != null && !hiveEnvConfig.isEmpty() && hiveEnvConfig.get("hive_database") != null
            && hiveEnvConfig.get("hive_database").startsWith("Existing")) {
            throw new InvalidTopologyException("Incorrect configuration: MYSQL_SERVER component is present but Hive" +
              " is configured to use an existing database!");
          }
        }
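        // for NameNode HA, dfs_ha_initial_namenode_active/standby may name host groups via the
        // token syntax matched by HostGroup.HOSTGROUP_REGEX (e.g. %HOSTGROUP::name%); check that
        // they resolve to exactly the host groups that carry the NAMENODE component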
        if (ClusterTopologyImpl.isNameNodeHAEnabled(clusterConfigurations) && component.equals("NAMENODE")) {
          Map<String, String> hadoopEnvConfig = clusterConfigurations.get("hadoop-env");
          if (hadoopEnvConfig != null && !hadoopEnvConfig.isEmpty()
              && hadoopEnvConfig.containsKey("dfs_ha_initial_namenode_active")
              && hadoopEnvConfig.containsKey("dfs_ha_initial_namenode_standby")) {
            String initialActive = hadoopEnvConfig.get("dfs_ha_initial_namenode_active");
            String initialStandby = hadoopEnvConfig.get("dfs_ha_initial_namenode_standby");
            ArrayList<HostGroup> hostGroupsForComponent = new ArrayList<>(blueprint.getHostGroupsForComponent(component));
            Set<String> givenHostGroups = new HashSet<>();
            givenHostGroups.add(initialActive);
            givenHostGroups.add(initialStandby);
            if (givenHostGroups.size() != hostGroupsForComponent.size()) {
              throw new IllegalArgumentException("NAMENODE HA host groups mapped incorrectly for properties" +
                " 'dfs_ha_initial_namenode_active' and 'dfs_ha_initial_namenode_standby'." +
                " Expected host groups are: " + hostGroupsForComponent);
            }
            if (HostGroup.HOSTGROUP_REGEX.matcher(initialActive).matches()
                && HostGroup.HOSTGROUP_REGEX.matcher(initialStandby).matches()) {
              // strike out every given value that names one of the NAMENODE host groups; anything
              // left over references a host group that does not actually carry NAMENODE
              for (HostGroup hostGroupForComponent : hostGroupsForComponent) {
                givenHostGroups.removeIf(given -> given.contains(hostGroupForComponent.getName()));
              }
              if (!givenHostGroups.isEmpty()) {
                throw new IllegalArgumentException("NAMENODE HA host groups mapped incorrectly for properties" +
                  " 'dfs_ha_initial_namenode_active' and 'dfs_ha_initial_namenode_standby'." +
                  " Expected host groups are: " + hostGroupsForComponent);
              }
            }
          }
        }

        if (component.equals("HIVE_METASTORE")) {
          Map<String, String> hiveEnvConfig = clusterConfigurations.get("hive-env");
          if (hiveEnvConfig != null && !hiveEnvConfig.isEmpty() && hiveEnvConfig.get("hive_database") != null
            && hiveEnvConfig.get("hive_database").equals("Existing SQL Anywhere Database")
            && VersionUtils.compareVersions(stack.getVersion(), "2.3.0.0") < 0
            && stack.getName().equalsIgnoreCase("HDP")) {
            throw new InvalidTopologyException("Incorrect configuration: SQL Anywhere db is available only for stack HDP-2.3+ " +
              "and repo version 2.3.2+!");
          }
        }

        if (component.equals("OOZIE_SERVER")) {
          Map<String, String> oozieEnvConfig = clusterConfigurations.get("oozie-env");
          if (oozieEnvConfig != null && !oozieEnvConfig.isEmpty() && oozieEnvConfig.get("oozie_database") != null
            && oozieEnvConfig.get("oozie_database").equals("Existing SQL Anywhere Database")
            && VersionUtils.compareVersions(stack.getVersion(), "2.3.0.0") < 0
            && stack.getName().equalsIgnoreCase("HDP")) {
            throw new InvalidTopologyException("Incorrect configuration: SQL Anywhere db is available only for stack HDP-2.3+ " +
              "and repo version 2.3.2+!");
          }
        }
      }
    }
  }
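
For illustration, here is a minimal standalone sketch of the secret-reference scan above. The "SECRET:" prefix test is an assumed stand-in for SecretReference.isSecret(), and the hand-built map stands in for blueprint.getConfiguration().getProperties(); neither is the real Ambari API.

import java.util.HashMap;
import java.util.Map;

public class SecretScanSketch {

  // assumed stand-in for SecretReference.isSecret(): treats any "SECRET:"-prefixed value as a reference
  static boolean isSecret(String value) {
    return value != null && value.startsWith("SECRET:");
  }

  public static void main(String[] args) {
    // hypothetical cluster configuration: config type -> (property name -> value)
    Map<String, Map<String, String>> clusterConfigurations = new HashMap<>();
    Map<String, String> hiveSite = new HashMap<>();
    hiveSite.put("javax.jdo.option.ConnectionPassword",
        "SECRET:hive-site:1:javax.jdo.option.ConnectionPassword");
    clusterConfigurations.put("hive-site", hiveSite);

    // same collect-then-report pattern as validateRequiredProperties(): gather every offending
    // property first, then fail once with the full list
    StringBuilder errorMessage = new StringBuilder();
    boolean containsSecretReferences = false;
    for (Map.Entry<String, Map<String, String>> configEntry : clusterConfigurations.entrySet()) {
      for (Map.Entry<String, String> propertyEntry : configEntry.getValue().entrySet()) {
        if (isSecret(propertyEntry.getValue())) {
          errorMessage.append("  Config:").append(configEntry.getKey())
              .append(" Property:").append(propertyEntry.getKey()).append("\n");
          containsSecretReferences = true;
        }
      }
    }
    if (containsSecretReferences) {
      System.out.println("Secret references are not allowed in blueprints; "
          + "replace the following properties with real passwords:\n" + errorMessage);
    }
  }
}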