public TimelineMetrics getMetricRecords()

in ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/PhoenixHBaseAccessor.java [954:1060]


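  /**
   * Fetches timeline metric records that match the given condition.
   *
   * Point-in-time requests are answered with the latest record per metric;
   * range requests run a UUID-based query for known metrics and, separately,
   * a query for transient metric names. Two server-side failures get special
   * handling: an expired Phoenix hash-join cache triggers a fallback to
   * sort-merge joins, and HBase's "maxStamp is smaller than minStamp" error
   * is treated as an empty result.
   *
   * @param condition       metric UUIDs and/or transient metric names plus a
   *                        time range, or a point-in-time request
   * @param metricFunctions aggregate functions to apply, keyed by metric name
   * @return the matching metrics, possibly empty
   */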
  public TimelineMetrics getMetricRecords(
    final Condition condition, Multimap<String, List<Function>> metricFunctions)
    throws SQLException, IOException {

    validateConditionIsNotEmpty(condition);

    Connection conn = getConnection();
    PreparedStatement stmt = null;
    ResultSet rs = null;
    TimelineMetrics metrics = new TimelineMetrics();

    try {
      // A point-in-time request returns only the latest record per metric.
      if (condition.isPointInTime()) {
        getLatestMetricRecords(condition, conn, metrics);
      } else {
        if (condition.getEndTime() >= condition.getStartTime()) {

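          // Metrics that resolved to UUIDs are fetched with the standard
          // Phoenix query.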
          if (CollectionUtils.isNotEmpty(condition.getUuids())) {
            stmt = PhoenixTransactSQL.prepareGetMetricsSqlStmt(conn, condition);
            rs = stmt.executeQuery();
            while (rs.next()) {
              appendMetricFromResultSet(metrics, condition, metricFunctions, rs);
            }
          }

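          // Transient metric names (those without a UUID mapping) go through
          // a dedicated SQL statement and read helper.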
          if (CollectionUtils.isNotEmpty(condition.getTransientMetricNames())) {
            // Close the previous query's resources before the variables are
            // reused; otherwise they leak when both branches execute.
            if (rs != null) {
              rs.close();
            }
            if (stmt != null) {
              stmt.close();
            }
            stmt = PhoenixTransactSQL.prepareTransientMetricsSqlStmt(conn, condition);
            if (stmt != null) {
              rs = stmt.executeQuery();
              while (rs.next()) {
                TransientMetricReadHelper.appendMetricFromResultSet(metrics, condition, metricFunctions, rs);
              }
            }
          }
        } else {
          LOG.warn("Skipping metrics query because endTime < startTime");
        }
      }

    } catch (PhoenixIOException pioe) {
      Throwable pioe2 = pioe.getCause();
      // Distinguish the "Could not find hash cache for joinId" failure from
      // other PhoenixIOExceptions: look for HashJoinRegionScanner in the
      // cause's stack trace.
      if (pioe2 instanceof PhoenixIOException &&
        pioe2.getCause() instanceof DoNotRetryIOException) {
        boolean hashJoinCacheMiss = false;
        for (StackTraceElement ste : pioe2.getCause().getStackTrace()) {
          // getClassName() returns the fully qualified name, so match the suffix.
          if (ste.getClassName().endsWith("HashJoinRegionScanner")) {
            hashJoinCacheMiss = true;
            break;
          }
        }

        if (hashJoinCacheMiss) {
          LOG.error("The hash cache might have expired and been removed. Try to" +
            " increase the cache size by setting a bigger value for " +
            "phoenix.coprocessor.maxMetaDataCacheSize in ams-hbase-site config." +
            " Falling back to sort-merge join algorithm.");
          PhoenixTransactSQL.setSortMergeJoinEnabled(true);
        }
      }
      throw pioe;
    } catch (RuntimeException ex) {
      // Distinguish a genuine runtime failure from the "maxStamp is smaller
      // than minStamp" exception thrown by HBase's TimeRange, which this
      // method treats as an empty result.
      Throwable io = ex.getCause();
      boolean emptyTimeRange = false;
      if (io != null) {
        for (StackTraceElement ste : io.getStackTrace()) {
          // getClassName() returns the fully qualified name, so match the suffix.
          if (ste.getClassName().endsWith("TimeRange")) {
            emptyTimeRange = true;
            break;
          }
        }
      }
      if (emptyTimeRange) {
        // Log at debug level and return empty metrics.
        LOG.debug("Ignoring 'maxStamp is smaller than minStamp' from TimeRange", io);
        return new TimelineMetrics();
      } else {
        throw ex;
      }

    } finally {
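      // Close resources in reverse order of acquisition; failures on close
      // are ignored because there is nothing useful to do about them here.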
      if (rs != null) {
        try {
          rs.close();
        } catch (SQLException e) {
          // Ignore
        }
      }
      if (stmt != null) {
        try {
          stmt.close();
        } catch (SQLException e) {
          // Ignore
        }
      }
      if (conn != null) {
        try {
          conn.close();
        } catch (SQLException sql) {
          // Ignore
        }
      }
    }

    LOG.debug("Metrics records size: " + metrics.getMetrics().size());
    return metrics;
  }
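
Both catch blocks use the same technique: instead of relying on wrapped exception types, they scan the cause's stack trace for a known class name. The sketch below is a self-contained illustration of that pattern; StackTraceProbe and causeHasFrameFrom are hypothetical names for this example, not part of PhoenixHBaseAccessor.

import java.util.Arrays;

// Illustrative sketch only: probe an exception's cause for a stack frame
// from a given class, the way getMetricRecords() detects the
// HashJoinRegionScanner and TimeRange failures.
public final class StackTraceProbe {

  /** True if the cause's stack trace contains a frame from the named class. */
  static boolean causeHasFrameFrom(Throwable t, String simpleClassName) {
    Throwable cause = t.getCause();
    if (cause == null) {
      return false;
    }
    return Arrays.stream(cause.getStackTrace())
      // getClassName() is fully qualified, so match on the suffix.
      .anyMatch(ste -> ste.getClassName().endsWith(simpleClassName));
  }

  public static void main(String[] args) {
    RuntimeException outer = new RuntimeException(
      new IllegalArgumentException("maxStamp is smaller than minStamp"));
    // Prints false: the synthetic cause was not created inside TimeRange.
    System.out.println(causeHasFrameFrom(outer, "TimeRange"));
  }
}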