public synchronized Void call()

in hbase-hbck2/src/main/java/org/apache/hbase/hbck1/HBaseFsck.java [4719:4822]


    public synchronized Void call() throws InterruptedException, ExecutionException {
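      // Thread-safe collector for failures raised by the worker Runnables below.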
      final Vector<Exception> exceptions = new Vector<>();

      try {
        final FileStatus[] regionDirs = fs.listStatus(tableDir.getPath());
        final List<Future<?>> futures = new ArrayList<>(regionDirs.length);

        for (final FileStatus regionDir : regionDirs) {
          errors.progress();
          final String encodedName = regionDir.getPath().getName();
          // ignore directories that aren't hexadecimal
          if (!encodedName.toLowerCase(Locale.ROOT).matches("[0-9a-f]+")) {
            continue;
          }

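          // Stop submitting new tasks as soon as any already-submitted task has failed.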
          if (!exceptions.isEmpty()) {
            break;
          }

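          // Scan each region directory on the shared executor; this loop only submits tasks.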
          futures.add(executor.submit(new Runnable() {
            @Override
            public void run() {
              try {
                LOG.debug("Loading region info from hdfs:"+ regionDir.getPath());

                Path regioninfoFile = new Path(regionDir.getPath(),
                    HRegionFileSystem.REGION_INFO_FILE);
                boolean regioninfoFileExists = fs.exists(regioninfoFile);

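                // A missing .regioninfo alone is not fatal here; the region is still
                // recorded below and flagged as an orphan when .regioninfo is read later.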
                if (!regioninfoFileExists) {
                  // As tables grow, it becomes increasingly likely that by the time we
                  // reach a given region it will already be gone due to splits/merges.
                  if (!fs.exists(regionDir.getPath())) {
                    LOG.warn("By the time we tried to process this region dir it was already " +
                        "gone: " + regionDir.getPath());
                    return;
                  }
                }

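                // Register (or look up) the in-memory record for this region, then fill
                // in its HDFS-side entry under the hbi lock, since other work items may
                // touch the same HbckInfo concurrently.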
                HbckInfo hbi = HBaseFsck.this.getOrCreateInfo(encodedName);
                HdfsEntry he = new HdfsEntry();
                synchronized (hbi) {
                  if (hbi.getHdfsRegionDir() != null) {
                    errors.print("Directory " + encodedName + " duplicate?? " +
                                 hbi.getHdfsRegionDir());
                  }

                  he.hdfsRegionDir = regionDir.getPath();
                  he.hdfsRegionDirModTime = regionDir.getModificationTime();
                  he.hdfsRegioninfoFilePresent = regioninfoFileExists;
                  // we add to orphan list when we attempt to read .regioninfo

                  // Set a flag if this region contains only edits. This is the special
                  // case of a region left behind after a split.
                  he.hdfsOnlyEdits = true;
                  FileStatus[] subDirs = fs.listStatus(regionDir.getPath());
                  Path ePath = new Path(regionDir.getPath(), HConstants.RECOVERED_EDITS_DIR);
                  // WAS => WALSplitUtil.getRegionDirRecoveredEditsDir(regionDir.getPath());
                  for (FileStatus subDir : subDirs) {
                    errors.progress();
                    String sdName = subDir.getPath().getName();
                    if (!sdName.startsWith(".") && !sdName.equals(ePath.getName())) {
                      he.hdfsOnlyEdits = false;
                      break;
                    }
                  }
                  hbi.hdfsEntry = he;
                }
              } catch (Exception e) {
                LOG.error("Could not load region dir", e);
                exceptions.add(e);
              }
            }
          }));
        }

        // Ensure all pending tasks are complete (or that we run into an exception)
        for (Future<?> f : futures) {
          if (!exceptions.isEmpty()) {
            break;
          }
          try {
            f.get();
          } catch (ExecutionException e) {
            LOG.error("Unexpected exec exception!  Should've been caught already.  (Bug?)", e);
            // Shouldn't happen, we already logged/caught any exceptions in the Runnable
          }
        }
      } catch (IOException e) {
        LOG.error("Cannot execute WorkItemHdfsDir for " + tableDir, e);
        exceptions.add(e);
      } finally {
        if (!exceptions.isEmpty()) {
          errors.reportError(ErrorReporter.ERROR_CODE.RS_CONNECT_FAILURE,
              "Table Directory: " + tableDir.getPath().getName() +
                  " Unable to fetch all HDFS region information.");
          // Just throw the first exception as an indication something bad happened
          // Don't need to propagate all the exceptions, we already logged them all anyway
          throw new ExecutionException("First exception in WorkItemHdfsDir",
              exceptions.firstElement());
        }
      }
      return null;
    }
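
For orientation, the caller fans one such WorkItemHdfsDir out per table directory
(see the "Cannot execute WorkItemHdfsDir for " + tableDir log above) and drains the
results the same way this method drains its per-region futures. Below is a minimal,
self-contained sketch of that submit-then-drain pattern; class name, directory count,
and the per-task work are hypothetical, not the actual HBaseFsck driver code:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Vector;
    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;

    public class FanOutSketch {
      public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newFixedThreadPool(4);
        // Thread-safe collector, mirroring the Vector<Exception> used above.
        Vector<Exception> exceptions = new Vector<>();
        List<Future<?>> futures = new ArrayList<>();

        for (int i = 0; i < 16; i++) {
          if (!exceptions.isEmpty()) {
            break; // stop submitting once any worker has failed
          }
          final int dir = i;
          futures.add(executor.submit(() -> {
            try {
              System.out.println("scanned dir " + dir); // placeholder per-directory work
            } catch (Exception e) {
              exceptions.add(e); // record the failure; don't rethrow from the task
            }
          }));
        }

        // Drain the futures, short-circuiting on the first recorded failure.
        for (Future<?> f : futures) {
          if (!exceptions.isEmpty()) {
            break;
          }
          f.get();
        }

        executor.shutdown();
        if (!exceptions.isEmpty()) {
          throw new ExecutionException("First exception", exceptions.firstElement());
        }
      }
    }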