protected int process(String url, StringBuilder output)

in src/java/org/apache/nutch/indexer/IndexingFiltersChecker.java [144:314]


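  /**
   * Fetch, parse, and run the indexing filters for a single URL, appending
   * the resulting document fields to {@code output}. Returns 0 on success
   * (including an empty or filter-rejected document) and -1 if the fetch
   * fails, the content type cannot be determined, or no parse is found.
   */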
  protected int process(String url, StringBuilder output) throws Exception {
    if (normalizers != null) {
      url = normalizers.normalize(url, URLNormalizers.SCOPE_DEFAULT);
    }

    LOG.info("fetching: " + url);

    CrawlDatum datum = new CrawlDatum();

    // copy any user-supplied metadata into the datum so that protocol and
    // indexing plugins can see it; null values are stored as empty strings
    for (Map.Entry<String, String> entry : metadata.entrySet()) {
      String value = entry.getValue();
      if (value == null)
        value = "";
      datum.getMetaData().put(new Text(entry.getKey()), new Text(value));
    }

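    // http.redirect.max caps how many redirects may be followed; a value of
    // 0 would disable following altogether, so fall back to 3 when redirect
    // following was explicitly requested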
    int maxRedirects = getConf().getInt("http.redirect.max", 3);
    if (followRedirects) {
      if (maxRedirects == 0) {
        LOG.info("Following max. 3 redirects (ignored http.redirect.max == 0)");
        maxRedirects = 3;
      } else {
        LOG.info("Following max. {} redirects", maxRedirects);
      }
    }

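    // initial fetch attempt; when robots.txt checking is enabled, a null
    // protocol output signals that the fetch was disallowed (handled below)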
    ProtocolOutput protocolOutput = getProtocolOutput(url, datum,
        checkRobotsTxt);
    Text turl = new Text(url);

    // Following redirects and not reached maxRedirects?
    int numRedirects = 0;
    while (protocolOutput != null && !protocolOutput.getStatus().isSuccess()
        && followRedirects && protocolOutput.getStatus().isRedirect()
        && maxRedirects >= numRedirects) {
      String[] redirArgs = protocolOutput.getStatus().getArgs();
      url = redirArgs[0]; // the first arg holds the redirect target URL
      LOG.info("Follow redirect to {}", url);

      if (normalizers != null) {
        url = normalizers.normalize(url, URLNormalizers.SCOPE_DEFAULT);
      }

      turl.set(url);

      // try again
      protocolOutput = getProtocolOutput(url, datum, checkRobotsTxt);
      numRedirects++;
    }

    if (checkRobotsTxt && protocolOutput == null) {
      System.err.println("Fetch disallowed by robots.txt");
      return -1;
    }

    if (!protocolOutput.getStatus().isSuccess()) {
      System.err.println("Fetch failed with protocol status: "
          + protocolOutput.getStatus());

      if (protocolOutput.getStatus().isRedirect()) {
        System.err.println("Redirect(s) not handled due to configuration.");
        System.err.println("Max Redirects to handle per config: " + maxRedirects);
        System.err.println("Number of Redirects handled: " + numRedirects);
      }
      return -1;
    }

    Content content = protocolOutput.getContent();

    if (content == null) {
      output.append("No content for " + url + "\n");
      return 0;
    }

    String contentType = content.getContentType();

    if (contentType == null) {
      LOG.error("Failed to determine content type!");
      return -1;
    }

    // store the guessed content type in the crawldatum
    datum.getMetaData().put(new Text(Metadata.CONTENT_TYPE),
        new Text(contentType));

    if (ParseSegment.isTruncated(content)) {
      LOG.warn("Content is truncated, parse may fail!");
    }

    ScoringFilters scfilters = new ScoringFilters(getConf());
    // call the scoring filters (before parsing)
    try {
      scfilters.passScoreBeforeParsing(turl, datum, content);
    } catch (Exception e) {
      LOG.warn("Couldn't pass score, url {} ({})", url, e);
    }

    LOG.info("parsing: {}", url);
    LOG.info("contentType: {}", contentType);

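    // run the configured parser plugins on the fetched content; the result
    // maps URL keys to parses and may contain more than one entry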
    ParseResult parseResult = new ParseUtil(getConf()).parse(content);

    NutchDocument doc = new NutchDocument();
    doc.add("id", url);
    Text urlText = new Text(url);

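    // no link database is available in this standalone check, so the
    // indexing filters receive null inlinks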
    Inlinks inlinks = null;
    Parse parse = parseResult.get(urlText);
    if (parse == null) {
      LOG.error("Failed to get parse from parse result");
      LOG.error("Available parses in parse result (by URL key):");
      for (Map.Entry<Text, Parse> entry : parseResult) {
        LOG.error("  " + entry.getKey());
      }
      LOG.error("Parse result does not contain a parse for URL to be checked:");
      LOG.error("  " + urlText);
      return -1;
    }

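    // compute the page signature used by Nutch for deduplication and expose
    // it to the indexers as the "digest" field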
    byte[] signature = SignatureFactory.getSignature(getConf()).calculate(content,
        parse);
    parse.getData().getContentMeta()
        .set(Nutch.SIGNATURE_KEY, StringUtil.toHexString(signature));
    String digest = parse.getData().getContentMeta().get(Nutch.SIGNATURE_KEY);
    doc.add("digest", digest);
    datum.setSignature(signature);

    // call the scoring filters (after parsing)
    try {
      scfilters.passScoreAfterParsing(turl, content, parseResult.get(turl));
    } catch (Exception e) {
      LOG.warn("Couldn't pass score, url {} ({})", turl, e);
    }

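    // load the configured indexing filter plugins and run the document
    // through them; any filter may reject the document by returning null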
    IndexingFilters indexers = new IndexingFilters(getConf());

    try {
      doc = indexers.filter(doc, parse, urlText, datum, inlinks);
    } catch (IndexingException e) {
      LOG.error("Indexing exception:", e);
    }

    if (doc == null) {
      output.append("Document discarded by indexing filter\n");
      return 0;
    }

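    // print every field of the final document; values longer than 100
    // characters are truncated unless dumpText is enabled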
    for (String fname : doc.getFieldNames()) {
      List<Object> values = doc.getField(fname).getValues();
      if (values != null) {
        for (Object value : values) {
          String str = value.toString();
          int maxLen = dumpText ? str.length() : Math.min(100, str.length());
          output.append(fname + " :\t" + str.substring(0, maxLen) + "\n");
        }
      }
    }
    
    output.append("\n"); // For readability if keepClientCnxOpen

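    // optionally write the document to the configured index back-ends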
    if (doIndex) {
      IndexWriters writers = IndexWriters.get(getConf());
      writers.open(getConf(), "IndexingFilterChecker");
      writers.write(doc);
      writers.close();
    }

    return 0;
  }
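
For reference, this method backs Nutch's indexchecker command-line tool; its flags map onto the fields used above (followRedirects, dumpText, doIndex, checkRobotsTxt). A typical invocation, assuming the standard flag names (check the usage output of your Nutch version):

  bin/nutch indexchecker -followRedirects -dumpText "https://example.org/"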