int test()

in caffe/tools/caffe.cpp [277:436]
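
Runs a standalone scoring pass over a trained model: the net is built in TEST
phase, FLAGS_iterations forward passes are executed, the loss and any small
(scalar-like) output blobs are averaged and logged, and larger output blobs
can optionally be dumped to files controlled by FLAGS_blob_prefix.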


int test() {
  CHECK_GT(FLAGS_model.size(), 0) << "Need a model definition to score.";
  CHECK_GT(FLAGS_weights.size(), 0) << "Need model weights to score.";
  vector<string> stages = get_stages_from_flags();

  // Set device id and mode
  vector<int> gpus;
  get_gpus(&gpus);
  if (gpus.size() != 0) {
    LOG(INFO) << "Use GPU with device ID " << gpus[0];
#ifndef CPU_ONLY
    cudaDeviceProp device_prop;
    cudaGetDeviceProperties(&device_prop, gpus[0]);
    LOG(INFO) << "GPU device name: " << device_prop.name;
#endif
    Caffe::SetDevice(gpus[0]);
    Caffe::set_mode(Caffe::GPU);
  } else {
    LOG(INFO) << "Use CPU.";
    Caffe::set_mode(Caffe::CPU);
  }
  // Instantiate the caffe net.
  Net<float> caffe_net(FLAGS_model, caffe::TEST, FLAGS_level, &stages);
  std::vector<std::string> model_names;
  boost::split(model_names, FLAGS_weights, boost::is_any_of(","));
  for (int i = 0; i < model_names.size(); ++i) {
    LOG(INFO) << "Copy weights from " << model_names[i];
    caffe_net.CopyTrainedLayersFrom(model_names[i]);
  }
  LOG(INFO) << "Running for " << FLAGS_iterations << " iterations.";

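  // Bookkeeping for the output blobs: output_to_cmd marks which blobs are
  // small enough to print to the console; test_score accumulates those values
  // across iterations and test_score_output_id maps each entry back to its
  // blob index; outfiles holds one stream per large blob when all iterations
  // are written to a single file per blob.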
  vector<int> test_score_output_id;
  vector<int> output_to_cmd;
  vector<float> test_score;
  vector<ofstream> outfiles;
  float loss = 0;
  for (int i = 0; i < FLAGS_iterations; ++i) {
    float iter_loss;
    const vector<Blob<float>*>& result = caffe_net.Forward(&iter_loss);
    loss += iter_loss;

    if (i == 0) {
      // print a blob to the console only if it has fewer than 4 elements;
      // larger blobs are written to files instead (when a prefix is given)
      for (int j = 0; j < result.size(); ++j)
        output_to_cmd.push_back(result[j]->count() < 4);
    }

    int idx = 0;
    for (int j = 0; j < result.size(); ++j) {
      if (output_to_cmd[j] == 0) continue;

      // accumulate the test score across iterations
      const float* result_vec = result[j]->cpu_data();
      for (int k = 0; k < result[j]->count(); ++k, ++idx) {
        const float score = result_vec[k];
        if (i == 0) {
          test_score.push_back(score);
          test_score_output_id.push_back(j);
        } else {
          test_score[idx] += score;
        }

        // output the score to the console
        const std::string& output_name = caffe_net.blob_names()[
         caffe_net.output_blob_indices()[j]];
        LOG(INFO) << "Batch " << i << ", " << output_name << " = " << score;
      }
    }

    // save the large output blobs to files, but only if a prefix was given
    if (FLAGS_blob_prefix.empty()) continue;

    if (FLAGS_save_seperately) {
      // save each blob to a separate file for every iteration
      for (int j = 0; j < result.size(); ++j) {
        if (output_to_cmd[j] != 0) continue;

        char str[16];
        sprintf(str, "%05d", i);
        string filename = FLAGS_blob_prefix + string(str) + "_" +
            caffe_net.blob_names()[caffe_net.output_blob_indices()[j]];

        ofstream outfile(filename, std::ios::binary);

        if (FLAGS_binary_mode) {
          // optional header: the number of axes followed by the size of each axis
          if (FLAGS_blob_header) {
            int ns = result[j]->num_axes();
            outfile.write((const char*)(&ns), sizeof(int));
            for (int k = 0; k < ns; ++k) {
              int sk = result[j]->shape(k);
              outfile.write((const char*)(&sk), sizeof(int));
            }
          }
          int sz = result[j]->count();
          outfile.write((const char*)result[j]->cpu_data(), sizeof(float)*sz);
        } else {
          // plain-text mode: one value per line
          string result_buffer;
          const float* result_vec = result[j]->cpu_data();
          for (int k = 0; k < result[j]->count(); ++k) {
            result_buffer += std::to_string(result_vec[k]) + "\n";
          }
          outfile.write(result_buffer.c_str(), result_buffer.size());
        }
        outfile.close();

        // output the info to the console
        const std::string& output_name = caffe_net.blob_names()[
         caffe_net.output_blob_indices()[j]];
        LOG(INFO) << "Batch " << i << ", " << output_name;
      }
    } else {
      // append every iteration of each large blob to a single file per blob
      outfiles.resize(result.size());
      for (int j = 0; j < result.size(); ++j) {
        if (output_to_cmd[j] != 0) continue;

        // open the corresponding file on the first iteration and write a small
        // header: the iteration count and the per-iteration element count
        if (i == 0) {
          const std::string& filename = caffe_net.blob_names()[
          caffe_net.output_blob_indices()[j]];
          outfiles[j].open(FLAGS_blob_prefix + filename + ".dat",
              std::ios::binary);
          int it = FLAGS_iterations, cj = result[j]->count();
          outfiles[j].write((const char*)(&it), sizeof(int));
          outfiles[j].write((const char*)(&cj), sizeof(int));
        }

        outfiles[j].write((const char*)result[j]->cpu_data(),
            sizeof(float)*result[j]->count());

        // close the file after the last iteration
        if (i == FLAGS_iterations - 1) outfiles[j].close();

        // output the info to the console
        const std::string& output_name = caffe_net.blob_names()[
         caffe_net.output_blob_indices()[j]];
        LOG(INFO) << "Batch " << i << ", " << output_name;
      }
    }
  }

  loss /= FLAGS_iterations;
  LOG(INFO) << "Loss: " << loss;
  for (int i = 0; i < test_score.size(); ++i) {
    const std::string& output_name = caffe_net.blob_names()[
         caffe_net.output_blob_indices()[test_score_output_id[i]]];
    const float loss_weight = caffe_net.blob_loss_weights()[
         caffe_net.output_blob_indices()[test_score_output_id[i]]];
    std::ostringstream loss_msg_stream;
    const float mean_score = test_score[i] / FLAGS_iterations;
    if (loss_weight) {
      loss_msg_stream << " (* " << loss_weight
          << " = " << loss_weight * mean_score << " loss)";
    }
    LOG(INFO) << output_name << " = " << mean_score << loss_msg_stream.str();
  }

  return 0;
}
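
A minimal sketch of a reader for the concatenated ".dat" files produced above
when FLAGS_save_seperately is false, assuming the reader runs on the same
architecture as the writer (same endianness and 32-bit float layout). The
filename "out_prob.dat" is only a placeholder for FLAGS_blob_prefix followed
by the blob name and ".dat"; each file starts with two ints (the iteration
count and the per-iteration element count) followed by the raw float data of
every iteration.

#include <fstream>
#include <iostream>
#include <vector>

int main() {
  // hypothetical filename: FLAGS_blob_prefix + <blob name> + ".dat"
  std::ifstream in("out_prob.dat", std::ios::binary);
  if (!in) {
    std::cerr << "cannot open file" << std::endl;
    return 1;
  }

  // header written on the first iteration: total iterations and the number
  // of elements the blob holds per iteration
  int iterations = 0, count = 0;
  in.read(reinterpret_cast<char*>(&iterations), sizeof(int));
  in.read(reinterpret_cast<char*>(&count), sizeof(int));

  // one block of `count` raw floats per iteration, in iteration order
  // (saved blobs always have at least 4 elements, so values[0] is valid)
  std::vector<float> values(count);
  for (int i = 0; i < iterations; ++i) {
    in.read(reinterpret_cast<char*>(values.data()), sizeof(float) * count);
    std::cout << "iteration " << i << ": first value = " << values[0]
              << std::endl;
  }
  return 0;
}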