std::vector<fl::Variable> forward()

in recipes/slimIPL/10h_supervised_slimipl.cpp [37:70]


  std::vector<fl::Variable> forward(
      const std::vector<fl::Variable>& input) override {
    auto out = input[0];
    auto xSizes = input[1].array();
    // Optional scalar dropout override, passed as a single-element array in
    // input[2]; defaults to 0.5 when absent. A negative override leaves the
    // transformer layers' configured rates untouched (see below).
    float dp;
    if (input.size() > 2) {
      dp = af::sum<float>(input[2].array());
    } else {
      dp = 0.5;
    }
    // expected input dims T x C x 1 x B
    int T = out.dims(0), B = out.dims(3);
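    // Rescale each utterance's unpadded length to the T frames of this batch
    // and build a T x B mask: true for real frames, false for padding.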
    auto inputMaxSize = af::tile(af::max(xSizes), 1, B);
    af::array inputNotPaddedSize = af::ceil(xSizes * T / inputMaxSize);
    auto padMask = af::iota(af::dim4(T, 1), af::dim4(1, B)) <
        af::tile(inputNotPaddedSize, T, 1);
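    // Run the conv frontend module-by-module rather than as a whole, so the
    // dropout stage at index 4 can be replaced with the override below.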
    out = convFrontend_->modules()[0]->forward({out}).front();
    out = convFrontend_->modules()[1]->forward({out}).front();
    out = convFrontend_->modules()[2]->forward({out}).front();
    out = convFrontend_->modules()[3]->forward({out}).front();
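    // Stand-in for frontend module 4: dropout with probability dp, active
    // only in train mode.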
    if (train_) {
      out = dropout(out, dp);
    }
    out = convFrontend_->modules()[5]->forward({out}).front();
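    // Transformer stack; a non-negative dp also overrides each layer's
    // dropout and layer-drop probabilities.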
    for (size_t trIdx = 0; trIdx < transformers_.size(); ++trIdx) {
      if (dp >= 0) {
        transformers_[trIdx]->setDropout(dp);
        transformers_[trIdx]->setLayerDropout(dp);
      }
      out = transformers_[trIdx]->forward({out, fl::noGrad(padMask)}).front();
    }
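    // Final linear projection; cast the result back to the input's type.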
    out = linear_->forward(out);
    return {out.as(input[0].type())};
  }
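
For context, here is a minimal sketch of how this forward pass could be driven. The `network` handle, the tensor sizes, and the 0.3 override value are illustrative assumptions, not taken from the recipe; only the input convention ({features, sizes, optional dropout override}) comes from the code above.

  // Sketch only: assumes `network` points at an instance of the module above
  // and that ArrayFire/flashlight are initialized. T, C, B are made up.
  int T = 200, C = 80, B = 4;
  auto feats = fl::Variable(af::randu(T, C, 1, B), false); // T x C x 1 x B
  // Per-utterance lengths before padding (here all full length), 1 x B.
  auto sizes = fl::Variable(af::constant(T, 1, B), false);
  // Optional single-element dropout override; negative keeps the defaults.
  auto dpOverride = fl::Variable(af::constant(0.3, 1), false);
  network->train(); // enable the dropout path guarded by train_
  auto logits = network->forward({feats, sizes, dpOverride}).front();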