bool validateBinaryModel()

in inference/src/translator/byte_array_util.cpp [34:82]
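The function relies on two helpers defined elsewhere in byte_array_util.cpp: a Header record describing each item in the file, and a get<T> accessor that returns a typed pointer into the buffer and advances the cursor past what it read. Their definitions are not part of this listing; the sketch below is a plausible reconstruction inferred from the call sites (the field names come straight from the code, but the real struct may carry additional fields, such as the item's type).

// Hypothetical reconstruction -- the real definitions live in the
// inference sources and may differ in detail.
struct Header {
  uint64_t nameLength;   // bytes in the item's name
  uint64_t shapeLength;  // number of int entries in the item's shape
  uint64_t dataLength;   // bytes of tensor data for the item
};

// Returns a typed pointer to `num` elements of T at `current` and advances
// `current` past them; consecutive calls therefore read consecutive fields.
template <typename T>
const T* get(const void*& current, uint64_t num = 1) {
  const T* ptr = reinterpret_cast<const T*>(current);
  current = ptr + num;
  return ptr;
}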


bool validateBinaryModel(const AlignedMemory& model, uint64_t fileSize) {
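  // Layout being validated, in file order:
  //   [binaryFileVersion : u64][numHeaders : u64]
  //   [Header x numHeaders]
  //   [names..., shapes...]    one name (nameLength bytes) and one shape
  //                            (shapeLength ints) per header
  //   [alignedOffset : u64][padding : alignedOffset bytes]
  //   [tensor data...]         dataLength bytes per header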
  const void* current = model.begin();
  uint64_t memoryNeeded =
      sizeof(uint64_t) * 2;  // Running total of the bytes a complete file must contain; starts with the two fields below
  uint64_t numHeaders;
  if (fileSize >= memoryNeeded) {  // We have enough filesize to fetch the headers.
    uint64_t binaryFileVersion = *get<uint64_t>(current);  // consume the version field; it is not checked here
    (void)binaryFileVersion;               // silence the unused-variable warning
    numHeaders = *get<uint64_t>(current);  // number of item headers that follow
  } else {
    return false;
  }
  memoryNeeded += numHeaders * sizeof(Header);
  const Header* headers;
  if (fileSize >= memoryNeeded) {
    headers = get<Header>(current, numHeaders);  // read that many headers
  } else {
    return false;
  }

  // Calculate how many bytes we are going to need for just the names and the shapes
  for (uint64_t i = 0; i < numHeaders; i++) {
    memoryNeeded += headers[i].nameLength + headers[i].shapeLength * sizeof(int);
    // Advance `current` past this item's name and shape.
    get<char>(current, headers[i].nameLength);
    get<int>(current, headers[i].shapeLength);
  }
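  // Example: two items whose names are 5 and 7 bytes long, with shapes of
  // 2 and 4 dimensions, add (5 + 2*4) + (7 + 4*4) = 36 bytes here on a
  // platform where sizeof(int) == 4.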

  // Before the tensor data there is a small padding to ensure alignment.
  // Account for the offset field itself before reading it, so a truncated
  // file cannot trigger an out-of-bounds read at this point.
  uint64_t aligned_offset;
  memoryNeeded += sizeof(uint64_t);
  if (fileSize >= memoryNeeded) {
    aligned_offset = *get<uint64_t>(current);  // number of padding bytes aligning the tensor data to a 256-byte boundary
    memoryNeeded += aligned_offset;
  } else {
    return false;
  }

  // Finally, account for the tensor data itself:
  for (uint64_t i = 0; i < numHeaders; i++) {
    memoryNeeded += headers[i].dataLength;
  }

  // If this final check passes, the file is at least big enough to contain the model.
  return fileSize >= memoryNeeded;
}
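
A hedged usage sketch: how a caller might pull a model file into aligned memory and gate loading on this check. Everything here beyond validateBinaryModel itself is an assumption: the AlignedMemory size/alignment constructor and begin() accessor are inferred from how the class is used above, loadAndValidate is a hypothetical helper, and the 256-byte alignment mirrors the padding target mentioned in the function.

#include <cstdint>
#include <fstream>
#include <string>

bool loadAndValidate(const std::string& path) {
  std::ifstream in(path, std::ios::binary | std::ios::ate);
  if (!in) return false;
  const uint64_t fileSize = static_cast<uint64_t>(in.tellg());
  in.seekg(0);

  // Constructor signature assumed; the real AlignedMemory lives in the
  // inference sources.
  AlignedMemory model(fileSize, /*alignment=*/256);
  in.read(reinterpret_cast<char*>(model.begin()), static_cast<std::streamsize>(fileSize));
  if (!in) return false;

  // Only hand the bytes to the actual loader if the bookkeeping checks out.
  return validateBinaryModel(model, fileSize);
}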