def _prepare_upload_request()

in dataflux_pytorch/multipart_upload/multipart.py


    def _prepare_upload_request(self):
        """Prepare the contents of HTTP request to upload a part.

        This covers everything that must happen before the request is issued
        and that does not require network I/O. This is based on the
        `sans-I/O`_ philosophy.

        For the time being, this **does require** reading the part's payload
        out of the shared buffer (``self._view``). However, this will
        (almost) certainly not be network I/O.

        Returns:
            Tuple[str, str, bytes, Mapping[str, str]]: The quadruple

              * HTTP verb for the request (always PUT)
              * the URL for the request
              * the body of the request
              * headers for the request

            The headers are the ``_headers`` of the current instance.

        Raises:
            ValueError: If this part has already been uploaded.

        .. _sans-I/O: https://sans-io.readthedocs.io/
        """
        if self.finished:
            raise ValueError("This part has already been uploaded.")

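        # The XML multipart API addresses a part by its part number and the
        # upload ID, passed as query parameters on the object URL.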
        MPU_PART_QUERY_TEMPLATE = "?partNumber={part}&uploadId={upload_id}"

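        # Materialize this part's byte range from the shared memoryview as an
        # independent, immutable bytes payload.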
        payload = bytes(self._view[self._start:self._end])

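        # Compute the configured checksum (e.g. crc32c or md5) over the
        # payload so it can be validated once the upload response is
        # processed; _get_checksum_object returns None when no checksum
        # type is configured.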
        self._checksum_object = _helpers._get_checksum_object(
            self._checksum_type)
        if self._checksum_object is not None:
            self._checksum_object.update(payload)

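        # Build the final part URL, e.g.
        # <upload_url>?partNumber=3&uploadId=<id> (values illustrative).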
        part_query = MPU_PART_QUERY_TEMPLATE.format(part=self._part_number,
                                                    upload_id=self._upload_id)
        upload_url = self.upload_url + part_query
        return "PUT", upload_url, payload, self._headers