src/openai/resources/images.py [118:217]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        self,
        *,
        image: Union[FileTypes, List[FileTypes]],
        prompt: str,
        background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN,
        mask: FileTypes | NotGiven = NOT_GIVEN,
        model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN,
        response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
        size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]]
        | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> ImagesResponse:
        """Creates an edited or extended image given one or more source images and a
        prompt.

        This endpoint only supports `gpt-image-1` and `dall-e-2`.

        Args:
          image: The image(s) to edit. Must be a supported image file or an array of images.

              For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
              25MB. You can provide up to 16 images.

              For `dall-e-2`, you can only provide one image, and it should be a square `png`
              file less than 4MB.

          prompt: A text description of the desired image(s). The maximum length is 1000
              characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.

          background: Allows you to set transparency for the background of the generated
              image(s). This parameter is only supported for `gpt-image-1`. Must be one of
              `transparent`, `opaque` or `auto` (the default). When `auto` is used, the model
              will automatically determine the best background for the image.

              If `transparent`, the output format needs to support transparency, so it should
              be set to either `png` (default value) or `webp`.

          mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
              indicate where `image` should be edited. If multiple images are provided, the
              mask is applied to the first image. Must be a valid PNG file, less than
              4MB, and have the same dimensions as `image`.

          model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
              supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
              is used.

          n: The number of images to generate. Must be between 1 and 10.

          quality: The quality of the image that will be generated. `high`, `medium` and `low` are
              only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
              Defaults to `auto`.

          response_format: The format in which the generated images are returned. Must be one of `url` or
              `b64_json`. URLs are only valid for 60 minutes after the image has been
              generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
              will always return base64-encoded images.

          size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
              (landscape), `1024x1536` (portrait), or `auto` (default value) for
              `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        body = deepcopy_minimal(
            {
                "image": image,
                "prompt": prompt,
                "background": background,
                "mask": mask,
                "model": model,
                "n": n,
                "quality": quality,
                "response_format": response_format,
                "size": size,
                "user": user,
            }
        )
        files = extract_files(cast(Mapping[str, object], body), paths=[["image"], ["image", "<array>"], ["mask"]])
        # Note that the actual Content-Type header sent to the server will
        # contain a `boundary` parameter, e.g.
        # multipart/form-data; boundary=---abc--
        extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
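
A minimal usage sketch for the method above (presumably the sync
`Images.edit`). The file names and prompt below are illustrative, not part of
the SDK, and `OPENAI_API_KEY` is assumed to be set in the environment:

    from openai import OpenAI

    client = OpenAI()  # picks up OPENAI_API_KEY from the environment

    # Edit the first image wherever the mask is fully transparent.
    # "lounge.png" and "mask.png" are placeholder file names.
    with open("lounge.png", "rb") as image, open("mask.png", "rb") as mask:
        response = client.images.edit(
            model="gpt-image-1",
            image=image,
            mask=mask,
            prompt="Add a flamingo-shaped pool float to the pool",
            size="1024x1024",
        )

    # gpt-image-1 always returns base64-encoded images, so read `b64_json`
    # rather than `url` (see the `response_format` note in the docstring).
    if response.data and response.data[0].b64_json:
        print(response.data[0].b64_json[:80])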



src/openai/resources/images.py [442:541]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        self,
        *,
        image: Union[FileTypes, List[FileTypes]],
        prompt: str,
        background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN,
        mask: FileTypes | NotGiven = NOT_GIVEN,
        model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN,
        response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
        size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]]
        | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> ImagesResponse:
        """Creates an edited or extended image given one or more source images and a
        prompt.

        This endpoint only supports `gpt-image-1` and `dall-e-2`.

        Args:
          image: The image(s) to edit. Must be a supported image file or an array of images.

              For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
              25MB. You can provide up to 16 images.

              For `dall-e-2`, you can only provide one image, and it should be a square `png`
              file less than 4MB.

          prompt: A text description of the desired image(s). The maximum length is 1000
              characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.

          background: Allows you to set transparency for the background of the generated
              image(s). This parameter is only supported for `gpt-image-1`. Must be one of
              `transparent`, `opaque` or `auto` (the default). When `auto` is used, the model
              will automatically determine the best background for the image.

              If `transparent`, the output format needs to support transparency, so it should
              be set to either `png` (default value) or `webp`.

          mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
              indicate where `image` should be edited. If multiple images are provided, the
              mask is applied to the first image. Must be a valid PNG file, less than
              4MB, and have the same dimensions as `image`.

          model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
              supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
              is used.

          n: The number of images to generate. Must be between 1 and 10.

          quality: The quality of the image that will be generated. `high`, `medium` and `low` are
              only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
              Defaults to `auto`.

          response_format: The format in which the generated images are returned. Must be one of `url` or
              `b64_json`. URLs are only valid for 60 minutes after the image has been
              generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
              will always return base64-encoded images.

          size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
              (landscape), `1024x1536` (portrait), or `auto` (default value) for
              `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        body = deepcopy_minimal(
            {
                "image": image,
                "prompt": prompt,
                "background": background,
                "mask": mask,
                "model": model,
                "n": n,
                "quality": quality,
                "response_format": response_format,
                "size": size,
                "user": user,
            }
        )
        files = extract_files(cast(Mapping[str, object], body), paths=[["image"], ["image", "<array>"], ["mask"]])
        # Note that the actual Content-Type header sent to the server will
        # contain a `boundary` parameter, e.g.
        # multipart/form-data; boundary=---abc--
        extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
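
The second block ([442:541]) appears to be the async counterpart (presumably
`AsyncImages.edit`, reached via `AsyncOpenAI`); a sketch of calling it, with
the same placeholder inputs as above:

    import asyncio

    from openai import AsyncOpenAI

    client = AsyncOpenAI()  # picks up OPENAI_API_KEY from the environment

    async def main() -> None:
        # Same placeholder file names as the sync sketch.
        with open("lounge.png", "rb") as image, open("mask.png", "rb") as mask:
            response = await client.images.edit(
                model="gpt-image-1",
                image=image,
                mask=mask,
                prompt="Add a flamingo-shaped pool float to the pool",
                size="1024x1024",
            )
        if response.data and response.data[0].b64_json:
            print(response.data[0].b64_json[:80])

    asyncio.run(main())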



