def load_items_df()

in python/sedona/spark/stac/collection_client.py [0:0]


    def load_items_df(self, bbox, datetime, ids, max_items):
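        """Load items from this STAC collection as a Spark DataFrame.

        ids may be a single ID string, a tuple, or a list of IDs; bbox may be a
        single [minx, miny, maxx, maxy] list or a list of such lists; datetime
        may be a single str/datetime or a list of [start, end] string pairs;
        max_items, if given, caps the number of returned rows.
        """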
        # Fast path: no filters provided, only a row cap. Push the limit down to
        # the STAC reader via the itemsLimitMax option instead of limiting after load.
        if not ids and not bbox and not datetime and max_items is not None:
            df = (
                self.spark.read.format("stac")
                .option("itemsLimitMax", max_items)
                .load(self.collection_url)
            )
        else:
            df = self.spark.read.format("stac").load(self.collection_url)
            # Normalize ids (single string or tuple -> list) and filter by item ID
            if ids:
                if isinstance(ids, tuple):
                    ids = list(ids)
                if isinstance(ids, str):
                    ids = [ids]
                df = df.filter(df.id.isin(ids))
            # Ensure bbox is a list of bboxes: wrap a single flat
            # [minx, miny, maxx, maxy] bbox into a one-element list
            if bbox and isinstance(bbox[0], float):
                bbox = [bbox]
            # Normalize datetime (python_datetime: the stdlib datetime module,
            # aliased at module level): a single str/datetime is run through
            # _expand_date; a flat [start, end] pair is wrapped into a list
            if datetime:
                if isinstance(datetime, (str, python_datetime.datetime)):
                    datetime = [self._expand_date(str(datetime))]
                elif isinstance(datetime, list) and isinstance(datetime[0], str):
                    datetime = [datetime]
            # Apply spatial and temporal filters
            df = self._apply_spatial_temporal_filters(df, bbox, datetime)
        # Limit the number of items if max_items is specified
        if max_items is not None:
            df = df.limit(max_items)
        return df
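
A minimal usage sketch, assuming collection is an instance of the client class
defined in this module (client construction is omitted; the bbox, datetime, and
limit values below are illustrative, not taken from the source):

    # Hypothetical usage: collection is assumed to already wrap a STAC
    # collection URL and a SparkSession, as load_items_df expects.
    df = collection.load_items_df(
        bbox=[-122.5, 37.5, -122.0, 38.0],  # single flat bbox; wrapped to [[...]] internally
        datetime="2024-06-01",              # single value; run through _expand_date
        ids=None,
        max_items=20,                       # applied as a row limit on the result
    )
    df.show()

Note that when only max_items is supplied (no ids, bbox, or datetime), the limit
is pushed into the reader via the itemsLimitMax option rather than applied after
the load.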