def get_file_systems()

in source/idea/idea-sdk/src/ideasdk/aws/aws_resources.py [0:0]
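
Lists the EFS, Amazon File Cache and FSx file systems reachable from the given VPC, normalizes each into a SocaFileSystem record (including FSx volumes and ONTAP storage virtual machines where applicable), and caches the listing in the local db so subsequent calls can skip the AWS APIs unless refresh=True.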


    def get_file_systems(self, vpc_id: str, refresh: bool = False) -> List[SocaFileSystem]:
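        """
        Return the list of SocaFileSystem records visible to the given VPC.

        Cached results from the local db are returned unless refresh is True;
        otherwise EFS file systems, Amazon File Cache caches and FSx file systems
        are listed from AWS, filtered to healthy resources in vpc_id, and the
        db cache is updated before returning.
        """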
        try:
            if not refresh:
                file_systems = self._db.get_file_systems()
                if file_systems is not None:
                    return file_systems

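            # result_cb is invoked by invoke_aws_listing for each page of results and
            # normalizes the entries for the given fs_type into SocaFileSystem records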
            def result_cb(result, fs_type: str) -> List[SocaFileSystem]:
                results = []
                # Amazon File Cache uses slightly different structures
                if fs_type == constants.STORAGE_PROVIDER_FSX_CACHE:
                    listing_key = 'FileCaches'
                    fsid_key = 'FileCacheId'
                else:
                    listing_key = 'FileSystems'
                    fsid_key = 'FileSystemId'

                allowed_lifecycle_values = ['available', 'updating']

                if fs_type == constants.STORAGE_PROVIDER_EFS:
                    lifecycle_key = 'LifeCycleState'
                else:
                    # FSx / File Cache
                    lifecycle_key = 'Lifecycle'

                listing = Utils.get_value_as_list(listing_key, result, [])
                for entry in listing:
                    # Only process file systems in a healthy lifecycle state;
                    # the value is lowercased for a case-insensitive comparison
                    if Utils.get_value_as_string(lifecycle_key, entry, default='error').lower() not in allowed_lifecycle_values:
                        continue
                    file_system_id = Utils.get_value_as_string(fsid_key, entry)
                    storage_virtual_machines = None
                    volumes = None

                    if fs_type == 'efs':
                        file_system_provider = constants.STORAGE_PROVIDER_EFS
                    elif fs_type == 'fsx_cache':
                        file_system_provider = constants.STORAGE_PROVIDER_FSX_CACHE
                        # Perform some checks - only supporting Lustre-based file caches for now
                        file_cache_type = Utils.get_value_as_string('FileCacheType', entry, default='unknown')
                        if file_cache_type != 'LUSTRE':
                            continue

                        lustre_configuration = Utils.get_value_as_dict('LustreConfiguration', entry, {})
                        if Utils.is_empty(lustre_configuration):
                            continue

                        file_cache_deployment_type = Utils.get_value_as_string('DeploymentType', lustre_configuration, default='unknown')
                        if file_cache_deployment_type != 'CACHE_1':
                            continue

                    else:
                        file_system_type = Utils.get_value_as_string('FileSystemType', entry)
                        if file_system_type == 'ONTAP':
                            file_system_provider = constants.STORAGE_PROVIDER_FSX_NETAPP_ONTAP
                        elif file_system_type == 'OPENZFS':
                            file_system_provider = constants.STORAGE_PROVIDER_FSX_OPENZFS
                        elif file_system_type == 'LUSTRE':
                            file_system_provider = constants.STORAGE_PROVIDER_FSX_LUSTRE
                        elif file_system_type == 'WINDOWS':
                            file_system_provider = constants.STORAGE_PROVIDER_FSX_WINDOWS_FILE_SERVER
                        else:
                            continue

                    # File Cache doesn't return Tags in the describe_file_caches response,
                    # so fetch them via list_tags_for_resource instead
                    if fs_type == 'fsx_cache':
                        resource_arn = Utils.get_value_as_string('ResourceARN', entry)
                        tags = self.aws.fsx().list_tags_for_resource(ResourceARN=resource_arn)['Tags']
                    else:
                        tags = Utils.get_value_as_list('Tags', entry)

                    title = self._get_tag_value('Name', tags)
                    if Utils.is_not_empty(title):
                        title = f'{title} ({fsid_key}: {file_system_id}, Provider: {file_system_provider})'
                    else:
                        title = f'{fsid_key}: {file_system_id}, Provider: {file_system_provider}'

                    if fs_type == 'efs':
                        mount_targets_result = self.aws.efs().describe_mount_targets(FileSystemId=file_system_id)
                        # Rapid invocation of describe_mount_targets can cause RateExceeded on the account
                        time.sleep(0.1)
                        mount_targets = Utils.get_value_as_list('MountTargets', mount_targets_result, [])

                        vpc_found = False
                        for mount_target in mount_targets:
                            if mount_target['VpcId'] == vpc_id:
                                vpc_found = True
                                break

                        if not vpc_found:
                            continue

                    elif fs_type == 'fsx_cache':
                        if 'VpcId' not in entry:
                            continue
                        if entry['VpcId'] != vpc_id:
                            continue

                    elif fs_type == 'fsx':
                        if 'VpcId' not in entry:
                            continue
                        if entry['VpcId'] != vpc_id:
                            continue

                        if file_system_provider in (
                            constants.STORAGE_PROVIDER_FSX_NETAPP_ONTAP,
                            constants.STORAGE_PROVIDER_FSX_OPENZFS
                        ):
                            describe_volumes_result = self.aws.fsx().describe_volumes(
                                Filters=[
                                    {
                                        'Name': 'file-system-id',
                                        'Values': [file_system_id]
                                    }
                                ]
                            )
                            volumes = Utils.get_value_as_list('Volumes', describe_volumes_result, [])

                        if file_system_provider == constants.STORAGE_PROVIDER_FSX_NETAPP_ONTAP:
                            describe_svm_result = self.aws.fsx().describe_storage_virtual_machines(
                                Filters=[
                                    {
                                        'Name': 'file-system-id',
                                        'Values': [file_system_id]
                                    }
                                ]
                            )
                            storage_virtual_machines = Utils.get_value_as_list('StorageVirtualMachines', describe_svm_result, [])

                    else:
                        continue

                    results.append(SocaFileSystem(
                        type=f'aws.file-system.{fs_type}',
                        title=title,
                        provider=file_system_provider,
                        ref={
                            'file_system': entry,
                            'storage_virtual_machines': storage_virtual_machines,
                            'volumes': volumes
                        }
                    ))
                return results

            file_systems = []

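            # EFS describe_file_systems pages with a Marker; the FSx calls below paginate with NextToken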
            efs = self.aws_util.invoke_aws_listing(
                fn=self.aws.efs().describe_file_systems,
                result_cb=result_cb,
                marker_based_paging=True,
                fs_type='efs'
            )
            file_systems += efs

            # Amazon File Cache is queried in a discrete try/except block because it is
            # still being rolled out to all regions, and we don't want to
            # maintain a static listing of supported regions.
            try:
                caches = self.aws_util.invoke_aws_listing(
                    fn=self.aws.fsx().describe_file_caches,
                    result_cb=result_cb,
                    fs_type='fsx_cache'
                )
                file_systems += caches
            except botocore.exceptions.ClientError as error:
                if error.response['Error']['Code'] == 'BadRequest':
                    pass
                else:
                    raise error

            fsx = self.aws_util.invoke_aws_listing(
                fn=self.aws.fsx().describe_file_systems,
                result_cb=result_cb,
                fs_type='fsx'
            )
            file_systems += fsx

            self._db.set_file_systems(file_systems)

            return file_systems
        except Exception as e:
            self.aws_util.handle_aws_exception(e)
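
A minimal usage sketch (illustrative only: 'aws_resources' stands in for an instance of the enclosing class, the VPC id is a placeholder, and SocaFileSystem is assumed to expose its constructor fields as attributes):

    # refresh=True bypasses the cached listing in the local db and re-queries AWS
    file_systems = aws_resources.get_file_systems(vpc_id='vpc-0123456789abcdef0', refresh=True)
    for fs in file_systems:
        print(fs.provider, fs.title)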