Object addEntry()

in endorsed/src/org.apache.sis.storage.geotiff/main/org/apache/sis/storage/geotiff/ImageFileDirectory.java [532:1143]


    /**
     * Reads the value(s) of the given TIFF tag from the input stream and stores the result
     * in the corresponding field of this Image File Directory (or of helper objects such as
     * {@code referencing()} and {@code metadata}). Tags not listed in the switch are ignored.
     *
     * <p>Note: {@code count} is never zero when this method is invoked, so vector elements
     * at index 0 can be read without a bound check.</p>
     *
     * @param  tag    identifier of the TIFF tag to process.
     * @param  type   type of the value(s) to read for that tag.
     * @param  count  number of values to read.
     * @return {@code null} on success, or the unrecognized value otherwise
     *         (a non-null return causes a warning to be reported by the caller).
     * @throws Exception if an error occurred while reading the stream or decoding a value.
     */
    Object addEntry(final short tag, final Type type, final long count) throws Exception {
        switch (tag) {

            //  ╔═════════════════════════════════════════════════════════════════════════════════════╗
            //  ║                                                                                     ║
            //  ║    Essential information for being able to read the image at least as grayscale.    ║
            //  ║    In Java2D, following information are needed for building the SampleModel.        ║
            //  ║                                                                                     ║
            //  ╚═════════════════════════════════════════════════════════════════════════════════════╝

            /*
             * How the components of each pixel are stored.
             * 1 = Chunky format. The component values for each pixel are stored contiguously (for example RGBRGBRGB).
             * 2 = Planar format. For example, one plane of Red components, one plane of Green and one plane of Blue.
             */
            case TAG_PLANAR_CONFIGURATION: {
                final int value = type.readAsInt(input(), count);
                switch (value) {
                    case PLANAR_CONFIGURATION_CHUNKY: isPlanar = false; break;
                    case PLANAR_CONFIGURATION_PLANAR: isPlanar = true;  break;
                    default: return value;      // Cause a warning to be reported by the caller.
                }
                break;
            }
            /*
             * The number of columns in the image, i.e., the number of pixels per row.
             */
            case TAG_IMAGE_WIDTH: {
                imageWidth = type.readAsUnsignedLong(input(), count);
                break;
            }
            /*
             * The number of rows of pixels in the image.
             */
            case TAG_IMAGE_LENGTH: {
                imageHeight = type.readAsUnsignedLong(input(), count);
                break;
            }
            /*
             * The tile width in pixels. This is the number of columns in each tile.
             */
            case TAG_TILE_WIDTH: {
                setTileTagFamily(TILE);
                tileWidth = type.readAsInt(input(), count);
                break;
            }
            /*
             * The tile length (height) in pixels. This is the number of rows in each tile.
             */
            case TAG_TILE_LENGTH: {
                setTileTagFamily(TILE);
                tileHeight = type.readAsInt(input(), count);
                break;
            }
            /*
             * The number of rows per strip. This is considered by SIS as a special kind of tiles.
             * From this point of view, TileLength = RowPerStrip and TileWidth = ImageWidth.
             */
            case TAG_ROWS_PER_STRIP: {
                setTileTagFamily(STRIP);
                tileHeight = type.readAsInt(input(), count);
                break;
            }
            /*
             * For each tile, the byte offset of that tile relative to the beginning of the TIFF file.
             */
            case TAG_TILE_OFFSETS: {
                setTileTagFamily(TILE);
                tileOffsets = type.readAsVector(input(), count);
                break;
            }
            /*
             * For each strip, the byte offset of that strip relative to the beginning of the TIFF file.
             * In Apache SIS implementation, strips are considered as a special kind of tiles.
             */
            case TAG_STRIP_OFFSETS: {
                setTileTagFamily(STRIP);
                tileOffsets = type.readAsVector(input(), count);
                break;
            }
            /*
             * For each tile, the number of bytes in the tile after compression.
             */
            case TAG_TILE_BYTE_COUNTS: {
                setTileTagFamily(TILE);
                tileByteCounts = type.readAsVector(input(), count);
                break;
            }
            /*
             * For each strip, the number of bytes in the strip after compression.
             * In Apache SIS implementation, strips are considered as a special kind of tiles.
             */
            case TAG_STRIP_BYTE_COUNTS: {
                setTileTagFamily(STRIP);
                tileByteCounts = type.readAsVector(input(), count);
                break;
            }
            /*
             * Legacy tags for JPEG formats, to be also interpreted as a tile.
             */
            case TAG_JPEG_INTERCHANGE_FORMAT: {
                setTileTagFamily(JPEG);
                tileOffsets = type.readAsVector(input(), count);
                break;
            }
            case TAG_JPEG_INTERCHANGE_FORMAT_LENGTH: {
                setTileTagFamily(JPEG);
                tileByteCounts = type.readAsVector(input(), count);
                break;
            }

            //  ╔═══════════════════════════════════════════════════════════════════════════════════╗
            //  ║                                                                                   ║
            //  ║    Information that define how the sample values are organized (their layout).    ║
            //  ║    In Java2D, following information are needed for building the SampleModel.      ║
            //  ║                                                                                   ║
            //  ╚═══════════════════════════════════════════════════════════════════════════════════╝

            /*
             * Compression scheme used on the image data.
             */
            case TAG_COMPRESSION: {
                final int value = type.readAsInt(input(), count);
                compression = Compression.valueOf(value);
                if (compression == Compression.UNKNOWN) {
                    return value;                           // Cause a warning to be reported by the caller.
                }
                break;
            }
            /*
             * Mathematical operator that is applied to the image data before an encoding scheme is applied.
             * 1=none, 2=horizontal differencing. More values may be added in the future.
             */
            case TAG_PREDICTOR: {
                final int value = type.readAsInt(input(), count);
                predictor = Predictor.valueOf(value);
                if (predictor == Predictor.UNKNOWN) {
                    return value;                           // Cause a warning to be reported by the caller.
                }
                break;
            }
            /*
             * The logical order of bits within a byte. If this value is 2, then
             * bits order shall be reversed in every bytes before decompression.
             */
            case TAG_FILL_ORDER: {
                final int value = type.readAsInt(input(), count);
                switch (value) {
                    case FILL_ORDER_LEFT_TO_RIGHT: isBitOrderReversed = false; break;
                    case FILL_ORDER_RIGHT_TO_LEFT: isBitOrderReversed = true;  break;
                    default: return value;      // Cause a warning to be reported by the caller.
                }
                break;
            }
            /*
             * How to interpret each data sample in a pixel. The size of data samples is still
             * specified by the BitsPerSample field.
             */
            case TAG_SAMPLE_FORMAT: {
                final int value = type.readAsInt(input(), count);
                switch (value) {
                    default: return value;      // Warning to be reported by the caller.
                    case SAMPLE_FORMAT_UNSIGNED_INTEGER: sampleFormat = UNSIGNED; break;
                    case SAMPLE_FORMAT_SIGNED_INTEGER:   sampleFormat = SIGNED;   break;
                    case SAMPLE_FORMAT_FLOATING_POINT:   sampleFormat = FLOAT;    break;
                    case SAMPLE_FORMAT_UNDEFINED: {
                        // Undefined format is not an error, but is worth a warning to the user.
                        warning(Level.WARNING, Resources.Keys.UndefinedDataFormat_1, filename());
                        break;
                    }
                }
                break;
            }
            /*
             * Number of bits per component. The array length should be the number of components in a
             * pixel (e.g. 3 for RGB values). Typically, all components have the same number of bits.
             * But the TIFF specification allows different values.
             */
            case TAG_BITS_PER_SAMPLE: {
                final Vector values = type.readAsVector(input(), count);
                /*
                 * The current implementation requires that all `bitsPerSample` elements have the same value.
                 * This restriction may be revisited in future Apache SIS versions.
                 * Note: `count` is never zero when this method is invoked, so we do not need to check bounds.
                 */
                bitsPerSample = values.shortValue(0);
                final int length = values.size();
                for (int i = 1; i < length; i++) {
                    if (values.shortValue(i) != bitsPerSample) {
                        throw new DataStoreContentException(reader.resources().getString(
                                Resources.Keys.ConstantValueRequired_3, "BitsPerSample", filename(), values));
                    }
                }
                break;
            }
            /*
             * The number of components per pixel. Usually 1 for bilevel, grayscale, and palette-color images,
             * and 3 for RGB images. Default value is 1. May be greater than 3 if there is extra samples.
             */
            case TAG_SAMPLES_PER_PIXEL: {
                samplesPerPixel = type.readAsShort(input(), count);
                break;
            }
            /*
             * Specifies that each pixel has N extra components. When this field is used, the SamplesPerPixel field
             * has a value greater than the PhotometricInterpretation field suggests. For example, a full-color RGB
             * image normally has SamplesPerPixel=3. If SamplesPerPixel is greater than 3, then the ExtraSamples field
             * describes the meaning of the extra samples. It may be an alpha channel, but not necessarily.
             */
            case TAG_EXTRA_SAMPLES: {
                extraSamples = type.readAsVector(input(), count);
                break;
            }

            //  ╔═════════════════════════════════════════════════════════════════════════════════╗
            //  ║                                                                                 ║
            //  ║    Information related to the color palette or the meaning of sample values.    ║
            //  ║    In Java2D, following information are needed for building the ColorModel.     ║
            //  ║                                                                                 ║
            //  ╚═════════════════════════════════════════════════════════════════════════════════╝

            /*
             * The color space of the image data.
             * 0 = WhiteIsZero. For bilevel and grayscale images: 0 is imaged as white.
             * 1 = BlackIsZero. For bilevel and grayscale images: 0 is imaged as black.
             * 2 = RGB. RGB value of (0,0,0) represents black, and (65535,65535,65535) represents white.
             * 3 = Palette color. The value of the component is used as an index into the RGB values of the ColorMap.
             * 4 = Transparency Mask. Defines an irregularly shaped region of another image in the same TIFF file.
             */
            case TAG_PHOTOMETRIC_INTERPRETATION: {
                final short value = type.readAsShort(input(), count);
                // Values outside [0 … 127] do not fit in the `byte` field: report them as a warning.
                if (value < 0 || value > Byte.MAX_VALUE) return value;
                photometricInterpretation = (byte) value;
                break;
            }
            /*
             * The lookup table for palette-color images. This is represented by IndexColorModel in Java2D.
             * Color space is RGB if PhotometricInterpretation is "PaletteColor", or another color space otherwise.
             * In the RGB case, all the Red values come first, followed by all Green values, then all Blue values.
             * The number of values for each color is (1 << BitsPerSample) where 0 represents the minimum intensity
             * (black is 0,0,0) and 65535 represents the maximum intensity.
             */
            case TAG_COLOR_MAP: {
                colorMap = type.readAsVector(input(), count);
                break;
            }
            /*
             * The minimum component value used. MinSampleValue is a single value that apply to all bands
             * while SMinSampleValue lists separated values for each band. Default is 0.
             */
            case TAG_MIN_SAMPLE_VALUE:
            case TAG_S_MIN_SAMPLE_VALUE: {
                minValues = extremum(minValues, type.readAsVector(input(), count), false);
                isMinSpecified = true;
                break;
            }
            /*
             * The maximum component value used. Default is {@code (1 << BitsPerSample) - 1}.
             * This field is for statistical purposes and should not be used to affect the
             * visual appearance of an image, unless a map styling is applied.
             */
            case TAG_MAX_SAMPLE_VALUE:
            case TAG_S_MAX_SAMPLE_VALUE: {
                maxValues = extremum(maxValues, type.readAsVector(input(), count), true);
                isMaxSpecified = true;
                break;
            }

            //  ╔═════════════════════════════════════════════════════════════════════════════════╗
            //  ║                                                                                 ║
            //  ║    Information useful for defining the image role in a multi-images context.    ║
            //  ║                                                                                 ║
            //  ╚═════════════════════════════════════════════════════════════════════════════════╝

            /*
             * A general indication of the kind of data contained in this subfile, mainly useful when there
             * are multiple subfiles in a single TIFF file. This field is made up of a set of 32 flag bits.
             *
             * Bit 0 is 1 if the image is a reduced-resolution version of another image in this TIFF file.
             * Bit 1 is 1 if the image is a single page of a multi-page image (see PageNumber).
             * Bit 2 is 1 if the image defines a transparency mask for another image in this TIFF file (see PhotometricInterpretation).
             * Bit 4 indicates MRC imaging model as described in ITU-T recommendation T.44 [T.44] (See ImageLayer tag) - RFC 2301.
             */
            case TAG_NEW_SUBFILE_TYPE: {
                subfileType = type.readAsInt(input(), count);
                break;
            }
            /*
             * Old version (now deprecated) of above NewSubfileType.
             * 1 = full-resolution image data
             * 2 = reduced-resolution image data
             * 3 = a single page of a multi-page image (see PageNumber).
             *
             * Each legacy value is translated to the equivalent NewSubfileType flag bit.
             */
            case TAG_SUBFILE_TYPE: {
                final int value = type.readAsInt(input(), count);
                switch (value) {
                    default: return value;                // Warning to be reported by the caller.
                    case SUBFILE_TYPE_FULL_RESOLUTION:    subfileType &= ~NEW_SUBFILE_TYPE_REDUCED_RESOLUTION; break;
                    case SUBFILE_TYPE_REDUCED_RESOLUTION: subfileType |=  NEW_SUBFILE_TYPE_REDUCED_RESOLUTION; break;
                    case SUBFILE_TYPE_SINGLE_PAGE:        subfileType |=  NEW_SUBFILE_TYPE_SINGLE_PAGE;        break;
                }
                break;
            }

            //  ╔════════════════════════════════════════════════════════════════════════════════════╗
            //  ║                                                                                    ║
            //  ║    Information related to the Coordinate Reference System and the bounding box.    ║
            //  ║                                                                                    ║
            //  ╚════════════════════════════════════════════════════════════════════════════════════╝

            /*
             * References the "GeoKeys" needed for building the Coordinate Reference System.
             * An array of unsigned SHORT values, which are primarily grouped into blocks of 4.
             * The first 4 values are special, and contain GeoKey directory header information.
             */
            case (short) TAG_GEO_KEY_DIRECTORY: {
                referencing().keyDirectory = type.readAsVector(input(), count);
                break;
            }
            /*
             * Stores all of the `double` valued GeoKeys, referenced by the GeoKeyDirectory.
             */
            case (short) TAG_GEO_DOUBLE_PARAMS: {
                referencing().numericParameters = type.readAsVector(input(), count);
                break;
            }
            /*
             * Stores all the characters referenced by the GeoKeyDirectory. Should contain exactly one string
             * which will be split by CRSBuilder, but we allow an arbitrary amount as a paranoiac check.
             * Note that TIFF files use 0 as the end delimiter in strings (C/C++ convention).
             */
            case (short) TAG_GEO_ASCII_PARAMS: {
                referencing().setAsciiParameters(type.readAsStrings(input(), count, encoding()));
                break;
            }
            /*
             * The orientation of the image with respect to the rows and columns.
             * This is an integer numbered from 1 to 7 inclusive (see TIFF specification for meaning).
             */
            case TAG_ORIENTATION: {
                // TODO
                break;
            }
            /*
             * Specifies the "grid to CRS" conversion between the raster space and the model space.
             * If specified, the tag shall have the 16 values of a 4×4 matrix in row-major fashion.
             * The last matrix row (i.e. the last 4 values) should be [0 0 0 1].
             * The row before should be [0 0 0 0] if the conversion is two-dimensional.
             * This block does not reduce the number of dimensions from 3 to 2.
             * Only one of `ModelPixelScaleTag` and `ModelTransformationTag` should be used.
             */
            case (short) TAG_MODEL_TRANSFORMATION: {
                final Vector m = type.readAsVector(input(), count);
                final int n;        // Number of matrix columns inferred from the number of values.
                switch (m.size()) {
                    case  6:                    // Assume 2D model with implicit [0 0 1] last row.
                    case  9: n = 3; break;      // Assume 2D model with full 3×3 matrix.
                    case 12:                    // Assume 3D model with implicit [0 0 0 1] last row.
                    case 16: n = 4; break;      // 3D model with full 4×4 matrix, as required by GeoTIFF spec.
                    default: return m;          // Unexpected length: warning to be reported by the caller.
                }
                referencing().setGridToCRS(m, n);
                break;
            }
            /*
             * A vector of 3 floating-point values defining the "grid to CRS" conversion without rotation.
             * The conversion is defined as below, when (I,J,K,X,Y,Z) is the tie point singleton record:
             *
             * ┌                       ┐
             * │   Sx   0    0    Tx   │       Tx = X - I/Sx
             * │   0   -Sy   0    Ty   │       Ty = Y + J/Sy
             * │   0    0    Sz   Tz   │       Tz = Z - K/Sz  (if not 0)
             * │   0    0    0    1    │
             * └                       ┘
             *
             * This block sets the translation column to NaN, meaning that it will need to be computed from
             * the tie point. Only one of `ModelPixelScaleTag` and `ModelTransformationTag` should be used.
             */
            case (short) TAG_MODEL_PIXEL_SCALE: {
                final Vector m = type.readAsVector(input(), count);
                final int size = m.size();
                if (size < 2 || size > 3) {     // Length should be exactly 3, but we make this reader tolerant.
                    return m;
                }
                referencing().setScaleFactors(m);
                break;
            }
            /*
             * The mapping from pixel coordinates to CRS coordinates as a sequence of (I,J,K, X,Y,Z) records.
             * This tag is also known as `Georeference`.
             */
            case (short) TAG_MODEL_TIE_POINT: {
                referencing().modelTiePoints = type.readAsVector(input(), count);
                break;
            }

            //  ╔════════════════════════════════════════════════════════════════════════════╗
            //  ║                                                                            ║
            //  ║    Metadata for discovery purposes, conditions of use, etc.                ║
            //  ║    Those metadata are not "critical" information for reading the image.    ║
            //  ║    Should not write anything under `metadata/contentInfo` node.            ║
            //  ║                                                                            ║
            //  ╚════════════════════════════════════════════════════════════════════════════╝

            /*
             * The name of the document from which this image was scanned.
             *
             * Destination: metadata/identificationInfo/citation/series/name
             */
            case TAG_DOCUMENT_NAME: {
                for (final String value : type.readAsStrings(input(), count, encoding())) {
                    metadata.addSeries(value);
                }
                break;
            }
            /*
             * The name of the page from which this image was scanned.
             *
             * Destination: metadata/identificationInfo/citation/series/page
             */
            case TAG_PAGE_NAME: {
                for (final String value : type.readAsStrings(input(), count, encoding())) {
                    metadata.addPage(value);
                }
                break;
            }
            /*
             * The page number of the page from which this image was scanned.
             * Should be a vector of length 2 containing the page number and
             * the total number of pages (with 0 meaning unavailable).
             *
             * Destination: metadata/identificationInfo/citation/series/page
             */
            case TAG_PAGE_NUMBER: {
                final Vector v = type.readAsVector(input(), count);
                final int n = v.size();
                if (n >= 1) {
                    metadata.addPage(v.intValue(0), (n >= 2) ? v.intValue(1) : 0);
                }
                break;
            }
            /*
             * A string that describes the subject of the image.
             * For example, a user may wish to attach a comment such as "1988 company picnic" to an image.
             *
             * Destination: metadata/identificationInfo/citation/title
             */
            case TAG_IMAGE_DESCRIPTION: {
                for (final String value : type.readAsStrings(input(), count, encoding())) {
                    metadata.addTitle(Strings.singleLine(" ", value));
                }
                break;
            }
            /*
             * Person who created the image. Some older TIFF files used this tag for storing
             * Copyright information, but Apache SIS does not support this legacy practice.
             *
             * Destination: metadata/identificationInfo/citation/party/name
             */
            case TAG_ARTIST: {
                for (final String value : type.readAsStrings(input(), count, encoding())) {
                    metadata.addAuthor(value);
                }
                break;
            }
            /*
             * Copyright notice of the person or organization that claims the copyright to the image.
             * Example: “Copyright, John Smith, 1992. All rights reserved.”
             *
             * Destination: metadata/identificationInfo/resourceConstraint
             */
            case (short) TAG_COPYRIGHT: {
                for (final String value : type.readAsStrings(input(), count, encoding())) {
                    metadata.parseLegalNotice(null, value);
                }
                break;
            }
            /*
             * Date and time of image creation. The format is: "YYYY:MM:DD HH:MM:SS" with 24-hour clock.
             *
             * Destination: metadata/identificationInfo/citation/date
             */
            case TAG_DATE_TIME: {
                for (final String value : type.readAsStrings(input(), count, encoding())) {
                    metadata.addCitationDate(reader.store.getDateFormat().parse(value).toInstant(),
                            DateType.CREATION, ImageMetadataBuilder.Scope.RESOURCE);
                }
                break;
            }
            /*
             * The computer and/or operating system in use at the time of image creation.
             *
             * Destination: metadata/resourceLineage/processStep/processingInformation/procedureDescription
             */
            case TAG_HOST_COMPUTER: {
                for (final String value : type.readAsStrings(input(), count, encoding())) {
                    metadata.addHostComputer(value);
                }
                break;
            }
            /*
             * Name and version number of the software package(s) used to create the image.
             *
             * Destination: metadata/resourceLineage/processStep/processingInformation/softwareReference/title
             */
            case TAG_SOFTWARE: {
                for (final String value : type.readAsStrings(input(), count, encoding())) {
                    metadata.addSoftwareReference(value);
                }
                break;
            }
            /*
             * Manufacturer of the scanner, video digitizer, or other type of equipment used to generate the image.
             * Synthetic images should not include this field.
             */
            case TAG_MAKE: {
                // TODO: is Instrument.citation.citedResponsibleParty.party.name an appropriate place?
                // what would be the citation title? A copy of Tags.Model?
                break;
            }
            /*
             * The model name or number of the scanner, video digitizer, or other type of equipment used to
             * generate the image.
             *
             * Destination: metadata/acquisitionInformation/platform/instrument/identifier
             */
            case TAG_MODEL: {
                for (final String value : type.readAsStrings(input(), count, encoding())) {
                    metadata.addInstrument(null, value);
                }
                break;
            }
            /*
             * The number of pixels per ResolutionUnit in the ImageWidth or ImageHeight direction.
             */
            case TAG_X_RESOLUTION:
            case TAG_Y_RESOLUTION: {
                metadata.setResolution(type.readAsDouble(input(), count));
                break;
            }
            /*
             * The unit of measurement for XResolution and YResolution.
             *
             *   1 = None. Used for images that may have a non-square aspect ratio.
             *   2 = Inch (default).
             *   3 = Centimeter.
             */
            case TAG_RESOLUTION_UNIT: {
                return metadata.setResolutionUnit(type.readAsInt(input(), count));
                // Non-null return value causes a warning to be reported by the caller.
            }
            /*
             * For black and white TIFF files that represent shades of gray, the technique used to convert
             * from gray to black and white pixels. The default value is 1 (nothing done on the image).
             *
             *   1 = No dithering or halftoning has been applied to the image data.
             *   2 = An ordered dither or halftone technique has been applied to the image data.
             *   3 = A randomized process such as error diffusion has been applied to the image data.
             */
            case TAG_THRESHHOLDING: {
                return metadata.setThreshholding(type.readAsShort(input(), count));
                // Non-null return value causes a warning to be reported by the caller.
            }
            /*
             * The width and height of the dithering or halftoning matrix used to create
             * a dithered or halftoned bilevel file. Meaningful only if Threshholding = 2.
             */
            case TAG_CELL_WIDTH:
            case TAG_CELL_LENGTH: {
                metadata.setCellSize(type.readAsShort(input(), count), tag == TAG_CELL_WIDTH);
                break;
            }

            //  ╔════════════════════════════════════════════════════════════╗
            //  ║                                                            ║
            //  ║    Defined by TIFF specification but currently ignored.    ║
            //  ║                                                            ║
            //  ╚════════════════════════════════════════════════════════════╝

            /*
             * For each string of contiguous unused bytes in a TIFF file, the number of bytes and the byte offset
             * in the string. Those tags are deprecated and do not need to be supported.
             */
            case TAG_FREE_BYTE_COUNTS:
            case TAG_FREE_OFFSETS:
            /*
             * For grayscale data, the optical density of each possible pixel value, plus the precision of that
             * information. This is ignored by most TIFF readers.
             */
            case TAG_GRAY_RESPONSE_CURVE:
            case TAG_GRAY_RESPONSE_UNIT: {
                warning(Level.FINE, Resources.Keys.IgnoredTag_1, Tags.name(tag));
                break;
            }

            //  ╔════════════════════════════════════════════╗
            //  ║                                            ║
            //  ║    Extensions defined by DGIWG or GDAL.    ║
            //  ║                                            ║
            //  ╚════════════════════════════════════════════╝

            case Tags.GEO_METADATA:
            case Tags.GDAL_METADATA: {
                metadata.addXML(reader.readXML(type, count, tag));
                break;
            }
            case Tags.GDAL_NODATA: {
                noData = type.readAsDouble(input(), count);
                break;
            }
        }
        return null;
    }