public ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern)

in pgjdbc/src/main/java/org/postgresql/jdbc/PgDatabaseMetaData.java [1494:1704]


  public ResultSet getColumns(@Nullable String catalog, @Nullable String schemaPattern,
      @Nullable String tableNamePattern,
      @Nullable String columnNamePattern) throws SQLException {

    int numberOfFields = 24; // JDBC4
    List<Tuple> v = new ArrayList<Tuple>(); // The new ResultSet tuple stuff
    Field[] f = new Field[numberOfFields]; // The field descriptors for the new ResultSet

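    // Field descriptors in the column order mandated by DatabaseMetaData.getColumns()
    // (24 columns as of JDBC 4).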
    f[0] = new Field("TABLE_CAT", Oid.VARCHAR);
    f[1] = new Field("TABLE_SCHEM", Oid.VARCHAR);
    f[2] = new Field("TABLE_NAME", Oid.VARCHAR);
    f[3] = new Field("COLUMN_NAME", Oid.VARCHAR);
    f[4] = new Field("DATA_TYPE", Oid.INT2);
    f[5] = new Field("TYPE_NAME", Oid.VARCHAR);
    f[6] = new Field("COLUMN_SIZE", Oid.INT4);
    f[7] = new Field("BUFFER_LENGTH", Oid.VARCHAR);
    f[8] = new Field("DECIMAL_DIGITS", Oid.INT4);
    f[9] = new Field("NUM_PREC_RADIX", Oid.INT4);
    f[10] = new Field("NULLABLE", Oid.INT4);
    f[11] = new Field("REMARKS", Oid.VARCHAR);
    f[12] = new Field("COLUMN_DEF", Oid.VARCHAR);
    f[13] = new Field("SQL_DATA_TYPE", Oid.INT4);
    f[14] = new Field("SQL_DATETIME_SUB", Oid.INT4);
    f[15] = new Field("CHAR_OCTET_LENGTH", Oid.VARCHAR);
    f[16] = new Field("ORDINAL_POSITION", Oid.INT4);
    f[17] = new Field("IS_NULLABLE", Oid.VARCHAR);
    f[18] = new Field("SCOPE_CATALOG", Oid.VARCHAR);
    f[19] = new Field("SCOPE_SCHEMA", Oid.VARCHAR);
    f[20] = new Field("SCOPE_TABLE", Oid.VARCHAR);
    f[21] = new Field("SOURCE_DATA_TYPE", Oid.INT2);
    f[22] = new Field("IS_AUTOINCREMENT", Oid.VARCHAR);
    f[23] = new Field("IS_GENERATEDCOLUMN", Oid.VARCHAR);

    String sql;
    // a.attnum isn't decremented when preceding columns are dropped,
    // so the only way to calculate the correct column number is with
    // window functions, new in 8.4.
    //
    // We want to push as much predicate information below the window
    // function as possible (schema/table names), but must leave
    // column name outside so we correctly count the other columns.
    //
    if (connection.haveMinimumServerVersion(ServerVersion.v8_4)) {
      sql = "SELECT * FROM (";
    } else {
      sql = "";
    }

    sql += "SELECT n.nspname,c.relname,a.attname,a.atttypid,a.attnotnull "
           + "OR (t.typtype = 'd' AND t.typnotnull) AS attnotnull,a.atttypmod,a.attlen,t.typtypmod,";

    if (connection.haveMinimumServerVersion(ServerVersion.v8_4)) {
      sql += "row_number() OVER (PARTITION BY a.attrelid ORDER BY a.attnum) AS attnum, ";
    } else {
      sql += "a.attnum,";
    }

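    // pg_attribute.attidentity ('a' = generated always, 'd' = generated by default)
    // marks identity columns; the column only exists on PostgreSQL 10 and later.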
    if (connection.haveMinimumServerVersion(ServerVersion.v10)) {
      sql += "nullif(a.attidentity, '') as attidentity,";
    } else {
      sql += "null as attidentity,";
    }
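    // Core catalog query: one row per column of every relation with a relevant relkind
    // ('r' ordinary table, 'p' partitioned table, 'v' view, 'f' foreign table,
    // 'm' materialized view), skipping system columns (attnum <= 0) and dropped columns.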
    sql += "pg_catalog.pg_get_expr(def.adbin, def.adrelid) AS adsrc,dsc.description,t.typbasetype,t.typtype "
           + " FROM pg_catalog.pg_namespace n "
           + " JOIN pg_catalog.pg_class c ON (c.relnamespace = n.oid) "
           + " JOIN pg_catalog.pg_attribute a ON (a.attrelid=c.oid) "
           + " JOIN pg_catalog.pg_type t ON (a.atttypid = t.oid) "
           + " LEFT JOIN pg_catalog.pg_attrdef def ON (a.attrelid=def.adrelid AND a.attnum = def.adnum) "
           + " LEFT JOIN pg_catalog.pg_description dsc ON (c.oid=dsc.objoid AND a.attnum = dsc.objsubid) "
           + " LEFT JOIN pg_catalog.pg_class dc ON (dc.oid=dsc.classoid AND dc.relname='pg_class') "
           + " LEFT JOIN pg_catalog.pg_namespace dn ON (dc.relnamespace=dn.oid AND dn.nspname='pg_catalog') "
           + " WHERE c.relkind in ('r','p','v','f','m') and a.attnum > 0 AND NOT a.attisdropped ";

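    // The JDBC name patterns are SQL LIKE patterns; a null or empty pattern means
    // "match everything", so the corresponding predicate is simply omitted.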
    if (schemaPattern != null && !schemaPattern.isEmpty()) {
      sql += " AND n.nspname LIKE " + escapeQuotes(schemaPattern);
    }
    if (tableNamePattern != null && !tableNamePattern.isEmpty()) {
      sql += " AND c.relname LIKE " + escapeQuotes(tableNamePattern);
    }
    if (connection.haveMinimumServerVersion(ServerVersion.v8_4)) {
      sql += ") c WHERE true ";
    }
    if (columnNamePattern != null && !columnNamePattern.isEmpty()) {
      sql += " AND attname LIKE " + escapeQuotes(columnNamePattern);
    }
    sql += " ORDER BY nspname,c.relname,attnum ";

    Statement stmt = connection.createStatement();
    ResultSet rs = stmt.executeQuery(sql);
    while (rs.next()) {
      byte[] @Nullable [] tuple = new byte[numberOfFields][];
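      // atttypmod carries type-specific metadata (e.g. varchar length, numeric
      // precision/scale); -1 means the type has no modifier.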
      int typeOid = (int) rs.getLong("atttypid");
      int typeMod = rs.getInt("atttypmod");

      tuple[0] = null; // Catalog name, not supported
      tuple[1] = rs.getBytes("nspname"); // Schema
      tuple[2] = rs.getBytes("relname"); // Table name
      tuple[3] = rs.getBytes("attname"); // Column name

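      // pg_type.typtype: 'c' = composite, 'd' = domain, 'e' = enum; anything else is
      // mapped through the driver's type info.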
      String typtype = rs.getString("typtype");
      int sqlType;
      if ("c".equals(typtype)) {
        sqlType = Types.STRUCT;
      } else if ("d".equals(typtype)) {
        sqlType = Types.DISTINCT;
      } else if ("e".equals(typtype)) {
        sqlType = Types.VARCHAR;
      } else {
        sqlType = connection.getTypeInfo().getSQLType(typeOid);
      }

      tuple[4] = connection.encodeString(Integer.toString(sqlType));
      String pgType = connection.getTypeInfo().getPGType(typeOid);
      tuple[5] = connection.encodeString(pgType); // Type name
      tuple[7] = null; // Buffer length

      String defval = rs.getString("adsrc");

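      // A nextval() default on an integer column indicates one of the serial
      // pseudo-types, so report the type name accordingly.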
      if (defval != null && defval.contains("nextval(")) {
        if ("int4".equals(pgType)) {
          tuple[5] = connection.encodeString("serial"); // Type name == serial
        } else if ("int8".equals(pgType)) {
          tuple[5] = connection.encodeString("bigserial"); // Type name == bigserial
        } else if ("int2".equals(pgType) && connection.haveMinimumServerVersion(ServerVersion.v9_2)) {
          tuple[5] = connection.encodeString("smallserial"); // Type name == smallserial
        }
      }
      String identity = rs.getString("attidentity");

      int baseTypeOid = (int) rs.getLong("typbasetype");

      int decimalDigits;
      int columnSize;

      /* Types.DISTINCT here is how the driver reports a PostgreSQL DOMAIN type. */
      if (sqlType == Types.DISTINCT) {
        int typtypmod = rs.getInt("typtypmod");
        decimalDigits = connection.getTypeInfo().getScale(baseTypeOid, typeMod);
        /*
        From the PostgreSQL docs: domains use typtypmod to record the typmod to be
        applied to their base type (-1 if the base type does not use a typmod), and
        typtypmod is -1 if this type is not a domain. When it is -1, take the
        precision from the base type. That does not help if the base type is itself
        a domain, but for ordinary base types it returns the correct value.
         */
        if (typtypmod == -1) {
          columnSize = connection.getTypeInfo().getPrecision(baseTypeOid, typeMod);
        } else if (baseTypeOid == Oid.NUMERIC) {
          decimalDigits = connection.getTypeInfo().getScale(baseTypeOid, typtypmod);
          columnSize = connection.getTypeInfo().getPrecision(baseTypeOid, typtypmod);
        } else {
          columnSize = typtypmod;
        }
      } else {
        decimalDigits = connection.getTypeInfo().getScale(typeOid, typeMod);
        columnSize = connection.getTypeInfo().getPrecision(typeOid, typeMod);
        if (columnSize == 0) {
          columnSize = connection.getTypeInfo().getDisplaySize(typeOid, typeMod);
        }
      }
      tuple[6] = connection.encodeString(Integer.toString(columnSize));
      // Give null for an unset scale on Decimal and Numeric columns
      if (((sqlType == Types.NUMERIC) || (sqlType == Types.DECIMAL)) && (typeMod == -1)) {
        tuple[8] = null;
      } else {
        tuple[8] = connection.encodeString(Integer.toString(decimalDigits));
      }

      // Everything is base 10 unless we override later.
      tuple[9] = connection.encodeString("10");

      if ("bit".equals(pgType) || "varbit".equals(pgType)) {
        tuple[9] = connection.encodeString("2");
      }

      tuple[10] = connection.encodeString(Integer.toString(rs.getBoolean("attnotnull")
          ? java.sql.DatabaseMetaData.columnNoNulls : java.sql.DatabaseMetaData.columnNullable)); // Nullable
      tuple[11] = rs.getBytes("description"); // Description (if any)
      tuple[12] = rs.getBytes("adsrc"); // Column default
      tuple[13] = null; // sql data type (unused)
      tuple[14] = null; // sql datetime sub (unused)
      tuple[15] = tuple[6]; // char octet length
      tuple[16] = connection.encodeString(String.valueOf(rs.getInt("attnum"))); // ordinal position
      // Is nullable
      tuple[17] = connection.encodeString(rs.getBoolean("attnotnull") ? "NO" : "YES");

      tuple[18] = null; // SCOPE_CATALOG
      tuple[19] = null; // SCOPE_SCHEMA
      tuple[20] = null; // SCOPE_TABLE
      tuple[21] = baseTypeOid == 0
                  ? null
                  : connection.encodeString(Integer.toString(connection.getTypeInfo().getSQLType(baseTypeOid))); // SOURCE_DATA_TYPE

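      // A column counts as auto-increment if it has a nextval() default (serial)
      // or is an identity column (PostgreSQL 10+).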
      String autoinc = "NO";
      if ((defval != null && defval.contains("nextval(")) || identity != null) {
        autoinc = "YES";
      }
      tuple[22] = connection.encodeString(autoinc);
      // Per JDBC, an empty string for IS_GENERATEDCOLUMN means it cannot be
      // determined whether the column is generated.
      tuple[23] = connection.encodeString("");

      v.add(new Tuple(tuple));
    }
    rs.close();
    stmt.close();

    return ((BaseStatement) createMetaDataStatement()).createDriverResultSet(f, v);
  }
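
For context, here is a minimal caller-side sketch (not part of the driver) of how the result set built above surfaces through the standard java.sql.DatabaseMetaData API. The connection URL, credentials, schema and table names are hypothetical placeholders; only the JDBC calls and the column labels come from the code above.

import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;

public class ColumnMetadataExample {
  public static void main(String[] args) throws SQLException {
    // Hypothetical connection details; adjust URL, user and password as needed.
    try (Connection conn = DriverManager.getConnection(
        "jdbc:postgresql://localhost:5432/mydb", "user", "secret")) {
      DatabaseMetaData md = conn.getMetaData();
      // null catalog, LIKE patterns for schema/table, null = all columns.
      try (ResultSet cols = md.getColumns(null, "public", "my_table", null)) {
        while (cols.next()) {
          System.out.printf("%-20s %-12s nullable=%-3s autoincrement=%s%n",
              cols.getString("COLUMN_NAME"),
              cols.getString("TYPE_NAME"),
              cols.getString("IS_NULLABLE"),
              cols.getString("IS_AUTOINCREMENT"));
        }
      }
    }
  }
}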