in phoenix-queryserver-it/src/it/java/org/apache/phoenix/end2end/SecureQueryServerPhoenixDBIT.java [201:285]
    public static synchronized void setUp() throws Exception {
        checkForCommandOnPath("python");
        checkForCommandOnPath("virtualenv");
        checkForCommandOnPath("kinit");
        final Configuration conf = UTIL.getConfiguration();
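        // Phoenix requires its own WAL codec for (mutable) secondary index support.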
conf.set("hbase.regionserver.wal.codec", "org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec");
// Ensure the dirs we need are created/empty
ensureIsEmptyDirectory(TEMP_DIR);
ensureIsEmptyDirectory(KEYTAB_DIR);
KEYTAB = new File(KEYTAB_DIR, "test.keytab");
// Start a MiniKDC
KDC = UTIL.setupMiniKdc(KEYTAB);
// Create a service principal and spnego principal in one keytab
// NB. Due to some apparent limitations between HDFS and HBase in the same JVM, trying to
// use separate identies for HBase and HDFS results in a GSS initiate error. The quick
// solution is to just use a single "service" principal instead of "hbase" and "hdfs"
// (or "dn" and "nn") per usual.
KDC.createPrincipal(KEYTAB, SPNEGO_PRINCIPAL, PQS_PRINCIPAL, SERVICE_PRINCIPAL);
        // Start ZK by hand
        UTIL.startMiniZKCluster();
        // Create a number of unprivileged users
        createUsers(3);
        // Set configuration for HBase
        HBaseKerberosUtils.setPrincipalForTesting(SERVICE_PRINCIPAL + "@" + KDC.getRealm());
        HBaseKerberosUtils.setSecuredConfiguration(conf);
        setHdfsSecuredConfiguration(conf);
        UserGroupInformation.setConfiguration(conf);
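        // Bind master and regionserver RPC/info ports to 0 so the OS picks free ephemeral ports.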
        conf.setInt(HConstants.MASTER_PORT, 0);
        conf.setInt(HConstants.MASTER_INFO_PORT, 0);
        conf.setInt(HConstants.REGIONSERVER_PORT, 0);
        conf.setInt(HConstants.REGIONSERVER_INFO_PORT, 0);
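        // TokenProvider issues HBase delegation tokens to Kerberos-authenticated clients.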
        conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
                TokenProvider.class.getName());
        // Secure Phoenix setup
        conf.set(QueryServerProperties.QUERY_SERVER_KERBEROS_HTTP_PRINCIPAL_ATTRIB_LEGACY,
                SPNEGO_PRINCIPAL + "@" + KDC.getRealm());
        conf.set(QueryServerProperties.QUERY_SERVER_HTTP_KEYTAB_FILENAME_ATTRIB,
                KEYTAB.getAbsolutePath());
        conf.set(QueryServerProperties.QUERY_SERVER_KERBEROS_PRINCIPAL_ATTRIB,
                PQS_PRINCIPAL + "@" + KDC.getRealm());
        conf.set(QueryServerProperties.QUERY_SERVER_KEYTAB_FILENAME_ATTRIB,
                KEYTAB.getAbsolutePath());
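        // The test drives the Kerberos login itself, so PQS does not perform its own.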
        conf.setBoolean(QueryServerProperties.QUERY_SERVER_DISABLE_KERBEROS_LOGIN, true);
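        // Default to an ephemeral PQS port unless a fixed port was explicitly requested.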
        if (System.getProperty("do.not.randomize.pqs.port") == null) {
            conf.setInt(QueryServerProperties.QUERY_SERVER_HTTP_PORT_ATTRIB, 0);
        }
        // Required so that PQS can impersonate the end-users to HBase
        conf.set("hadoop.proxyuser.phoenixqs.groups", "*");
        conf.set("hadoop.proxyuser.phoenixqs.hosts", "*");
        // Clear the cached singletons so we can inject our own.
        InstanceResolver.clearSingletons();
        // Make sure the ConnectionInfo doesn't try to pull a default Configuration
        InstanceResolver.getSingleton(ConfigurationFactory.class, new ConfigurationFactory() {
            @Override
            public Configuration getConfiguration() {
                return conf;
            }

            @Override
            public Configuration getConfiguration(Configuration confToClone) {
                Configuration copy = new Configuration(conf);
                copy.addResource(confToClone);
                return copy;
            }
        });
        updateDefaultRealm();
        // Start HDFS
        UTIL.startMiniDFSCluster(1);
        // Use LocalHBaseCluster to keep HBaseTestingUtility from doing something wrong
        // NB. I'm not actually sure what HTU does incorrectly, but this was pulled from some
        // test classes in HBase itself. I couldn't get HTU to work myself (2017/07/06)
        Path rootdir = UTIL.getDataTestDirOnTestFS(SecureQueryServerPhoenixDBIT.class.getSimpleName());
        // There is no setRootdir method that is available in all supported HBase versions.
        conf.set(HBASE_DIR, rootdir.toString());
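        // One in-process master plus one regionserver.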
        HBASE_CLUSTER = new LocalHBaseCluster(conf, 1);
        HBASE_CLUSTER.startup();
        // Then fork a thread with PQS in it.
        startQueryServer();
    }