# bigtop-packages/src/charm/hadoop/layer-hadoop-resourcemanager/reactive/resourcemanager.py

# NOTE: imports reconstructed from the names used below; these module paths
# are the conventional ones for Juju Bigtop charms and are an assumption here.
from charmhelpers.core import hookenv
from charms.layer.apache_bigtop_base import Bigtop, get_fqdn, get_layer_opts
from charms.reactive import set_state
from jujubigdata import utils

def install_resourcemanager(namenode):
    """Install if the namenode has sent its FQDN.

    We only need the namenode FQDN to perform the RM install, so poll for
    namenodes() data whenever we have a namenode relation. This allows us to
    install asap, even if 'namenode.ready' is not set yet. (See the gating
    sketch after this function for how that polling is typically wired up.)
    """
    if namenode.namenodes():
        hookenv.status_set('maintenance', 'installing resourcemanager')
        # Hosts
        nn_host = namenode.namenodes()[0]
        rm_host = get_fqdn()
        # Ports
        rm_ipc = get_layer_opts().port('resourcemanager')
        rm_http = get_layer_opts().port('rm_webapp_http')
        jh_ipc = get_layer_opts().port('jobhistory')
        jh_http = get_layer_opts().port('jh_webapp_http')
        hdfs_port = namenode.port()
        webhdfs_port = namenode.webhdfs_port()
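        # The RM and job history ports above come from this charm's layer
        # options, while the HDFS ports are provided over the namenode
        # relation (get_layer_opts() reading the layer's port definitions is
        # typical apache-bigtop-base behavior, assumed here).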
        bigtop = Bigtop()
        bigtop.render_site_yaml(
            hosts={
                'namenode': nn_host,
                'resourcemanager': rm_host,
            },
            roles=[
                'resourcemanager',
            ],
            # NB: When we colocate the NN and RM, the RM will run puppet
            # apply last. To ensure we don't lose any hdfs-site.xml data set
            # by the NN, override common_hdfs properties again here.
            overrides={
                'hadoop::common_yarn::hadoop_rm_port': rm_ipc,
                'hadoop::common_yarn::hadoop_rm_webapp_port': rm_http,
                'hadoop::common_yarn::hadoop_rm_bind_host': '0.0.0.0',
                'hadoop::common_mapred_app::mapreduce_jobhistory_host': '0.0.0.0',
                'hadoop::common_mapred_app::mapreduce_jobhistory_port': jh_ipc,
                'hadoop::common_mapred_app::mapreduce_jobhistory_webapp_port': jh_http,
                'hadoop::common_hdfs::hadoop_namenode_port': hdfs_port,
                'hadoop::common_hdfs::hadoop_namenode_bind_host': '0.0.0.0',
                'hadoop::common_hdfs::hadoop_namenode_http_port': webhdfs_port,
                'hadoop::common_hdfs::hadoop_namenode_http_bind_host': '0.0.0.0',
                'hadoop::common_hdfs::hadoop_namenode_https_bind_host': '0.0.0.0',
            }
        )
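        # trigger_puppet() applies Bigtop's puppet manifests against the site
        # yaml rendered above; that step performs the actual install and
        # configuration (standard bigtop base layer behavior, assumed here).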
        bigtop.trigger_puppet()
        # /etc/hosts entries from the KV are not currently used for bigtop,
        # but a hosts_map attribute is required by some interfaces (e.g.,
        # mapred-slave) to signify the RM's readiness. Set our RM info in the
        # KV to fulfill this requirement.
        utils.initialize_kv_host()
        # We need to create the 'spark' user/group since we may not be
        # installing spark on this machine. This is needed so the history
        # server can access spark job history files in hdfs. Also add our
        # ubuntu user to the hadoop, mapred, and spark groups on this machine.
        get_layer_opts().add_users()
        set_state('apache-bigtop-resourcemanager.installed')
        hookenv.status_set('maintenance', 'resourcemanager installed')
    else:
        hookenv.status_set('waiting', 'waiting for namenode fqdn')
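
# Illustrative sketch only, not part of this file: the function above is a
# charms.reactive handler, and the docstring's "poll whenever we have a
# namenode relation" behavior comes from decorator gating along these lines.
# The exact state names are assumptions for illustration, so the sketch is
# left commented out rather than presented as the charm's actual code.
#
#     from charms.reactive import when, when_not
#
#     @when('bigtop.available', 'namenode.joined')
#     @when_not('apache-bigtop-resourcemanager.installed')
#     def install_resourcemanager(namenode):
#         ...
#
# With gating like this, the handler fires on every hook while the namenode
# relation exists but the 'installed' state is unset, which is why the body
# checks namenode.namenodes() and simply reports 'waiting' until the FQDN
# arrives.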