in bigtop-packages/src/charm/hadoop/layer-hadoop-namenode/reactive/namenode.py
def install_namenode():
    hookenv.status_set('maintenance', 'installing namenode')
    bigtop = Bigtop()
    hdfs_port = get_layer_opts().port('namenode')
    webhdfs_port = get_layer_opts().port('nn_webapp_http')
    bigtop.render_site_yaml(
        hosts={
            'namenode': get_fqdn(),
        },
        roles=[
            'namenode',
            'mapred-app',
        ],
        # NB: We want the NN to listen on all interfaces, so bind to 0.0.0.0.
        overrides={
            'hadoop::common_hdfs::hadoop_namenode_port': hdfs_port,
            'hadoop::common_hdfs::hadoop_namenode_bind_host': '0.0.0.0',
            'hadoop::common_hdfs::hadoop_namenode_http_port': webhdfs_port,
            'hadoop::common_hdfs::hadoop_namenode_http_bind_host': '0.0.0.0',
            'hadoop::common_hdfs::hadoop_namenode_https_bind_host': '0.0.0.0',
        }
    )
    bigtop.trigger_puppet()
    # /etc/hosts entries from the KV are not currently used for bigtop,
    # but a hosts_map attribute is required by some interfaces (eg: dfs-slave)
    # to signify NN's readiness. Set our NN info in the KV to fulfill this
    # requirement.
    utils.initialize_kv_host()
    # We need to create the 'mapred' and 'spark' user/group since we may not
    # be installing hadoop-mapreduce or spark on this machine. This is needed
    # so the namenode can access yarn and spark job history files in hdfs. Also
    # add our ubuntu user to the hadoop, mapred, and spark groups.
    get_layer_opts().add_users()
    set_state('apache-bigtop-namenode.installed')
    hookenv.status_set('maintenance', 'namenode installed')
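
For context, the excerpt omits the charms.reactive decorators that would normally gate this handler. A minimal wiring sketch under that assumption follows; 'apache-bigtop-namenode.installed' matches the set_state() call above, while the 'bigtop.available' prerequisite state is an assumption for illustration, not taken from this excerpt:

# Sketch only: plausible charms.reactive gating for the handler above.
# 'apache-bigtop-namenode.installed' comes from the set_state() call in the
# excerpt; 'bigtop.available' is an assumed prerequisite state.
from charms.reactive import when, when_not

@when('bigtop.available')
@when_not('apache-bigtop-namenode.installed')
def install_namenode():
    ...  # body as above; ends by setting 'apache-bigtop-namenode.installed'

Gating on when_not('apache-bigtop-namenode.installed') would make the install idempotent: once the handler sets that state on its final lines, the framework stops re-dispatching it.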