in bigtop-packages/src/charm/spark/layer-spark/lib/charms/layer/bigtop_spark.py
def start(self):
    """
    Always start the Spark History Server. Start other services as
    required by our execution mode. Open related ports as appropriate.
    """
    host.service_start('spark-history-server')
    hookenv.open_port(self.dist_config.port('spark-history-ui'))

    # Spark master/worker is only started in standalone mode
    if hookenv.config()['spark_execution_mode'] == 'standalone':
        if host.service_start('spark-master'):
            hookenv.log("Spark Master started")
            hookenv.open_port(self.dist_config.port('spark-master-ui'))
            # If the master started and we have peers, wait 2m for recovery
            # before starting the worker. This ensures the worker binds
            # to the correct master.
            if unitdata.kv().get('sparkpeer.units'):
                hookenv.status_set('maintenance',
                                   'waiting for spark master recovery')
                hookenv.log("Waiting 2m to ensure spark master is ALIVE")
                time.sleep(120)
        else:
            hookenv.log("Spark Master did not start; this is normal "
                        "for non-leader units in standalone mode")

        # NB: Start the worker even if the master process on this unit
        # fails to start. In non-HA mode, spark master only runs on the
        # leader. On non-leader units, we still want a worker bound to
        # the leader.
        if host.service_start('spark-worker'):
            hookenv.log("Spark Worker started")
            hookenv.open_port(self.dist_config.port('spark-worker-ui'))
        else:
            hookenv.log("Spark Worker did not start")