in ambari-infra-solr-client/src/main/python/migrationHelper.py [0:0]
def create_backup_collections(options, accessor, parser, config, service_filter):
  """
  Create the empty "backup" target collections that restored index data will be
  loaded into.

  For each enabled service (Ranger / Atlas per service_filter) and each of its
  original collections: if the collection existed at backup time and was not
  empty, ensure a matching backup collection exists (honoring the --collection
  filter); otherwise report why it is skipped.

  accessor and parser are accepted for signature compatibility with the other
  migration-helper commands but are not used here.
  """
  collections_json_location = COLLECTIONS_DATA_JSON_LOCATION.format("before_restore_collections.json")
  # Doc counts captured at backup time: an empty original needs no backup collection.
  num_docs_map = get_number_of_docs_map(COLLECTIONS_DATA_JSON_LOCATION.format("backup_collections.json"))
  collections = list_collections(options, config, collections_json_location)
  replica_number = "1" # hard coded

  def _ensure_backup_collection(original_collection, backup_collection, config_set, shards, max_shards):
    # Create backup_collection for original_collection unless it is unnecessary
    # (original absent or empty at backup time), already present, or excluded
    # by the --collection filter.
    if original_collection not in collections:
      return
    if is_collection_empty(num_docs_map, original_collection):
      print("Collection '{0}' was empty during backup. It won't need a backup collection.".format(original_collection))
      return
    if backup_collection in collections:
      # NOTE: the pre-refactor code printed the wrong collection name here for
      # the Atlas vertex index (it reused the fulltext backup name); the shared
      # helper now always reports the collection actually being skipped.
      print("Collection {0} has already exist. Skipping create operation.".format(backup_collection))
      return
    if options.collection is not None and options.collection != backup_collection:
      print("Collection {0} has filtered out. Skipping create operation.".format(backup_collection))
      return
    solr_urls = get_solr_urls(options, config, backup_collection, collections_json_location)
    retry(create_collection, options, config, solr_urls, backup_collection, config_set,
          shards, replica_number, max_shards, context="[Create Solr Collections]")

  if is_ranger_available(config, service_filter):
    _ensure_backup_collection(
      config.get('ranger_collection', 'ranger_collection_name'),
      config.get('ranger_collection', 'backup_ranger_collection_name'),
      config.get('ranger_collection', 'backup_ranger_config_set_name'),
      config.get('ranger_collection', 'ranger_collection_shards'),
      config.get('ranger_collection', 'ranger_collection_max_shards_per_node'))

  if is_atlas_available(config, service_filter):
    # All three Atlas backup collections share one config set.
    backup_atlas_config_set = config.get('atlas_collections', 'config_set')
    _ensure_backup_collection(
      config.get('atlas_collections', 'fulltext_index_name'),
      config.get('atlas_collections', 'backup_fulltext_index_name'),
      backup_atlas_config_set,
      config.get('atlas_collections', 'fulltext_index_shards'),
      config.get('atlas_collections', 'fulltext_index_max_shards_per_node'))
    _ensure_backup_collection(
      config.get('atlas_collections', 'edge_index_name'),
      config.get('atlas_collections', 'backup_edge_index_name'),
      backup_atlas_config_set,
      config.get('atlas_collections', 'edge_index_shards'),
      config.get('atlas_collections', 'edge_index_max_shards_per_node'))
    _ensure_backup_collection(
      config.get('atlas_collections', 'vertex_index_name'),
      config.get('atlas_collections', 'backup_vertex_index_name'),
      backup_atlas_config_set,
      config.get('atlas_collections', 'vertex_index_shards'),
      config.get('atlas_collections', 'vertex_index_max_shards_per_node'))