// sources/api/migration/migrator/src/main.rs [224:330]
fn run_migrations<P, S>(
repository: &tough::Repository,
direction: Direction,
migrations: &[S],
source_datastore: P,
new_version: &Version,
) -> Result<PathBuf>
where
P: AsRef<Path>,
S: AsRef<str>,
{
// We start with the given source_datastore, updating this after each migration to point to the
// output of the previous one.
let mut source_datastore = source_datastore.as_ref();
// We create a new data store (below) to serve as the target of each migration. (Start at
// source just to have the right type; we know we have migrations at this point.)
let mut target_datastore = source_datastore.to_owned();
// Any data stores we create that aren't the final one, i.e. intermediate data stores, will be
// removed at the end. (If we fail and return early, they're left for debugging purposes.)
let mut intermediate_datastores = HashSet::new();
for migration in migrations {
let migration = migration.as_ref();
let migration = migration
.try_into()
.context(error::TargetName { target: migration })?;
// get the migration from the repo
let lz4_bytes = repository
.read_target(&migration)
.context(error::LoadMigration {
migration: migration.raw(),
})?
.context(error::MigrationNotFound {
migration: migration.raw(),
})?;
// Add an LZ4 decoder so the bytes will be deflated on read
let mut reader = lz4::Decoder::new(lz4_bytes).context(error::Lz4Decode {
migration: migration.raw(),
})?;
// Create a sealed command with pentacle, so we can run the verified bytes from memory
let mut command =
pentacle::SealedCommand::new(&mut reader).context(error::SealMigration)?;
// Point each migration in the right direction, and at the given data store.
command.arg(direction.to_string());
command.args(&[
"--source-datastore".to_string(),
source_datastore.display().to_string(),
]);
// Create a new output location for this migration.
target_datastore = new_datastore_location(&source_datastore, &new_version)?;
intermediate_datastores.insert(target_datastore.clone());
command.args(&[
"--target-datastore".to_string(),
target_datastore.display().to_string(),
]);
info!("Running migration command: {:?}", command);
let output = command.output().context(error::StartMigration)?;
if !output.stdout.is_empty() {
debug!(
"Migration stdout: {}",
String::from_utf8_lossy(&output.stdout)
);
} else {
debug!("No migration stdout");
}
if !output.stderr.is_empty() {
let stderr = String::from_utf8_lossy(&output.stderr);
// We want to see migration stderr on the console, so log at error level.
error!("Migration stderr: {}", stderr);
} else {
debug!("No migration stderr");
}
ensure!(output.status.success(), error::MigrationFailure { output });
source_datastore = &target_datastore;
}
// Remove the intermediate data stores
intermediate_datastores.remove(&target_datastore);
for intermediate_datastore in intermediate_datastores {
// Even if we fail to remove an intermediate data store, we've still migrated
// successfully, and we don't want to fail the upgrade - just let someone know for
// later cleanup.
trace!(
"Removing intermediate data store at {}",
intermediate_datastore.display()
);
if let Err(e) = fs::remove_dir_all(&intermediate_datastore) {
error!(
"Failed to remove intermediate data store at '{}': {}",
intermediate_datastore.display(),
e
);
}
}
Ok(target_datastore)
}