in mdb_shard/src/shard_format.rs [411:464]
fn convert_and_save_cas_info<W: Write>(
    writer: &mut W,
    cas_content: &BTreeMap<MerkleHash, Arc<MDBCASInfo>>,
) -> Result<(
    (Vec<u64>, Vec<u32>),        // CAS Lookup Info
    (Vec<u64>, Vec<(u32, u32)>), // Chunk Lookup Info
    usize,                       // Bytes used for CAS Content Info
)> {
    // CAS info lookup table.
    let mut cas_lookup_keys = Vec::<u64>::with_capacity(cas_content.len());
    let mut cas_lookup_vals = Vec::<u32>::with_capacity(cas_content.len());

    // Chunk lookup table.
    let mut chunk_lookup_keys = Vec::<u64>::with_capacity(cas_content.len()); // may grow
    let mut chunk_lookup_vals = Vec::<(u32, u32)>::with_capacity(cas_content.len()); // may grow

    let mut index: u32 = 0;
    let mut bytes_written = 0;
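
    // `index` is the flat offset of the next CAS metadata header in the
    // serialized entry stream; each CAS block occupies one header entry plus
    // one entry per chunk, hence the `1 + content.chunks.len()` increment below.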
    for (cas_hash, content) in cas_content {
        cas_lookup_keys.push(truncate_hash(cas_hash));
        cas_lookup_vals.push(index);

        bytes_written += content.metadata.serialize(writer)?;

        for (i, chunk) in content.chunks.iter().enumerate() {
            bytes_written += chunk.serialize(writer)?;

            chunk_lookup_keys.push(truncate_hash(&chunk.chunk_hash));
            chunk_lookup_vals.push((index, i as u32));
        }

        index += 1 + content.chunks.len() as u32;
    }

    // Serialize a single bookend entry as a guard for sequential reading.
    bytes_written += CASChunkSequenceHeader::bookend().serialize(writer)?;

    // No need to sort the cas_lookup_* tables: BTreeMap iterates in key order,
    // and truncate_hash keeps the first 8 bytes of each hash, so the truncated
    // keys come out already sorted.

    // Sort the chunk lookup table by key.
    let mut chunk_lookup_combined =
        chunk_lookup_keys.iter().zip(chunk_lookup_vals.iter()).collect::<Vec<_>>();
    chunk_lookup_combined.sort_unstable_by_key(|&(k, _)| k);
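    // `sort_unstable_by_key` is fine here: entries with equal truncated keys
    // may be reordered relative to each other, but each entry carries its own
    // (index, i) value, so no pairing information is lost.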

    Ok((
        (cas_lookup_keys, cas_lookup_vals),
        (
            chunk_lookup_combined.iter().map(|&(k, _)| *k).collect(),
            chunk_lookup_combined.iter().map(|&(_, v)| *v).collect(),
        ),
        bytes_written,
    ))
}
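
// Reader-side sketch (not part of this file): because the chunk lookup table
// is sorted by truncated hash above, a reader can resolve a chunk hash to its
// owning CAS entry with a binary search. `lookup_chunk` is a hypothetical
// helper operating on the (keys, vals) pair returned above; it assumes `query`
// was produced by the same `truncate_hash`, and returns every candidate match,
// since 8-byte truncated keys can collide and the caller must verify full hashes.
fn lookup_chunk(keys: &[u64], vals: &[(u32, u32)], query: u64) -> Vec<(u32, u32)> {
    // `partition_point` finds the first index whose key is >= query; this is
    // valid only because the writer sorted `keys` ascending.
    let start = keys.partition_point(|&k| k < query);
    keys[start..]
        .iter()
        .zip(&vals[start..])
        .take_while(|&(&k, _)| k == query)
        .map(|(_, &v)| v) // (cas_header_entry_offset, chunk_index_within_cas)
        .collect()
}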