in dm-cache-target.c [2352:2546]
static int cache_create(struct cache_args *ca, struct cache **result)
{
	int r = 0;
	char **error = &ca->ti->error;
	struct cache *cache;
	struct dm_target *ti = ca->ti;
	dm_block_t origin_blocks;
	struct dm_cache_metadata *cmd;
	bool may_format = ca->features.mode == CM_WRITE;
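
	/*
	 * Once the cache object exists, every failure path jumps to 'bad',
	 * which hands the partially built object to destroy() for teardown.
	 */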
	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (!cache)
		return -ENOMEM;

	cache->ti = ca->ti;
	ti->private = cache;
	ti->num_flush_bios = 2;
	ti->flush_supported = true;

	ti->num_discard_bios = 1;
	ti->discards_supported = true;

	ti->per_io_data_size = sizeof(struct per_bio_data);

	cache->features = ca->features;
	if (writethrough_mode(cache)) {
		/* Create bioset for writethrough bios issued to origin */
		r = bioset_init(&cache->bs, BIO_POOL_SIZE, 0, 0);
		if (r)
			goto bad;
	}
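
	/*
	 * Ownership of the three devices moves to the cache; the cache_args
	 * pointers are cleared so destroy_cache_args() won't release them.
	 */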
	cache->metadata_dev = ca->metadata_dev;
	cache->origin_dev = ca->origin_dev;
	cache->cache_dev = ca->cache_dev;

	ca->metadata_dev = ca->origin_dev = ca->cache_dev = NULL;

	origin_blocks = cache->origin_sectors = ca->origin_sectors;
	origin_blocks = block_div(origin_blocks, ca->block_size);
	cache->origin_blocks = to_oblock(origin_blocks);

	cache->sectors_per_block = ca->block_size;
	if (dm_set_target_max_io_len(ti, cache->sectors_per_block)) {
		r = -EINVAL;
		goto bad;
	}
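
	/*
	 * A power-of-two block size lets sector-to-block conversions use a
	 * cheap shift; otherwise sectors_per_block_shift is set to -1 and
	 * full division (block_div) has to be used.
	 */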
	if (ca->block_size & (ca->block_size - 1)) {
		dm_block_t cache_size = ca->cache_sectors;

		cache->sectors_per_block_shift = -1;
		cache_size = block_div(cache_size, ca->block_size);
		set_cache_size(cache, to_cblock(cache_size));
	} else {
		cache->sectors_per_block_shift = __ffs(ca->block_size);
		set_cache_size(cache, to_cblock(ca->cache_sectors >> cache->sectors_per_block_shift));
	}
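
	/*
	 * The policy must exist before the metadata is opened below, since
	 * dm_cache_metadata_open() needs the policy's per-block hint size.
	 */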
	r = create_cache_policy(cache, ca, error);
	if (r)
		goto bad;

	cache->policy_nr_args = ca->policy_argc;
	cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;

	r = set_config_values(cache, ca->policy_argc, ca->policy_argv);
	if (r) {
		*error = "Error setting cache policy's config values";
		goto bad;
	}
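
	/*
	 * may_format is only true when the table asked for write mode, so a
	 * read-only table can never (re)format the metadata device.
	 */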
	cmd = dm_cache_metadata_open(cache->metadata_dev->bdev,
				     ca->block_size, may_format,
				     dm_cache_policy_get_hint_size(cache->policy),
				     ca->features.metadata_version);
	if (IS_ERR(cmd)) {
		*error = "Error creating metadata object";
		r = PTR_ERR(cmd);
		goto bad;
	}
	cache->cmd = cmd;
	set_cache_mode(cache, CM_WRITE);
	if (get_cache_mode(cache) != CM_WRITE) {
		*error = "Unable to get write access to metadata, please check/repair metadata.";
		r = -EINVAL;
		goto bad;
	}
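
	/*
	 * Passthrough mode serves all I/O from the origin, so a dirty cache
	 * block would hide newer data.  Require a fully clean cache and stop
	 * the policy migrating blocks behind our back.
	 */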
	if (passthrough_mode(cache)) {
		bool all_clean;

		r = dm_cache_metadata_all_clean(cache->cmd, &all_clean);
		if (r) {
			*error = "dm_cache_metadata_all_clean() failed";
			goto bad;
		}

		if (!all_clean) {
			*error = "Cannot enter passthrough mode unless all blocks are clean";
			r = -EINVAL;
			goto bad;
		}

		policy_allow_migrations(cache->policy, false);
	}

	spin_lock_init(&cache->lock);
	bio_list_init(&cache->deferred_bios);
	atomic_set(&cache->nr_allocated_migrations, 0);
	atomic_set(&cache->nr_io_migrations, 0);
	init_waitqueue_head(&cache->migration_wait);
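
	/*
	 * Several of the allocations below jump straight to 'bad' without
	 * setting r themselves, so preload the -ENOMEM result here.
	 */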
	r = -ENOMEM;
	atomic_set(&cache->nr_dirty, 0);
	cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
	if (!cache->dirty_bitset) {
		*error = "could not allocate dirty bitset";
		goto bad;
	}
	clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size));
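
	/*
	 * Discards are tracked at their own (at least cache-block sized)
	 * granularity; the bitset covers the whole origin in those units.
	 */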
	cache->discard_block_size =
		calculate_discard_block_size(cache->sectors_per_block,
					     cache->origin_sectors);
	cache->discard_nr_blocks = to_dblock(dm_sector_div_up(cache->origin_sectors,
							      cache->discard_block_size));
	cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks));
	if (!cache->discard_bitset) {
		*error = "could not allocate discard bitset";
		goto bad;
	}
	clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));
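
	/* kcopyd does the actual copying for promotions and demotions. */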
	cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
	if (IS_ERR(cache->copier)) {
		*error = "could not create kcopyd client";
		r = PTR_ERR(cache->copier);
		goto bad;
	}
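
	/*
	 * WQ_MEM_RECLAIM: the workqueue sits in the I/O path, so it must be
	 * able to make forward progress under memory pressure.
	 */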
	cache->wq = alloc_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM, 0);
	if (!cache->wq) {
		*error = "could not create workqueue for metadata object";
		goto bad;
	}
	INIT_WORK(&cache->deferred_bio_worker, process_deferred_bios);
	INIT_WORK(&cache->migration_worker, check_migrations);
	INIT_DELAYED_WORK(&cache->waker, do_waker);
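
	/* Bio prison cells give per-block locking between bios and migrations. */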
	cache->prison = dm_bio_prison_create_v2(cache->wq);
	if (!cache->prison) {
		*error = "could not create bio prison";
		goto bad;
	}

	r = mempool_init_slab_pool(&cache->migration_pool, MIGRATION_POOL_SIZE,
				   migration_cache);
	if (r) {
		*error = "Error creating cache's migration mempool";
		goto bad;
	}
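
	/*
	 * Nothing is sized or loaded yet; cache_preresume() later reads the
	 * mappings and discards back in from the metadata.
	 */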
	cache->need_tick_bio = true;
	cache->sized = false;
	cache->invalidate = false;
	cache->commit_requested = false;
	cache->loaded_mappings = false;
	cache->loaded_discards = false;

	load_stats(cache);

	atomic_set(&cache->stats.demotion, 0);
	atomic_set(&cache->stats.promotion, 0);
	atomic_set(&cache->stats.copies_avoided, 0);
	atomic_set(&cache->stats.cache_cell_clash, 0);
	atomic_set(&cache->stats.commit_count, 0);
	atomic_set(&cache->stats.discard_count, 0);

	spin_lock_init(&cache->invalidation_lock);
	INIT_LIST_HEAD(&cache->invalidation_requests);

	batcher_init(&cache->committer, commit_op, cache,
		     issue_op, cache, cache->wq);
	dm_iot_init(&cache->tracker);

	init_rwsem(&cache->background_work_lock);
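
	/*
	 * Background work (writeback etc.) stays disabled until resume
	 * re-enables it.
	 */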
	prevent_background_work(cache);

	*result = cache;
	return 0;
bad:
	destroy(cache);
	return r;
}
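
A minimal userspace sketch of the block-size arithmetic above, for illustration only (not driver code): when the block size is a power of two, the block count comes from a shift, otherwise from a division. The helper name nr_cache_blocks is hypothetical; __builtin_ctzll stands in for the kernel's __ffs, and plain integer division for block_div().

#include <stdio.h>
#include <stdint.h>

/* Hypothetical helper mirroring the power-of-two branch in cache_create(). */
static uint64_t nr_cache_blocks(uint64_t cache_sectors, uint64_t block_size)
{
	if (block_size & (block_size - 1))		/* not a power of two */
		return cache_sectors / block_size;	/* generic division */

	/* power of two: count-trailing-zeros gives the shift, like __ffs */
	return cache_sectors >> __builtin_ctzll(block_size);
}

int main(void)
{
	/* 128-sector (64 KiB) blocks: fast path, shift by 7 -> 8192 */
	printf("%llu\n", (unsigned long long)nr_cache_blocks(1ULL << 20, 128));
	/* 96-sector blocks: not a power of two, division -> 10922 */
	printf("%llu\n", (unsigned long long)nr_cache_blocks(1ULL << 20, 96));
	return 0;
}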