Excerpt: static int do_write_index() — from read-cache.c, lines 2829–3133.


/*
 * Serialize the in-memory index "istate" into "tempfile": header, cache
 * entries, optional extensions, and a trailing checksum.
 *
 * "write_extensions" is a bitmask selecting which optional extensions
 * (split index, cache tree, resolve-undo, untracked cache, fsmonitor)
 * to emit; the IEOT, EOIE and sparse-directory extensions are written
 * based on state, not this mask.  "flags" is consulted only for
 * COMMIT_LOCK, which decides whether the final checksum write also
 * fsyncs.
 *
 * Returns 0 on success and a negative value on error.  On success the
 * tempfile is closed and istate->timestamp is refreshed from its mtime;
 * the caller is responsible for renaming/committing the tempfile.
 */
static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
			  enum write_extensions write_extensions, unsigned flags)
{
	uint64_t start = getnanotime();
	struct hashfile *f;
	struct git_hash_ctx *eoie_c = NULL;
	struct cache_header hdr;
	int i, err = 0, removed, extended, hdr_version;
	struct cache_entry **cache = istate->cache;
	int entries = istate->cache_nr;
	struct stat st;
	struct ondisk_cache_entry ondisk;
	struct strbuf previous_name_buf = STRBUF_INIT, *previous_name;
	int drop_cache_tree = istate->drop_cache_tree;
	off_t offset;
	int csum_fsync_flag;
	int ieot_entries = 1;
	struct index_entry_offset_table *ieot = NULL;
	struct repository *r = istate->repo;
	struct strbuf sb = STRBUF_INIT;
	int nr, nr_threads, ret;

	/*
	 * All writes go through the hashfile so the trailing checksum
	 * covers everything emitted below.
	 */
	f = hashfd(the_repository->hash_algo, tempfile->fd, tempfile->filename.buf);

	prepare_repo_settings(r);
	f->skip_hash = r->settings.index_skip_hash;

	/*
	 * Count entries marked for removal (they are skipped below, so the
	 * header entry count must exclude them) and recompute CE_EXTENDED:
	 * only entries that actually carry extended flags keep it, which
	 * lets us demote the on-disk format to version 2 when possible.
	 */
	for (i = removed = extended = 0; i < entries; i++) {
		if (cache[i]->ce_flags & CE_REMOVE)
			removed++;

		/* reduce extended entries if possible */
		cache[i]->ce_flags &= ~CE_EXTENDED;
		if (cache[i]->ce_flags & CE_EXTENDED_FLAGS) {
			extended++;
			cache[i]->ce_flags |= CE_EXTENDED;
		}
	}

	if (!istate->version)
		istate->version = get_index_format_default(r);

	/*
	 * Demote version 3 to version 2 when the latter suffices (and,
	 * conversely, promote 2 to 3 when extended flags are present).
	 */
	if (istate->version == 3 || istate->version == 2)
		istate->version = extended ? 3 : 2;

	hdr_version = istate->version;

	hdr.hdr_signature = htonl(CACHE_SIGNATURE);
	hdr.hdr_version = htonl(hdr_version);
	hdr.hdr_entries = htonl(entries - removed);

	hashwrite(f, &hdr, sizeof(hdr));

	/* Fall back to single-threaded when threads are unavailable or unconfigured. */
	if (!HAVE_THREADS || repo_config_get_index_threads(the_repository, &nr_threads))
		nr_threads = 1;

	/*
	 * Decide whether to write an Index Entry Offset Table (IEOT), which
	 * lets a later reader load entry blocks in parallel.
	 */
	if (nr_threads != 1 && record_ieot()) {
		int ieot_blocks, cpus;

		/*
		 * ensure default number of ieot blocks maps evenly to the
		 * default number of threads that will process them leaving
		 * room for the thread to load the index extensions.
		 */
		if (!nr_threads) {
			ieot_blocks = istate->cache_nr / THREAD_COST;
			cpus = online_cpus();
			if (ieot_blocks > cpus - 1)
				ieot_blocks = cpus - 1;
		} else {
			/* Explicitly configured thread count: one block per thread, capped by entry count. */
			ieot_blocks = nr_threads;
			if (ieot_blocks > istate->cache_nr)
				ieot_blocks = istate->cache_nr;
		}

		/*
		 * no reason to write out the IEOT extension if we don't
		 * have enough blocks to utilize multi-threading
		 */
		if (ieot_blocks > 1) {
			/* Struct with flexible-style trailing array of per-block offsets. */
			ieot = xcalloc(1, sizeof(struct index_entry_offset_table)
				+ (ieot_blocks * sizeof(struct index_entry_offset)));
			ieot_entries = DIV_ROUND_UP(entries, ieot_blocks);
		}
	}

	/* Offset of the first cache entry (right after the header). */
	offset = hashfile_total(f);

	nr = 0;
	/*
	 * Version 4 compresses entry names against the previous entry's
	 * name; the strbuf carries that previous name between iterations.
	 */
	previous_name = (hdr_version == 4) ? &previous_name_buf : NULL;

	for (i = 0; i < entries; i++) {
		struct cache_entry *ce = cache[i];
		if (ce->ce_flags & CE_REMOVE)
			continue;
		/*
		 * Entries whose stat data matches the index mtime could race
		 * with in-flight modifications; smudge them so a later read
		 * re-verifies their content.
		 */
		if (!ce_uptodate(ce) && is_racy_timestamp(istate, ce))
			ce_smudge_racily_clean_entry(istate, ce);
		if (is_null_oid(&ce->oid)) {
			static const char msg[] = "cache entry has null sha1: %s";
			static int allow = -1;

			/* Cache the env lookup across calls (checked once per process). */
			if (allow < 0)
				allow = git_env_bool("GIT_ALLOW_NULL_SHA1", 0);
			if (allow)
				warning(msg, ce->name);
			else
				err = error(msg, ce->name);

			/* A null OID invalidates any cached tree built from these entries. */
			drop_cache_tree = 1;
		}
		/* At each block boundary, record the finished block in the IEOT. */
		if (ieot && i && (i % ieot_entries == 0)) {
			ieot->entries[ieot->nr].nr = nr;
			ieot->entries[ieot->nr].offset = offset;
			ieot->nr++;
			/*
			 * If we have a V4 index, set the first byte to an invalid
			 * character to ensure there is nothing common with the previous
			 * entry
			 */
			if (previous_name)
				previous_name->buf[0] = 0;
			nr = 0;

			offset = hashfile_total(f);
		}
		if (ce_write_entry(f, ce, previous_name, (struct ondisk_cache_entry *)&ondisk) < 0)
			err = -1;

		/* Stop on the first error, including one from the null-OID check above. */
		if (err)
			break;
		nr++;
	}
	/* Flush the final, possibly partial, IEOT block. */
	if (ieot && nr) {
		ieot->entries[ieot->nr].nr = nr;
		ieot->entries[ieot->nr].offset = offset;
		ieot->nr++;
	}
	strbuf_release(&previous_name_buf);

	if (err) {
		ret = err;
		goto out;
	}

	/* End of the entry section; the EOIE extension records this offset. */
	offset = hashfile_total(f);

	/*
	 * The extension headers must be hashed on their own for the
	 * EOIE extension. Create a hashfile here to compute that hash.
	 */
	if (offset && record_eoie()) {
		CALLOC_ARRAY(eoie_c, 1);
		the_hash_algo->init_fn(eoie_c);
	}

	/*
	 * Lets write out CACHE_EXT_INDEXENTRYOFFSETTABLE first so that we
	 * can minimize the number of extensions we have to scan through to
	 * find it during load.  Write it out regardless of the
	 * strip_extensions parameter as we need it when loading the shared
	 * index.
	 */
	if (ieot) {
		strbuf_reset(&sb);

		write_ieot_extension(&sb, ieot);
		err = write_index_ext_header(f, eoie_c, CACHE_EXT_INDEXENTRYOFFSETTABLE, sb.len) < 0;
		hashwrite(f, sb.buf, sb.len);
		if (err) {
			ret = -1;
			goto out;
		}
	}

	if (write_extensions & WRITE_SPLIT_INDEX_EXTENSION &&
	    istate->split_index) {
		strbuf_reset(&sb);

		/* Split index and sparse index are mutually exclusive on disk. */
		if (istate->sparse_index)
			die(_("cannot write split index for a sparse index"));

		err = write_link_extension(&sb, istate) < 0 ||
			write_index_ext_header(f, eoie_c, CACHE_EXT_LINK,
					       sb.len) < 0;
		hashwrite(f, sb.buf, sb.len);
		if (err) {
			ret = -1;
			goto out;
		}
	}
	if (write_extensions & WRITE_CACHE_TREE_EXTENSION &&
	    !drop_cache_tree && istate->cache_tree) {
		strbuf_reset(&sb);

		cache_tree_write(&sb, istate->cache_tree);
		err = write_index_ext_header(f, eoie_c, CACHE_EXT_TREE, sb.len) < 0;
		hashwrite(f, sb.buf, sb.len);
		if (err) {
			ret = -1;
			goto out;
		}
	}
	if (write_extensions & WRITE_RESOLVE_UNDO_EXTENSION &&
	    istate->resolve_undo) {
		strbuf_reset(&sb);

		resolve_undo_write(&sb, istate->resolve_undo, the_hash_algo);
		err = write_index_ext_header(f, eoie_c, CACHE_EXT_RESOLVE_UNDO,
					     sb.len) < 0;
		hashwrite(f, sb.buf, sb.len);
		if (err) {
			ret = -1;
			goto out;
		}
	}
	if (write_extensions & WRITE_UNTRACKED_CACHE_EXTENSION &&
	    istate->untracked) {
		strbuf_reset(&sb);

		write_untracked_extension(&sb, istate->untracked);
		err = write_index_ext_header(f, eoie_c, CACHE_EXT_UNTRACKED,
					     sb.len) < 0;
		hashwrite(f, sb.buf, sb.len);
		if (err) {
			ret = -1;
			goto out;
		}
	}
	if (write_extensions & WRITE_FSMONITOR_EXTENSION &&
	    istate->fsmonitor_last_update) {
		strbuf_reset(&sb);

		write_fsmonitor_extension(&sb, istate);
		err = write_index_ext_header(f, eoie_c, CACHE_EXT_FSMONITOR, sb.len) < 0;
		hashwrite(f, sb.buf, sb.len);
		if (err) {
			ret = -1;
			goto out;
		}
	}
	/* Zero-length marker extension: its presence alone flags a sparse index. */
	if (istate->sparse_index) {
		if (write_index_ext_header(f, eoie_c, CACHE_EXT_SPARSE_DIRECTORIES, 0) < 0) {
			ret = -1;
			goto out;
		}
	}

	/*
	 * CACHE_EXT_ENDOFINDEXENTRIES must be written as the last entry before the SHA1
	 * so that it can be found and processed before all the index entries are
	 * read.  Write it out regardless of the strip_extensions parameter as we need it
	 * when loading the shared index.
	 */
	if (eoie_c) {
		strbuf_reset(&sb);

		/* NULL ctx: the EOIE extension must not hash its own header. */
		write_eoie_extension(&sb, eoie_c, offset);
		err = write_index_ext_header(f, NULL, CACHE_EXT_ENDOFINDEXENTRIES, sb.len) < 0;
		hashwrite(f, sb.buf, sb.len);
		if (err) {
			ret = -1;
			goto out;
		}
	}

	/*
	 * Only fsync when this write is the final, committed index (not an
	 * alternate output and the caller holds the lock to commit).
	 */
	csum_fsync_flag = 0;
	if (!alternate_index_output && (flags & COMMIT_LOCK))
		csum_fsync_flag = CSUM_FSYNC;

	/* Appends the accumulated checksum and records it in istate->oid. */
	finalize_hashfile(f, istate->oid.hash, FSYNC_COMPONENT_INDEX,
			  CSUM_HASH_IN_STREAM | csum_fsync_flag);
	f = NULL;	/* finalize_hashfile freed it; don't free again at out: */

	if (close_tempfile_gently(tempfile)) {
		ret = error(_("could not close '%s'"), get_tempfile_path(tempfile));
		goto out;
	}
	/*
	 * Remember the file's mtime so later reads can detect racily-clean
	 * entries against this write.
	 */
	if (stat(get_tempfile_path(tempfile), &st)) {
		ret = -1;
		goto out;
	}
	istate->timestamp.sec = (unsigned int)st.st_mtime;
	istate->timestamp.nsec = ST_MTIME_NSEC(st);
	trace_performance_since(start, "write index, changed mask = %x", istate->cache_changed);

	/*
	 * TODO trace2: replace "the_repository" with the actual repo instance
	 * that is associated with the given "istate".
	 */
	trace2_data_intmax("index", the_repository, "write/version",
			   istate->version);
	trace2_data_intmax("index", the_repository, "write/cache_nr",
			   istate->cache_nr);

	ret = 0;

out:
	/* f is non-NULL only if we bailed out before finalize_hashfile(). */
	if (f)
		free_hashfile(f);
	strbuf_release(&sb);
	free(eoie_c);
	free(ieot);
	return ret;
}