# eden/scm/edenscm/mercurial/merge.py
def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None, ancestors=None):
"""apply the merge action list to the working directory
wctx is the working copy context
mctx is the context to be merged into the working copy
Return a tuple of counts (updated, merged, removed, unresolved) that
describes how many files were affected by the update.
"""
perftrace.tracevalue("Actions", sum(len(v) for k, v in pycompat.iteritems(actions)))
updated, merged, removed = 0, 0, 0
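    # Start from a clean merge state recording both sides of the merge (and
    # any ancestors) so unresolved files can be re-merged later via
    # 'hg resolve'.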
ms = mergestate.clean(
repo,
node=wctx.p1().node(),
other=mctx.node(),
# Ancestor can include the working copy, so we use this helper:
ancestors=[scmutil.contextnodesupportingwdir(c) for c in ancestors]
if ancestors
else None,
labels=labels,
)
moves = []
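    # Sort each action list so files are processed in a deterministic order.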
    for m, l in actions.items():
        l.sort()

    # 'cd' and 'dc' actions are treated like other merge conflicts
mergeactions = sorted(actions["cd"])
mergeactions.extend(sorted(actions["dc"]))
mergeactions.extend(actions["m"])
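    # Register each conflicting file with the merge state, capturing the
    # local, other, and ancestor file contexts (a placeholder filectx is
    # used for sides where the file is absent).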
for f, args, msg in mergeactions:
f1, f2, fa, move, anc = args
if f1 is None:
fcl = filemerge.absentfilectx(wctx, fa)
else:
repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
fcl = wctx[f1]
if f2 is None:
fco = filemerge.absentfilectx(mctx, fa)
else:
fco = mctx[f2]
actx = repo[anc]
if fa in actx:
fca = actx[fa]
else:
# TODO: move to absentfilectx
fca = repo.filectx(f1, fileid=nullrev)
ms.add(fcl, fco, fca, f)
if f1 != f and move:
moves.append(f1)

    # remove renamed files after they have been safely stored
for f in moves:
if wctx[f].lexists():
repo.ui.debug("removing %s\n" % f)
wctx[f].audit()
wctx[f].remove()
numupdates = sum(len(l) for m, l in actions.items() if m != "k")
z = 0
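
    # The native Rust remover/writer workers need remotefilelog and an
    # on-disk working copy; otherwise we fall back to the Python batch
    # implementations below.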
def userustworker():
return "remotefilelog" in repo.requirements and not wctx.isinmemory()

    rustworkers = userustworker()

# record path conflicts
with progress.bar(
repo.ui, _("updating"), _("files"), numupdates
) as prog, repo.ui.timesection("updateworker"):
for f, args, msg in actions["p"]:
f1, fo = args
s = repo.ui.status
s(
_(
"%s: path conflict - a file or link has the same name as a "
"directory\n"
)
% f
)
if fo == "l":
s(_("the local file has been renamed to %s\n") % f1)
else:
s(_("the remote file has been renamed to %s\n") % f1)
s(_("resolve manually then use 'hg resolve --mark %s'\n") % f)
ms.addpath(f, f1, fo)
z += 1
prog.value = (z, f)

        # When merging in-memory, we can't support worker processes, so set
        # the per-item cost to 0 in that case.
        cost = 0 if wctx.isinmemory() else 0.001

# Flush any pending data to disk before forking workers, so the workers
# don't all flush duplicate data.
repo.commitpending()
# remove in parallel (must come before resolving path conflicts and
# getting)
if rustworkers:
            # Removing lots of files very quickly is known to cause FSEvents
            # to lose events, which forces watchman to recrawl the entire
            # repository. For a very large repository, this can take many
            # minutes, slowing down all the other tools that rely on it.
            # Thus, add a config that can be tweaked to specifically reduce
            # the amount of concurrency.
numworkers = repo.ui.configint(
"experimental", "numworkersremover", worker._numworkers(repo.ui)
)
remover = rustworker.removerworker(repo.wvfs.base, numworkers)
for f, args, msg in actions["r"] + actions["rg"]:
# The remove method will either return immediately or block if
# the internal worker queue is full.
remover.remove(f)
z += 1
prog.value = (z, f)
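            # wait() blocks until the worker queue drains; any paths the
            # Rust worker could not remove are retried serially in Python.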
retry = remover.wait()
for f in retry:
repo.ui.debug("retrying %s\n" % f)
removeone(repo, wctx, f)
else:
for i, size, item in batchremove(repo, wctx, actions["r"] + actions["rg"]):
z += i
prog.value = (z, item)
# "rg" actions are counted in updated below
removed = len(actions["r"])
# resolve path conflicts (must come before getting)
for f, args, msg in actions["pr"]:
repo.ui.debug(" %s: %s -> pr\n" % (f, msg))
(f0,) = args
if wctx[f0].lexists():
repo.ui.note(_("moving %s to %s\n") % (f0, f))
wctx[f].audit()
wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
wctx[f0].remove()
z += 1
prog.value = (z, f)
# get in parallel
writesize = 0
if rustworkers:
numworkers = repo.ui.configint(
"experimental", "numworkerswriter", worker._numworkers(repo.ui)
)
writer = rustworker.writerworker(
repo.fileslog.contentstore, repo.wvfs.base, numworkers
)
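            # Bind the filectx lookup once; it is called for every file in
            # the loop below.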
fctx = mctx.filectx
for f, (flags, backup), msg in actions["g"] + actions["rg"]:
fnode = fctx(f).filenode()
# The write method will either return immediately or block if
# the internal worker queue is full.
writer.write(f, fnode, flags)
z += 1
prog.value = (z, f)
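            # wait() reports the total bytes written and any files the Rust
            # worker could not write; retry those serially in Python.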
writesize, retry = writer.wait()
for f, flag in retry:
repo.ui.debug("retrying %s\n" % f)
writesize += updateone(repo, fctx, wctx, f, flag)
else:
for i, size, item in batchget(
repo, mctx, wctx, actions["g"] + actions["rg"]
):
z += i
writesize += size
prog.value = (z, item)
updated = len(actions["g"]) + len(actions["rg"])
perftrace.tracebytes("Disk Writes", writesize)
# forget (manifest only, just log it) (must come first)
for f, args, msg in actions["f"]:
repo.ui.debug(" %s: %s -> f\n" % (f, msg))
z += 1
prog.value = (z, f)
# re-add (manifest only, just log it)
for f, args, msg in actions["a"]:
repo.ui.debug(" %s: %s -> a\n" % (f, msg))
z += 1
prog.value = (z, f)
# re-add/mark as modified (manifest only, just log it)
for f, args, msg in actions["am"]:
repo.ui.debug(" %s: %s -> am\n" % (f, msg))
z += 1
prog.value = (z, f)
# keep (noop, just log it)
for f, args, msg in actions["k"]:
repo.ui.debug(" %s: %s -> k\n" % (f, msg))
# no progress
# directory rename, move local
for f, args, msg in actions["dm"]:
repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
z += 1
prog.value = (z, f)
f0, flags = args
repo.ui.note(_("moving %s to %s\n") % (f0, f))
wctx[f].audit()
wctx[f].write(wctx.filectx(f0).data(), flags)
wctx[f0].remove()
updated += 1
# local directory rename, get
for f, args, msg in actions["dg"]:
repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
z += 1
prog.value = (z, f)
f0, flags = args
repo.ui.note(_("getting %s to %s\n") % (f0, f))
wctx[f].write(mctx.filectx(f0).data(), flags)
updated += 1
# exec
for f, args, msg in actions["e"]:
repo.ui.debug(" %s: %s -> e\n" % (f, msg))
z += 1
prog.value = (z, f)
(flags,) = args
wctx[f].audit()
wctx[f].setflags("l" in flags, "x" in flags)
updated += 1
perftrace.tracevalue("Deleted Files", removed)
perftrace.tracevalue("Written Files", updated)
# the ordering is important here -- ms.mergedriver will raise if the
# merge driver has changed, and we want to be able to bypass it when
# overwrite is True
usemergedriver = not overwrite and mergeactions and ms.mergedriver
if usemergedriver:
ms.commit()
with repo.ui.timesection("mergedriver"):
                # driverpreprocess() returns False if the driver function
                # raises an exception.
failed = not driverpreprocess(repo, ms, wctx, labels=labels)
            driverresolved = list(ms.driverresolved())
repo.ui.log("command_metrics", mergedriver_num_files=len(driverresolved))
# If preprocess() marked any files as driver-resolved and we're
# merging in-memory, abort on the assumption that driver scripts
# require the working directory.
if driverresolved and wctx.isinmemory():
errorstr = (
"some of your files require mergedriver to run, "
"which in-memory merge does not support"
)
raise error.InMemoryMergeConflictsError(
errorstr,
type=error.InMemoryMergeConflictsError.TYPE_MERGEDRIVER,
paths=driverresolved,
)
# NOTE(phillco): This used to say "the driver might leave some files unresolved",
# but this actually only handles the case where preprocess() fails. A preprocess()
# script can also leave files unmarked without failing.
unresolvedf = set(ms.unresolved())
if failed:
# Preprocess failed, so don't proceed in either case.
if wctx.isinmemory():
raise error.InMemoryMergeConflictsError(
"preprocess() raised an exception",
type=error.InMemoryMergeConflictsError.TYPE_FILE_CONFLICTS,
paths=list(unresolvedf),
)
else:
# XXX setting unresolved to at least 1 is a hack to make sure we
# error out
return updated, merged, removed, max(len(unresolvedf), 1)
newactions = []
for f, args, msg in mergeactions:
if f in unresolvedf:
newactions.append((f, args, msg))
mergeactions = newactions
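
        # The finally clause below guarantees the merge state is committed
        # even if an individual file merge raises.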
try:
# premerge
tocomplete = []
completed = []
for f, args, msg in mergeactions:
repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
z += 1
prog.value = (z, f)
wctx[f].audit()
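                # preresolve runs the premerge step; files it cannot fully
                # resolve are queued for the merge-tool pass below.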
complete, r = ms.preresolve(f, wctx)
if not complete:
numupdates += 1
tocomplete.append((f, args, msg))
else:
completed.append(f)
# merge
files = []
for f, args, msg in tocomplete:
repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
z += 1
prog.value = (z, f)
ms.resolve(f, wctx)
files.append(f)
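
            # Log which files needed a real merge tool versus those that
            # premerge resolved automatically.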
reponame = repo.ui.config("fbscmquery", "reponame")
command = " ".join(util.shellquote(a) for a in pycompat.sysargv)
repo.ui.log(
"manualmergefiles",
manual_merge_files=",".join(files),
auto_merge_files=",".join(completed),
command=command,
repo=reponame,
)
finally:
ms.commit()
unresolved = ms.unresolvedcount()
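
    # Give the merge driver a chance to run its conclude() step, unless it
    # already marked itself done (state "s") or files remain unresolved.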
if usemergedriver and not unresolved and ms.mdstate() != "s":
with repo.ui.timesection("mergedriver"):
if not driverconclude(repo, ms, wctx, labels=labels):
# XXX setting unresolved to at least 1 is a hack to make
# sure we error out
unresolved = max(unresolved, 1)
ms.commit()
msupdated, msmerged, msremoved = ms.counts()
updated += msupdated
merged += msmerged
removed += msremoved
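
    # The merge driver may have queued extra actions (e.g. removes or gets)
    # while resolving; fold them into the main action table, taking care
    # not to record the same file twice.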
extraactions = ms.actions()
if extraactions:
        # The same file may appear both in extraactions["r"] (to remove)
        # and in actions["g"] (to create). Drop those files from
        # actions["g"] to avoid conflicting actions.
extraremoved = {item[0] for item in extraactions["r"]}
if extraremoved:
actions["g"] = [
item for item in actions["g"] if item[0] not in extraremoved
]
        mfiles = {a[0] for a in actions["m"]}
for k, acts in pycompat.iteritems(extraactions):
actions[k].extend(acts)
# Remove these files from actions['m'] as well. This is
# important because in recordupdates, files in actions['m'] are
# processed after files in other actions, and the merge driver
# might add files to those actions via extraactions above. This
# can lead to a file being recorded twice, with poor results.
# This is especially problematic for actions['r'] (currently
# only possible with the merge driver in the initial merge
# process; interrupted merges don't go through this flow).
#
# The real fix here is to have indexes by both file and action
# so that when the action for a file is changed it is
# automatically reflected in the other action lists. But that
# involves a more complex data structure, so this will do for
# now.
#
# We don't need to do the same operation for 'dc' and 'cd'
# because those lists aren't consulted again.
mfiles.difference_update(a[0] for a in acts)
actions["m"] = [a for a in actions["m"] if a[0] in mfiles]
return updated, merged, removed, unresolved