in eden/scm/edenscm/hgext/remotefilelog/__init__.py [0:0]
def onetimeclientsetup(ui):
    global clientonetime
    if clientonetime:
        return
    clientonetime = True

    if util.safehasattr(changegroup, "_addchangegroupfiles"):
        fn = "_addchangegroupfiles"  # hg >= 3.6
    else:
        fn = "addchangegroupfiles"  # hg <= 3.5
    wrapfunction(changegroup, fn, shallowbundle.addchangegroupfiles)
if util.safehasattr(changegroup, "getchangegroup"):
wrapfunction(changegroup, "getchangegroup", shallowbundle.getchangegroup)
else:
wrapfunction(changegroup, "makechangegroup", shallowbundle.makechangegroup)

    def storewrapper(orig, requirements, path, vfstype, *args):
        s = orig(requirements, path, vfstype, *args)
        if shallowrepo.requirement in requirements:
            s = shallowstore.wrapstore(s)
        return s

    wrapfunction(store, "store", storewrapper)

    extensions.wrapfunction(exchange, "pull", exchangepull)

    # prefetch files before update
    def applyupdates(
        orig, repo, actions, wctx, mctx, overwrite, labels=None, ancestors=None
    ):
        if shallowrepo.requirement in repo.requirements:
            manifest = mctx.manifest()
            files = []
            for f, args, msg in actions["g"]:
                files.append((f, hex(manifest[f])))
            # batch fetch the needed files from the server
            repo.fileservice.prefetch(files, fetchhistory=False)
        return orig(
            repo, actions, wctx, mctx, overwrite, labels=labels, ancestors=ancestors
        )

    wrapfunction(merge, "applyupdates", applyupdates)

    # Prefetch files for merge's checkunknownfiles
    def checkunknownfiles(orig, repo, wctx, mctx, force, actions, *args, **kwargs):
        if shallowrepo.requirement in repo.requirements:
            files = []
            sparsematch = repo.maybesparsematch(mctx.rev())
            for f, (m, actionargs, msg) in pycompat.iteritems(actions):
                if sparsematch and not sparsematch(f):
                    continue
                if m in ("c", "dc", "cm"):
                    files.append((f, hex(mctx.filenode(f))))
                elif m == "dg":
                    f2 = actionargs[0]
                    files.append((f2, hex(mctx.filenode(f2))))
            # We need history for the files so we can compute the sha(p1, p2,
            # text) for the files on disk. This will unfortunately fetch all the
            # history for the files, which is excessive. In the future we should
            # change this to fetch the sha256 and size, then we can avoid p1, p2
            # entirely.
            repo.fileservice.prefetch(files, fetchdata=False, fetchhistory=True)
        return orig(repo, wctx, mctx, force, actions, *args, **kwargs)

    wrapfunction(merge, "_checkunknownfiles", checkunknownfiles)

    # Prefetch files for the logic that compares added and removed files to
    # detect renames
    def findrenames(orig, repo, matcher, added, removed, *args, **kwargs):
        if shallowrepo.requirement in repo.requirements:
            files = []
            parentctx = repo["."]
            m1 = parentctx.manifest()
            for f in removed:
                if f in m1:
                    files.append((f, hex(parentctx.filenode(f))))
            # batch fetch the needed files from the server
            repo.fileservice.prefetch(files)
        return orig(repo, matcher, added, removed, *args, **kwargs)

    wrapfunction(scmutil, "_findrenames", findrenames)

    # prefetch files before mergecopies check
    def computenonoverlap(orig, repo, c1, c2, *args, **kwargs):
        u1, u2 = orig(repo, c1, c2, *args, **kwargs)
        if shallowrepo.requirement in repo.requirements:
            m1 = c1.manifest()
            m2 = c2.manifest()
            files = []

            sparsematch1 = repo.maybesparsematch(c1.rev())
            if sparsematch1:
                sparseu1 = []
                for f in u1:
                    if sparsematch1(f):
                        files.append((f, hex(m1[f])))
                        sparseu1.append(f)
                u1 = sparseu1

            sparsematch2 = repo.maybesparsematch(c2.rev())
            if sparsematch2:
                sparseu2 = []
                for f in u2:
                    if sparsematch2(f):
                        files.append((f, hex(m2[f])))
                        sparseu2.append(f)
                u2 = sparseu2

            # batch fetch the needed files from the server
            repo.fileservice.prefetch(files)
        return u1, u2

    wrapfunction(copies, "_computenonoverlap", computenonoverlap)

    # prefetch files before pathcopies check
    def computeforwardmissing(orig, a, b, match=None):
        missing = list(orig(a, b, match=match))
        repo = a._repo
        if shallowrepo.requirement in repo.requirements:
            mb = b.manifest()

            files = []
            sparsematch = repo.maybesparsematch(b.rev())
            if sparsematch:
                sparsemissing = []
                for f in missing:
                    if sparsematch(f):
                        files.append((f, hex(mb[f])))
                        sparsemissing.append(f)
                missing = sparsemissing

            # batch fetch the needed files from the server
            repo.fileservice.prefetch(files)
        return missing

    wrapfunction(copies, "_computeforwardmissing", computeforwardmissing)

    # prefetch files before archiving
    def computefiles(orig, ctx, matchfn):
        files = orig(ctx, matchfn)
        repo = ctx._repo
        if shallowrepo.requirement in repo.requirements:
            # Don't run on memory commits, since they may contain files without
            # hashes, which can screw up prefetch.
            if ctx.node() is not None:
                mf = ctx.manifest()
                repo.fileservice.prefetch([(f, hex(mf.get(f))) for f in files])
        return files

    wrapfunction(archival, "computefiles", computefiles)

    # disappointing hacks below
    templatekw.getrenamedfn = getrenamedfn
    wrapfunction(revset, "filelog", filelogrevset)
    revset.symbols["filelog"] = revset.filelog
    wrapfunction(cmdutil, "walkfilerevs", walkfilerevs)

    # prevent strip from stripping remotefilelogs
    def _collectbrokencsets(orig, repo, files, striprev):
        if shallowrepo.requirement in repo.requirements:
            files = [f for f in files if not repo.shallowmatch(f)]
        return orig(repo, files, striprev)

    wrapfunction(repair, "_collectbrokencsets", _collectbrokencsets)

    # Don't commit filelogs until we know the commit hash, since the hash
    # is present in the filelog blob.
    # This violates Mercurial's filelog->manifest->changelog write order,
    # but is generally fine for client repos.
    pendingfilecommits = []

    def addrawrevision(
        orig,
        self,
        rawtext,
        transaction,
        link,
        p1,
        p2,
        node,
        flags,
        cachedelta=None,
        _metatuple=None,
    ):
        if isint(link):
            pendingfilecommits.append(
                (
                    self,
                    rawtext,
                    transaction,
                    link,
                    p1,
                    p2,
                    node,
                    flags,
                    cachedelta,
                    _metatuple,
                )
            )
            return node
        else:
            return orig(
                self,
                rawtext,
                transaction,
                link,
                p1,
                p2,
                node,
                flags,
                cachedelta,
                _metatuple=_metatuple,
            )

    wrapfunction(remotefilelog.remotefilelog, "addrawrevision", addrawrevision)

    def changelogadd(orig, self, *args):
        oldlen = len(self)
        node = orig(self, *args)
        newlen = len(self)
        if oldlen != newlen:
            # the commit was written: flush the queued filelog revisions,
            # using the real commit node as their link
            linknode = node
            for oldargs in pendingfilecommits:
                log, rt, tr, _link, p1, p2, n, fl, c, m = oldargs
                log.addrawrevision(rt, tr, linknode, p1, p2, n, fl, c, m)
        else:
            # "link" is actually wrong here (it was set to len(changelog)),
            # so if the changelog remained unchanged, skip writing the file
            # revisions, but still sanity-check that they all pointed at the
            # same pending revision
            if len(set(x[3] for x in pendingfilecommits)) > 1:
                raise error.ProgrammingError(
                    "pending multiple integer revisions are not supported"
                )
        del pendingfilecommits[:]
        return node

    wrapfunction(changelog2.changelog, "add", changelogadd)

    # changectx wrappers
    def filectx(orig, self, path, fileid=None, filelog=None):
        if fileid is None:
            fileid = self.filenode(path)
        if (
            shallowrepo.requirement in self._repo.requirements
            and self._repo.shallowmatch(path)
        ):
            return remotefilectx.remotefilectx(
                self._repo, path, fileid=fileid, changectx=self, filelog=filelog
            )
        return orig(self, path, fileid=fileid, filelog=filelog)

    wrapfunction(context.changectx, "filectx", filectx)

    def workingfilectx(orig, self, path, filelog=None):
        if (
            shallowrepo.requirement in self._repo.requirements
            and self._repo.shallowmatch(path)
        ):
            return remotefilectx.remoteworkingfilectx(
                self._repo, path, workingctx=self, filelog=filelog
            )
        return orig(self, path, filelog=filelog)

    wrapfunction(context.workingctx, "filectx", workingfilectx)

    # prefetch required revisions before a diff
    def trydiff(
        orig,
        repo,
        revs,
        ctx1,
        ctx2,
        modified,
        added,
        removed,
        copy,
        getfilectx,
        *args,
        **kwargs
    ):
        if shallowrepo.requirement in repo.requirements:
            prefetch = []
            mf1 = ctx1.manifest()
            for fname in modified + added + removed:
                if fname in mf1:
                    fnode = getfilectx(fname, ctx1).filenode()
                    # fnode can be None if it's an edited working ctx file
                    if fnode:
                        prefetch.append((fname, hex(fnode)))
                if fname not in removed:
                    fnode = getfilectx(fname, ctx2).filenode()
                    if fnode:
                        prefetch.append((fname, hex(fnode)))

            repo.fileservice.prefetch(prefetch)
        return orig(
            repo,
            revs,
            ctx1,
            ctx2,
            modified,
            added,
            removed,
            copy,
            getfilectx,
            *args,
            **kwargs
        )

    wrapfunction(patch, "trydiff", trydiff)
if util.safehasattr(cmdutil, "_revertprefetch"):
wrapfunction(cmdutil, "_revertprefetch", _revertprefetch)
else:
wrapfunction(cmdutil, "revert", revert)

    def writenewbundle(
        orig, ui, repo, source, filename, bundletype, outgoing, *args, **kwargs
    ):
        if shallowrepo.requirement in repo.requirements:
            uploadblobs(repo, outgoing.missing)
        return orig(ui, repo, source, filename, bundletype, outgoing, *args, **kwargs)

    # when writing a bundle via the "hg bundle" command, upload related LFS blobs
    wrapfunction(bundle2, "writenewbundle", writenewbundle)
if ui.configbool("remotefilelog", "lfs"):
# Make bundle choose changegroup3 instead of changegroup2. This affects
# "hg bundle" command. Note: it does not cover all bundle formats like
# "packed1". Using "packed1" with lfs will likely cause trouble.
names = [k for k, v in exchange._bundlespeccgversions.items() if v == "02"]
for k in names:
exchange._bundlespeccgversions[k] = "03"