in Allura/allura/model/repository.py [0:0]
def last_commit_ids(self, commit, paths):
    '''
    Return a mapping {path: commit_id} of the _id of the last
    commit to touch each path, starting from the given commit.
    Chunks the set of paths based on lcd_thread_chunk_size and
    runs each chunk (if more than one) in a separate thread.
    Each thread will call :meth:`_get_last_commit` to get the
    commit ID and list of changed files for the last commit
    to touch any file in a given chunk.

    :param commit: commit object supplying the ``_id`` that the
        history walk starts from.
    :param paths: iterable of repository paths; duplicates are removed.
    :return: dict mapping path -> commit ``_id``.  A path may be absent
        from the result if the lookup timed out, history was exhausted
        before attributing it, or a worker thread raised.
    '''
    if not paths:
        return {}
    # Overall wall-clock budget (seconds) shared by all worker threads;
    # checked both inside each worker and by the join loop below.
    timeout = float(tg.config.get('lcd_timeout', 60))
    start_time = time()
    paths = list(set(paths))  # remove dupes
    result = {}  # will be appended to from each thread
    chunks = Queue()
    lcd_chunk_size = asint(tg.config.get('lcd_thread_chunk_size', 10))
    num_threads = 0
    # Split the (deduped) paths into disjoint queue-fed chunks;
    # one worker per chunk.
    for s in range(0, len(paths), lcd_chunk_size):
        chunks.put(paths[s:s + lcd_chunk_size])
        num_threads += 1
    def get_ids():
        # Worker body: take one chunk off the queue and repeatedly ask
        # the SCM for the last commit touching any remaining path,
        # peeling off the paths that commit accounts for, until every
        # path is resolved, history runs out, or the budget expires.
        paths = set(chunks.get())
        try:
            commit_id = commit._id
            while paths and commit_id:
                if time() - start_time >= timeout:
                    log.error('last_commit_ids timeout for %s on %s',
                        commit._id, ', '.join(paths))
                    break
                # NOTE(review): each round restarts the search from the
                # original commit._id rather than from the commit_id just
                # found; presumably equivalent (the found commit was the
                # *latest* one touching any remaining path) but confirm
                # against _get_last_commit's contract before changing.
                commit_id, changes = self._get_last_commit(
                    commit._id, paths)
                if commit_id is None:
                    # No commit in history touches the remaining paths.
                    break
                # Paths accounted for by this commit, matching changed
                # files and their directory prefixes.
                changed = prefix_paths_union(paths, changes)
                for path in changed:
                    # Workers write disjoint keys (chunks are disjoint
                    # slices of the deduped path list), so sharing
                    # `result` across threads is safe here.
                    result[path] = commit_id
                paths -= changed
        except Exception as e:
            log.exception('Error in SCM thread: %s', e)
        finally:
            # Always mark the chunk done — even on error/timeout — so
            # the join loop below cannot wait on a dead worker forever.
            chunks.task_done()
    if num_threads == 1:
        # Single chunk: skip thread overhead and run inline.
        get_ids()
    else:
        for i in range(num_threads):
            t = Thread(target=get_ids)
            t.start()
        # reimplement chunks.join() but with a timeout
        # see: http://bugs.python.org/issue9634
        # (giving threads a bit of extra cleanup time in case they timeout)
        chunks.all_tasks_done.acquire()
        try:
            endtime = time() + timeout + 0.5
            # Wait on the queue's internal condition until every chunk
            # is task_done()'d or the (padded) deadline passes.
            while chunks.unfinished_tasks and endtime > time():
                chunks.all_tasks_done.wait(endtime - time())
        finally:
            chunks.all_tasks_done.release()
    return result