in common/buildlogger/internal/tokensanitizer/token_masker.go [103:202]
func (m *tokenSanitizer) Write(p []byte) (n int, err error) {
	if len(p) == 0 {
		return 0, nil
	}
	// fast path: if the write is the "[MASKED]" value coming from an
	// upper-level masker, don't bother processing it; pass it straight to
	// the next writer.
	if bytes.Equal(p, mask) {
		return m.next.Write(p)
	}
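	// n is the read cursor into p; last marks the start of the data that
	// has not yet been forwarded to the next writer.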
	var last int
	for n < len(p) {
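		// the full prefix has already been matched (possibly carried over
		// from a previous Write): consume the token body that follows and,
		// once it ends, emit a single mask in its place.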
		if m.matching == len(m.prefix) {
			if alphabet[p[n]] {
				m.masked = true
				n++
				last = n
				continue
			}
			if m.masked {
				m.masked = false
				_, err := m.next.Write(mask)
				if err != nil {
					return n, err
				}
			}
			m.matching = 0
		}
		// optimization: use the faster bytes.IndexByte to jump to the next
		// possible start of the prefix; if it isn't found, the rest of the
		// buffer cannot contain a match, so consume it all.
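		// e.g. with a hypothetical prefix "glpat-" and p[n:] == "build glpat-x",
		// IndexByte moves n straight to the 'g' instead of checking every byte.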
		if m.matching == 0 {
			off := bytes.IndexByte(p[n:], m.prefix[0])
			if off < 0 {
				n += len(p[n:])
				break
			}
			n += off
		}
		// find out how much data we can match: the minimum of the remaining
		// input and the remainder of the prefix.
		min := len(m.prefix[m.matching:])
		if len(p[n:]) < min {
			min = len(p[n:])
		}
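		// e.g. with a 6-byte prefix, 3 bytes already matched, and only 2
		// bytes left in p, min is 2: we can only match part of the prefix
		// now and carry m.matching over to the next Write call.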
		// try to match the next part of the prefix
		if bytes.HasPrefix(p[n:], m.prefix[m.matching:m.matching+min]) {
			// send any data preceding the match that hasn't been forwarded
			// yet to the next writer.
			_, err = m.next.Write(p[last:n])
			if err != nil {
				return n, err
			}
			m.matching += min
			n += min
			last = n
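			// the prefix itself is not secret: once it has been fully
			// matched, forward it as-is; the token body that follows is
			// masked by the branch at the top of the loop.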
			if m.matching == len(m.prefix) {
				_, err := m.next.Write(m.prefix[:m.matching])
				if err != nil {
					return n, err
				}
			}
			continue
		}
		// we didn't complete a prefix match: the partially matched prefix
		// bytes (possibly carried over from a previous Write) are not secret,
		// so send them to the next writer unmodified.
		if m.matching > 0 {
			_, err = m.next.Write(m.prefix[:m.matching])
			if err != nil {
				return n, err
			}
			// if the byte that broke the match is itself the first byte of
			// the prefix, restart matching from here.
			if m.prefix[0] == p[n] {
				m.matching = 1
				last++
				n++
				continue
			}
		}
		m.matching = 0
		n++
	}
	// any unmatched data is sent to the next writer
	_, err = m.next.Write(p[last:n])
	return n, err
}
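
// Illustrative sketch (not part of the original file): how the state above
// carries a partial prefix across Write calls. It assumes a hypothetical
// constructor newTokenSanitizer(next, prefix) for this package, a prefix of
// "glpat-", and that alphabet marks token characters such as letters and
// digits:
//
//	var buf bytes.Buffer
//	ts := newTokenSanitizer(&buf, []byte("glpat-")) // hypothetical constructor
//	_, _ = ts.Write([]byte("token glp"))            // "glp" is held as a partial match
//	_, _ = ts.Write([]byte("at-abc123 done"))       // completes the prefix, masks the body
//	fmt.Print(buf.String())                         // prints "token glpat-[MASKED] done"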