in src/lib.rs [290:327]
fn _increase_last_piece_token_len(
    &self,
    tokens: Vec<Rank>,
    mut last_piece_token_len: usize,
) -> (Vec<Rank>, usize) {
    // Unfortunately, the locations where our regex splits can be unstable.
    // For the purposes of determining unstable tokens, unstable regex splitting
    // is only a problem if a split that was present disappears, since this can
    // lead to merging of tokens otherwise thought to be stable.
    // cl100k_base makes our life hard by including the \s*[\r\n]+ pattern.
    // This can e.g. cause "\n" + " " to become "\n \n".
    // Here is a quick and dirty fix:
    {
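        // A token is "all space" only if every byte it decodes to is a space,
        // newline, or tab; tokens with no decoder entry are treated as
        // non-whitespace.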
        let token_is_all_space = |token| {
            self.decoder
                .get(token)
                .map(|token_bytes| {
                    token_bytes
                        .iter()
                        .rev()
                        .all(|&b| [b' ', b'\n', b'\t'].contains(&b))
                })
                .unwrap_or(false)
        };
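        // If the token that begins the last piece is all whitespace, walk
        // backwards and absorb any preceding all-whitespace tokens into the
        // last piece, since an unstable split could merge them on re-encoding.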
        if last_piece_token_len > 0
            && token_is_all_space(&tokens[tokens.len() - last_piece_token_len])
        {
            while (last_piece_token_len < tokens.len())
                && token_is_all_space(&tokens[tokens.len() - last_piece_token_len - 1])
            {
                last_piece_token_len += 1;
            }
        }
    }
    debug_assert!(last_piece_token_len <= tokens.len());
    (tokens, last_piece_token_len)
}
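To see the instability the comment describes, here is a minimal standalone sketch; it is not part of lib.rs, uses the plain `regex` crate, and substitutes a simplified two-branch stand-in for the cl100k_base whitespace alternatives. With the `\s*[\r\n]+` branch present, "\n " splits into two pieces, but "\n \n" matches as a single piece, so the split between "\n" and " " disappears once a newline follows:

use regex::Regex;

fn pieces<'a>(re: &Regex, text: &'a str) -> Vec<&'a str> {
    re.find_iter(text).map(|m| m.as_str()).collect()
}

fn main() {
    // Simplified stand-in for the whitespace branches of the cl100k_base
    // pattern; the real pattern has more alternatives.
    let re = Regex::new(r"\s*[\r\n]+|\s+").unwrap();

    // "\n" and " " are split into separate pieces here...
    assert_eq!(pieces(&re, "\n "), vec!["\n", " "]);

    // ...but appending "\n" lets `\s*[\r\n]+` consume all three bytes at
    // once: the split that was present has disappeared, which is exactly
    // the merging the walk-back above guards against.
    assert_eq!(pieces(&re, "\n \n"), vec!["\n \n"]);
}

This is why the walk-back treats every trailing all-whitespace token as part of the unstable region rather than trusting the split that produced the current tokens.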