tokens = the token array to search in.
Returns: the index of the token inside the given tokens array which contains the character at the given byte offset. This is the first token with tok.index == bytes or, failing that, the token immediately before the first token whose index is greater than bytes. If no token starts at or after the given byte offset, this returns tokens.length.
This is equivalent to the following code:
foreach (i, tok; tokens)
{
    if (tok.index == bytes)
        return i;
    else if (tok.index > bytes)
        return i - 1;
}
return tokens.length;
import dparse.lexer;

StringCache stringCache = StringCache(StringCache.defaultBucketCount);
const(Token)[] tokens = getTokensForParser(cast(ubyte[]) `module foo.bar;
// ok
void main(string[] args)
{
}

/// documentation
void foo()
{
}
`, LexerConfig.init, &stringCache);

auto get(size_t bytes)
{
    auto i = tokens.tokenIndexAtByteIndex(bytes);
    if (i == tokens.length)
        return tok!"__EOF__";
    return tokens[i].type;
}

assert(get(0) == tok!"module");
assert(get(4) == tok!"module");
assert(get(6) == tok!"module");
assert(get(7) == tok!"identifier");
assert(get(9) == tok!"identifier");
assert(get(10) == tok!".");
assert(get(11) == tok!"identifier");
assert(get(16) == tok!";");
assert(get(49) == tok!"{");
assert(get(48) == tok!"{");
assert(get(47) == tok!")");
assert(get(1000) == tok!"__EOF__");

// TODO: process trivia fields in libdparse >=0.15.0 when it releases
//assert(get(20) == tok!"comment");
assert(get(20) == tok!";");
// assert(get(57) == tok!"comment");
Performs a binary search to find the token containing the search location.
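For illustration, here is a minimal sketch of what that binary search could look like, assuming dparse.lexer's Token type and mirroring the linear loop shown above. The name tokenIndexAtByteIndexSketch and this exact implementation are assumptions for illustration, not the library's actual code.

import dparse.lexer : Token;

// Hypothetical helper, shown only to illustrate the binary search idea;
// it mirrors the semantics of the equivalent linear loop above.
size_t tokenIndexAtByteIndexSketch(const(Token)[] tokens, size_t bytes)
{
    size_t lo = 0;
    size_t hi = tokens.length;
    // Narrow [lo, hi) down to the first token whose index is >= bytes
    // (lo == tokens.length if there is no such token).
    while (lo < hi)
    {
        const mid = lo + (hi - lo) / 2;
        if (tokens[mid].index >= bytes)
            hi = mid;
        else
            lo = mid + 1;
    }
    if (lo == tokens.length)
        return tokens.length; // bytes lies past every token start
    if (tokens[lo].index == bytes)
        return lo; // a token starts exactly at bytes
    // Otherwise take the token starting just before bytes; like the loop
    // above, this wraps around when bytes lies before the very first token.
    return lo - 1;
}

Like the equivalent loop, this sketch returns tokens.length when the offset lies past every token start, which is why the example above maps get(1000) to tok!"__EOF__".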