Some early cleanup of tokenizer

Prior to this, the tokenizer ran "one ahead": tokenizer_t::next()
would in fact return the last-parsed token. Switch to parsing on demand
instead of running one ahead; this is simpler and prepares for upcoming
tokenizer changes.
This commit is contained in:
ridiculousfish
2018-02-19 15:10:10 -08:00
parent 04c399c5a9
commit b13ee818d2
4 changed files with 74 additions and 72 deletions

View File

@@ -137,10 +137,10 @@ static void write_part(const wchar_t *begin, const wchar_t *end, int cut_at_curs
size_t pos = get_cursor_pos() - (begin - get_buffer());
if (tokenize) {
wchar_t *buff = wcsndup(begin, end - begin);
// fwprintf( stderr, L"Subshell: %ls, end char %lc\n", buff, *end );
wcstring out;
tokenizer_t tok(buff, TOK_ACCEPT_UNFINISHED);
wcstring buff(begin, end - begin);
tokenizer_t tok(buff.c_str(), TOK_ACCEPT_UNFINISHED);
tok_t token;
while (tok.next(&token)) {
if ((cut_at_cursor) && (token.offset + token.text.size() >= pos)) break;
@@ -154,7 +154,6 @@ static void write_part(const wchar_t *begin, const wchar_t *end, int cut_at_curs
}
streams.out.append(out);
free(buff);
} else {
if (cut_at_cursor) {
streams.out.append(begin, pos);