From c93f1fa99be9338eeb1a98547dda8a23033f6ae2 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Sat, 4 Mar 2023 03:10:21 -0300
Subject: [PATCH] Count the tokens more conservatively

---
 modules/text_generation.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/modules/text_generation.py b/modules/text_generation.py
index a7d41b84..f9082a31 100644
--- a/modules/text_generation.py
+++ b/modules/text_generation.py
@@ -23,9 +23,9 @@ def get_max_prompt_length(tokens):
 
 def encode(prompt, tokens_to_generate=0, add_special_tokens=True):
     # These models do not have explicit tokenizers for now, so
-    # we return an estimate on the number of tokens
+    # we return an estimate for the number of tokens
     if shared.is_RWKV or shared.is_LLaMA:
-        return np.zeros((1, len(prompt)//5))
+        return np.zeros((1, len(prompt)//4))
 
     input_ids = shared.tokenizer.encode(str(prompt), return_tensors='pt', truncation=True, max_length=get_max_prompt_length(tokens_to_generate), add_special_tokens=add_special_tokens)
     if shared.args.cpu:
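
For context, a minimal sketch of the heuristic this patch tunes (the name estimate_token_count and the chars_per_token parameter are illustrative only, not part of the repository): switching from len(prompt)//5 to len(prompt)//4 assumes fewer characters per token, so the estimated token count comes out higher and the prompt is less likely to overrun the model's context window.

    import numpy as np

    def estimate_token_count(prompt: str, chars_per_token: int = 4) -> np.ndarray:
        # Rough character-based estimate for models without an explicit tokenizer.
        # A (1, n) array of zeros mimics the shape of a real tokenizer's batch of
        # input_ids, so downstream code that only inspects the length still works.
        return np.zeros((1, len(prompt) // chars_per_token))

    # Usage: only the second dimension (the estimated token count) matters.
    prompt = "Hello, how are you today?"
    print(estimate_token_count(prompt).shape[1])  # 6 tokens estimated for 25 characters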