Mirror of https://github.com/oobabooga/text-generation-webui.git
llama.cpp: Explicitly send cache_prompt = True
This commit is contained in:
parent 195a45c6e1
commit a6c3ec2299
@@ -135,6 +135,7 @@ class LlamaServer:
             "prompt": token_ids,
             "n_predict": max_new_tokens,
             "stream": True,
+            "cache_prompt": True
         })

         if shared.args.verbose:
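For context, this payload is sent to the llama.cpp HTTP server's /completion endpoint. Below is a minimal, self-contained sketch of an equivalent standalone request, assuming a local llama-server instance at http://127.0.0.1:8080; the URL, prompt, and token budget are illustrative, not taken from the commit.

    import json
    import requests

    # "cache_prompt": True asks llama.cpp's server to keep the KV cache
    # between requests and reuse it for the longest shared prompt prefix,
    # so a growing chat transcript only pays to evaluate newly added tokens.
    url = "http://127.0.0.1:8080/completion"
    payload = {
        "prompt": "Once upon a time",
        "n_predict": 32,
        "stream": True,
        "cache_prompt": True,
    }

    with requests.post(url, json=payload, stream=True) as response:
        response.raise_for_status()
        for line in response.iter_lines():
            # The server streams server-sent events: lines of the form "data: {...}"
            if line.startswith(b"data: "):
                chunk = json.loads(line[len(b"data: "):])
                print(chunk.get("content", ""), end="", flush=True)
                if chunk.get("stop"):
                    break

Sending the flag explicitly, as this commit does, avoids depending on the server's default value for prompt caching.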