mirror of https://github.com/oobabooga/text-generation-webui.git (synced 2026-02-09 01:04:27 +01:00)
Minor improvement while running custom models
parent 24c4c20391
commit 09a7c36e1b
@@ -97,6 +97,7 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
         print(f"Output generated in {(t1-t0):.2f} seconds.")
         yield formatted_outputs(reply, shared.model_name)
     else:
         yield formatted_outputs(question, shared.model_name)
         for i in tqdm(range(max_new_tokens//8+1)):
+            clear_torch_cache()
             reply = shared.model.generate(question, token_count=8, temperature=temperature, top_p=top_p)