mirror of https://github.com/oobabooga/text-generation-webui.git (synced 2026-04-06 07:03:37 +00:00)
Fix several typos
parent 249861b65d
commit 2d141b54c5
5 changed files with 19 additions and 19 deletions
@@ -101,7 +101,7 @@ group.add_argument('--tensor-split', type=str, default=None, help='Split the mod
 group.add_argument('--row-split', action='store_true', help='Split the model by rows across GPUs. This may improve multi-gpu performance.')
 group.add_argument('--no-mmap', action='store_true', help='Prevent mmap from being used.')
 group.add_argument('--mlock', action='store_true', help='Force the system to keep the model in RAM.')
-group.add_argument('--no-kv-offload', action='store_true', help='Do not offload the K, Q, V to the GPU. This saves VRAM but reduces the performance.')
+group.add_argument('--no-kv-offload', action='store_true', help='Do not offload the K, Q, V to the GPU. This saves VRAM but reduces performance.')
 group.add_argument('--batch-size', type=int, default=1024, help='Maximum number of prompt tokens to batch together when calling llama-server. This is the application level batch size.')
 group.add_argument('--ubatch-size', type=int, default=1024, help='Maximum number of prompt tokens to batch together when calling llama-server. This is the max physical batch size for computation (device level).')
 group.add_argument('--threads', type=int, default=0, help='Number of threads to use.')
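For context, the flags in this hunk configure the llama-server backend that text-generation-webui launches. Below is a minimal sketch of how such parsed argparse flags could be translated into a llama-server command line. The build_server_cmd helper and the exact flag mapping are illustrative assumptions, not the repository's actual code; the llama-server option names follow upstream llama.cpp conventions.

import argparse

def build_server_cmd(args: argparse.Namespace) -> list[str]:
    # Hypothetical helper: translate parsed web UI flags into llama-server
    # CLI options. Boolean (store_true) flags are appended only when set.
    cmd = ['llama-server',
           '--batch-size', str(args.batch_size),
           '--ubatch-size', str(args.ubatch_size)]
    if args.threads > 0:
        cmd += ['--threads', str(args.threads)]
    if args.no_mmap:
        cmd.append('--no-mmap')
    if args.mlock:
        cmd.append('--mlock')
    if args.no_kv_offload:
        cmd.append('--no-kv-offload')
    if args.row_split:
        # llama.cpp exposes row splitting via --split-mode row
        cmd += ['--split-mode', 'row']
    if args.tensor_split:
        cmd += ['--tensor-split', args.tensor_split]
    return cmd

With the parser above, something like subprocess.Popen(build_server_cmd(args)) would then start the server; the web UI's actual launch mechanics may differ.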