Mirror of https://github.com/oobabooga/text-generation-webui.git
Improve the --help formatting a bit
This commit is contained in:
parent 6acb0e1bee
commit 3a207e7a57

README.md (17 changed lines)
@@ -186,9 +186,9 @@ usage: server.py [-h] [--multi-user] [--character CHARACTER] [--model MODEL] [--
 [--extensions EXTENSIONS [EXTENSIONS ...]] [--verbose] [--idle-timeout IDLE_TIMEOUT] [--loader LOADER] [--cpu] [--cpu-memory CPU_MEMORY] [--disk] [--disk-cache-dir DISK_CACHE_DIR]
 [--load-in-8bit] [--bf16] [--no-cache] [--trust-remote-code] [--force-safetensors] [--no_use_fast] [--use_flash_attention_2] [--use_eager_attention] [--torch-compile] [--load-in-4bit]
 [--use_double_quant] [--compute_dtype COMPUTE_DTYPE] [--quant_type QUANT_TYPE] [--flash-attn] [--threads THREADS] [--threads-batch THREADS_BATCH] [--batch-size BATCH_SIZE] [--no-mmap]
-[--mlock] [--n-gpu-layers N_GPU_LAYERS] [--tensor-split TENSOR_SPLIT] [--numa] [--no-kv-offload] [--row-split] [--extra-flags EXTRA_FLAGS] [--streaming-llm] [--ctx-size CTX_SIZE]
+[--mlock] [--n-gpu-layers N_GPU_LAYERS] [--tensor-split TENSOR_SPLIT] [--numa] [--no-kv-offload] [--row-split] [--extra-flags EXTRA_FLAGS] [--streaming-llm] [--ctx-size N]
 [--model-draft MODEL_DRAFT] [--draft-max DRAFT_MAX] [--gpu-layers-draft GPU_LAYERS_DRAFT] [--device-draft DEVICE_DRAFT] [--ctx-size-draft CTX_SIZE_DRAFT] [--gpu-split GPU_SPLIT]
-[--autosplit] [--cfg-cache] [--no_flash_attn] [--no_xformers] [--no_sdpa] [--num_experts_per_token NUM_EXPERTS_PER_TOKEN] [--enable_tp] [--hqq-backend HQQ_BACKEND] [--cpp-runner]
+[--autosplit] [--cfg-cache] [--no_flash_attn] [--no_xformers] [--no_sdpa] [--num_experts_per_token N] [--enable_tp] [--hqq-backend HQQ_BACKEND] [--cpp-runner]
 [--cache_type CACHE_TYPE] [--deepspeed] [--nvme-offload-dir NVME_OFFLOAD_DIR] [--local_rank LOCAL_RANK] [--alpha_value ALPHA_VALUE] [--rope_freq_base ROPE_FREQ_BASE]
 [--compress_pos_emb COMPRESS_POS_EMB] [--listen] [--listen-port LISTEN_PORT] [--listen-host LISTEN_HOST] [--share] [--auto-launch] [--gradio-auth GRADIO_AUTH]
 [--gradio-auth-path GRADIO_AUTH_PATH] [--ssl-keyfile SSL_KEYFILE] [--ssl-certfile SSL_CERTFILE] [--subpath SUBPATH] [--old-colors] [--api] [--public-api]
@@ -207,15 +207,15 @@ Basic settings:
 --model-dir MODEL_DIR Path to directory with all the models.
 --lora-dir LORA_DIR Path to directory with all the loras.
 --model-menu Show a model menu in the terminal when the web UI is first launched.
---settings SETTINGS Load the default interface settings from this yaml file. See settings-template.yaml for an example. If you create a file called settings.yaml,
-this file will be loaded by default without the need to use the --settings flag.
+--settings SETTINGS Load the default interface settings from this yaml file. See user_data/settings-template.yaml for an example. If you create a file called
+user_data/settings.yaml, this file will be loaded by default without the need to use the --settings flag.
 --extensions EXTENSIONS [EXTENSIONS ...] The list of extensions to load. If you want to load more than one extension, write the names separated by spaces.
 --verbose Print the prompts to the terminal.
 --idle-timeout IDLE_TIMEOUT Unload model after this many minutes of inactivity. It will be automatically reloaded when you try to use it again.

 Model loader:
---loader LOADER Choose the model loader manually, otherwise, it will get autodetected. Valid options: Transformers, llama.cpp, ExLlamav3_HF, ExLlamav2_HF,
-ExLlamav2, HQQ, TensorRT-LLM.
+--loader LOADER Choose the model loader manually, otherwise, it will get autodetected. Valid options: Transformers, llama.cpp, ExLlamav3_HF, ExLlamav2_HF, ExLlamav2, HQQ,
+TensorRT-LLM.

 Transformers/Accelerate:
 --cpu Use the CPU to generate text. Warning: Training on CPU is extremely slow.
@@ -254,8 +254,7 @@ llama.cpp:
 --streaming-llm Activate StreamingLLM to avoid re-evaluating the entire prompt when old messages are removed.

 Context and cache management:
---ctx-size CTX_SIZE, --n_ctx CTX_SIZE, --max_seq_len CTX_SIZE
-Context size in tokens.
+--ctx-size N, --n_ctx N, --max_seq_len N Context size in tokens.

 Speculative decoding:
 --model-draft MODEL_DRAFT Path to the draft model for speculative decoding.
@@ -271,7 +270,7 @@ ExLlamaV2:
 --no_flash_attn Force flash-attention to not be used.
 --no_xformers Force xformers to not be used.
 --no_sdpa Force Torch SDPA to not be used.
---num_experts_per_token NUM_EXPERTS_PER_TOKEN Number of experts to use for generation. Applies to MoE models like Mixtral.
+--num_experts_per_token N Number of experts to use for generation. Applies to MoE models like Mixtral.
 --enable_tp Enable Tensor Parallelism (TP) in ExLlamaV2.

 HQQ:
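The shorter placeholders in the README excerpt above ("--ctx-size N" instead of "--ctx-size CTX_SIZE") come from argparse's metavar option, which the argparse hunks below add to the corresponding add_argument calls. A minimal sketch of the effect, assuming nothing beyond standard-library argparse (the toy_parser name and the reduced flag set are illustrative, not from the repository):

```python
import argparse

# Toy parser mimicking two of the options touched by this commit
# (option names copied from the diff; everything else is illustrative).
toy_parser = argparse.ArgumentParser(prog='server.py')

# Without metavar, argparse uppercases the dest to build the placeholder,
# so the help shows '--num_experts_per_token NUM_EXPERTS_PER_TOKEN'.
toy_parser.add_argument('--num_experts_per_token', type=int, default=2,
                        help='Number of experts to use for generation.')

# With metavar='N', every alias renders as '--ctx-size N, --n_ctx N,
# --max_seq_len N' and the usage line shrinks to '[--ctx-size N]'.
toy_parser.add_argument('--ctx-size', '--n_ctx', '--max_seq_len', type=int,
                        default=8192, metavar='N', help='Context size in tokens.')

print(toy_parser.format_help())
```

Running this prints a usage string in the same style as the new README text above, with the long auto-generated placeholder for the first flag and the short N for the second.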
@@ -131,7 +131,7 @@ group.add_argument('--streaming-llm', action='store_true', help='Activate Stream

 # Cache
 group = parser.add_argument_group('Context and cache management')
-group.add_argument('--ctx-size', '--n_ctx', '--max_seq_len', type=int, default=8192, help='Context size in tokens.')
+group.add_argument('--ctx-size', '--n_ctx', '--max_seq_len', type=int, default=8192, metavar='N', help='Context size in tokens.')
 group.add_argument('--cache_type', type=str, default='fp16', help='KV cache type; valid options: llama.cpp - fp16, q8_0, q4_0; ExLlamaV2 - fp16, fp8, q8, q6, q4; ExLlamaV3 - fp16, q2 to q8 (can specify k_bits and v_bits separately, e.g. q4_q8).')

 # Speculative decoding
@@ -150,7 +150,7 @@ group.add_argument('--cfg-cache', action='store_true', help='ExLlamav2_HF: Creat
 group.add_argument('--no_flash_attn', action='store_true', help='Force flash-attention to not be used.')
 group.add_argument('--no_xformers', action='store_true', help='Force xformers to not be used.')
 group.add_argument('--no_sdpa', action='store_true', help='Force Torch SDPA to not be used.')
-group.add_argument('--num_experts_per_token', type=int, default=2, help='Number of experts to use for generation. Applies to MoE models like Mixtral.')
+group.add_argument('--num_experts_per_token', type=int, default=2, metavar='N', help='Number of experts to use for generation. Applies to MoE models like Mixtral.')
 group.add_argument('--enable_tp', action='store_true', help='Enable Tensor Parallelism (TP) in ExLlamaV2.')

 # HQQ
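Because --ctx-size, --n_ctx and --max_seq_len are declared as aliases of a single argument, they all write to one attribute; adding metavar='N' changes only how the help is rendered, not how values are parsed. A quick standalone check of that behaviour (a sketch with just this one argument, not the project's full parser setup):

```python
import argparse

# Standalone parser reproducing only the aliased argument from the diff.
parser = argparse.ArgumentParser(prog='server.py')
parser.add_argument('--ctx-size', '--n_ctx', '--max_seq_len', type=int,
                    default=8192, metavar='N', help='Context size in tokens.')

# All three spellings populate the same dest, derived from the first
# option string: 'ctx_size'.
assert parser.parse_args(['--n_ctx', '4096']).ctx_size == 4096
assert parser.parse_args(['--max_seq_len', '4096']).ctx_size == 4096

# metavar affects presentation only: the usage line shows the short 'N'.
assert '[--ctx-size N]' in parser.format_usage()
print(parser.format_usage())
```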