Better default settings for Speculative Decoding

This commit is contained in:
oobabooga 2025-04-26 17:24:40 -07:00
parent bf2aa19b21
commit 4ff91b6588
2 changed files with 2 additions and 3 deletions

View file

@@ -14,7 +14,6 @@ from modules.logging_colors import logger
model = None
tokenizer = None
model_name = 'None'
-draft_model_name = 'None'
is_seq2seq = False
model_dirty_from_training = False
lora_names = []
@@ -138,7 +137,7 @@ group.add_argument('--cache_type', type=str, default='fp16', help='KV cache type
group = parser.add_argument_group('Speculative decoding')
group.add_argument('--model-draft', type=str, default=None, help='Path to the draft model for speculative decoding.')
group.add_argument('--draft-max', type=int, default=4, help='Number of tokens to draft for speculative decoding.')
-group.add_argument('--gpu-layers-draft', type=int, default=0, help='Number of layers to offload to the GPU for the draft model.')
+group.add_argument('--gpu-layers-draft', type=int, default=256, help='Number of layers to offload to the GPU for the draft model.')
group.add_argument('--device-draft', type=str, default=None, help='Comma-separated list of devices to use for offloading the draft model. Example: CUDA0,CUDA1')
group.add_argument('--ctx-size-draft', type=int, default=0, help='Size of the prompt context for the draft model. If 0, uses the same as the main model.')