Fix parsing of --n_ctx and --max_seq_len

Author: oobabooga
Date:   2025-04-26 17:43:53 -07:00
Parent: 234aba1c50
Commit: c4afc0421d


@@ -130,7 +130,7 @@ group.add_argument('--streaming-llm', action='store_true', help='Activate Stream
 # Cache
 group = parser.add_argument_group('Context and cache management')
-group.add_argument('--ctx-size', '--n_ctx', '--max_seq_len', type=int, default=8192, metavar='N', help='Context size in tokens.')
+group.add_argument('--ctx-size', '--n_ctx', '--max_seq_len', type=int, default=8192, metavar='N', dest='ctx_size', help='Context size in tokens.')
 group.add_argument('--cache_type', type=str, default='fp16', help='KV cache type; valid options: llama.cpp - fp16, q8_0, q4_0; ExLlamaV2 - fp16, fp8, q8, q6, q4; ExLlamaV3 - fp16, q2 to q8 (can specify k_bits and v_bits separately, e.g. q4_q8).')
 # Speculative decoding
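
For reference, a minimal standalone sketch (not the webui's actual shared.py) of how the argument behaves after this change: with the explicit dest='ctx_size', the legacy aliases --n_ctx and --max_seq_len all write to the same args.ctx_size attribute.

    import argparse

    # Standalone illustration, assuming only standard argparse behavior.
    parser = argparse.ArgumentParser()
    group = parser.add_argument_group('Context and cache management')
    group.add_argument('--ctx-size', '--n_ctx', '--max_seq_len', type=int,
                       default=8192, metavar='N', dest='ctx_size',
                       help='Context size in tokens.')

    # All three flags land on args.ctx_size rather than separate attributes.
    args = parser.parse_args(['--max_seq_len', '4096'])
    print(args.ctx_size)  # 4096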