Add StreamingLLM for llamacpp & llamacpp_HF (2nd attempt) (#5669)

oobabooga 2024-03-09 00:25:33 -03:00 committed by GitHub
parent 9271e80914
commit afb51bd5d6
7 changed files with 147 additions and 0 deletions


@@ -130,6 +130,8 @@ group.add_argument('--logits_all', action='store_true', help='Needs to be set fo
group.add_argument('--no_offload_kqv', action='store_true', help='Do not offload the K, Q, V to the GPU. This saves VRAM but reduces the performance.')
group.add_argument('--cache-capacity', type=str, help='Maximum cache capacity (llama-cpp-python). Examples: 2000MiB, 2GiB. When provided without units, bytes will be assumed.')
group.add_argument('--row_split', action='store_true', help='Split the model by rows across GPUs. This may improve multi-gpu performance.')
group.add_argument('--streaming-llm', action='store_true', help='Activates StreamingLLM, which prevents the prompt from ever being reevaluated when old chat messages are removed due to the context length for the model being reached.')
group.add_argument('--attention-sink-size', type=int, default=5, help='Minimum attention sink length from StreamingLLM.')
# ExLlamaV2
group = parser.add_argument_group('ExLlamaV2')
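
The two flags added above expose the StreamingLLM mechanism: instead of re-evaluating the whole prompt whenever old chat messages are dropped because the model's context window is full, the backend keeps the first few cached tokens (the "attention sink") plus the most recent tokens and discards only the middle of the KV cache. Below is a minimal sketch of that eviction idea, using a plain Python list in place of the real llama.cpp KV cache; evict_kv_cache, max_cache_size and the list-based cache are illustrative and are not the code added by this commit.

def evict_kv_cache(cache, max_cache_size, sink_size=5):
    # Keep the attention sink (the first `sink_size` entries, matching
    # --attention-sink-size) plus the most recent entries, and drop the
    # oldest non-sink entries. Because the sink is preserved, generation
    # can continue without re-evaluating the prompt from scratch.
    assert max_cache_size > sink_size, "cache limit must exceed the sink size"
    if len(cache) <= max_cache_size:
        return cache
    recent = max_cache_size - sink_size
    return cache[:sink_size] + cache[-recent:]

In this sketch, --streaming-llm corresponds to taking the eviction path at all, and --attention-sink-size sets sink_size; the actual commit performs the equivalent bookkeeping on the llama-cpp-python cache rather than on a Python list.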