ExllamaV2 tensor parallelism to increase multi gpu inference speeds (#6356)

This commit is contained in:
RandoInternetPreson 2024-09-27 23:26:03 -04:00 committed by GitHub
parent 301375834e
commit 46996f6519
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
3 changed files with 38 additions and 17 deletions

View file

@@ -146,6 +146,7 @@ group.add_argument('--no_sdpa', action='store_true', help='Force Torch SDPA to n
group.add_argument('--cache_8bit', action='store_true', help='Use 8-bit cache to save VRAM.')
group.add_argument('--cache_4bit', action='store_true', help='Use Q4 cache to save VRAM.')
group.add_argument('--num_experts_per_token', type=int, default=2, help='Number of experts to use for generation. Applies to MoE models like Mixtral.')
group.add_argument('--enable_tp', action='store_true', help='Enable Tensor Parallelism (TP) in ExLlamaV2.')
# AutoGPTQ
group = parser.add_argument_group('AutoGPTQ')