Add a --torch-compile flag for transformers

oobabooga 2025-01-05 05:45:12 -08:00
parent 11af199aff
commit c0f600c887
5 changed files with 9 additions and 2 deletions


@@ -104,6 +104,7 @@ group.add_argument('--force-safetensors', action='store_true', help='Set use_saf
 group.add_argument('--no_use_fast', action='store_true', help='Set use_fast=False while loading the tokenizer (it\'s True by default). Use this if you have any problems related to use_fast.')
 group.add_argument('--use_flash_attention_2', action='store_true', help='Set use_flash_attention_2=True while loading the model.')
 group.add_argument('--use_eager_attention', action='store_true', help='Set attn_implementation= eager while loading the model.')
+group.add_argument('--torch-compile', action='store_true', help='Compile the model with torch.compile for improved performance.')
 # bitsandbytes 4-bit
 group = parser.add_argument_group('bitsandbytes 4-bit')
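The hunk above only adds the command-line flag; the other changed files in this commit presumably wire it into model loading. A minimal sketch of how such a flag is typically applied, assuming an argparse namespace where the option is parsed as args.torch_compile (the load_model function and its arguments are illustrative, not the project's actual code):

    import torch
    from transformers import AutoModelForCausalLM

    def load_model(model_path, args):
        # Load the transformers model as usual.
        model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.float16)

        # Hypothetical wiring: when --torch-compile is passed, wrap the model with
        # torch.compile (PyTorch >= 2.0) so forward passes are JIT-compiled.
        if args.torch_compile:
            model = torch.compile(model)

        return model

The flag is then passed on the command line like the other transformers loader options shown above (e.g. --use_flash_attention_2); the first few generations after loading are slower while compilation warms up.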