Mirror of https://github.com/oobabooga/text-generation-webui.git, synced 2026-04-10 09:03:40 +00:00
Remove the --torch-compile option (it doesn't do anything currently)
parent 5a8a9c22e8
commit 1d1b20bd77

6 changed files with 5 additions and 13 deletions
@@ -39,7 +39,6 @@ loaders_and_params = OrderedDict({
         'quant_type',
         'load_in_8bit',
         'load_in_4bit',
-        'torch_compile',
         'attn_implementation',
         'cpu',
         'disk',
@@ -62,7 +62,6 @@ group.add_argument('--trust-remote-code', action='store_true', help='Set trust_r
 group.add_argument('--force-safetensors', action='store_true', help='Set use_safetensors=True while loading the model. This prevents arbitrary code execution.')
 group.add_argument('--no_use_fast', action='store_true', help='Set use_fast=False while loading the tokenizer (it\'s True by default). Use this if you have any problems related to use_fast.')
 group.add_argument('--attn-implementation', type=str, default='sdpa', metavar="IMPLEMENTATION", help='Attention implementation. Valid options: sdpa, eager, flash_attention_2.')
-group.add_argument('--torch-compile', action='store_true', help='Compile the model with torch.compile for improved performance.')

 # bitsandbytes 4-bit
 group = parser.add_argument_group('bitsandbytes 4-bit')
@@ -258,9 +258,6 @@ def load_model_HF(model_name):
     print()
     model = LoaderClass.from_pretrained(path_to_model, **params)

-    if shared.args.torch_compile:
-        model = torch.compile(model)
-
     return model
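For reference, the snippet below shows how torch.compile is typically applied to a Transformers model, mirroring the wrap-and-replace pattern deleted above. It is a minimal sketch, not this project's loader code: the model id, prompt, and generation settings are illustrative, and it assumes PyTorch 2.0+ and the transformers package are installed.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Illustrative model id (assumption); any causal LM loads the same way.
model_id = "gpt2"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# torch.compile returns a wrapped module that is used in place of the
# original; actual compilation happens lazily on the first forward pass.
model = torch.compile(model)

inputs = tokenizer("Hello there,", return_tensors="pt")
with torch.no_grad():
    output_ids = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))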
@@ -142,7 +142,6 @@ def list_model_elements():
         'num_experts_per_token',
         'load_in_8bit',
         'load_in_4bit',
-        'torch_compile',
         'flash_attn',
         'attn_implementation',
         'cpu',
@@ -52,7 +52,6 @@ def create_ui():
     shared.gradio['streaming_llm'] = gr.Checkbox(label="streaming-llm", value=shared.args.streaming_llm, info='Activate StreamingLLM to avoid re-evaluating the entire prompt when old messages are removed.')
     shared.gradio['load_in_8bit'] = gr.Checkbox(label="load-in-8bit", value=shared.args.load_in_8bit)
     shared.gradio['load_in_4bit'] = gr.Checkbox(label="load-in-4bit", value=shared.args.load_in_4bit)
-    shared.gradio['torch_compile'] = gr.Checkbox(label="torch-compile", value=shared.args.torch_compile, info='Compile the model with torch.compile for improved performance.')
     shared.gradio['use_double_quant'] = gr.Checkbox(label="use_double_quant", value=shared.args.use_double_quant, info='Used by load-in-4bit.')
     shared.gradio['autosplit'] = gr.Checkbox(label="autosplit", value=shared.args.autosplit, info='Automatically split the model tensors across the available GPUs.')
     shared.gradio['enable_tp'] = gr.Checkbox(label="enable_tp", value=shared.args.enable_tp, info='Enable Tensor Parallelism (TP).')
@@ -131,7 +130,7 @@ def create_ui():
 def create_event_handlers():
     mu = shared.args.multi_user
     if mu:
         return

     shared.gradio['loader'].change(loaders.make_loader_params_visible, gradio('loader'), gradio(loaders.get_all_params()), show_progress=False)