Mirror of https://github.com/oobabooga/text-generation-webui.git, synced 2026-04-05 06:35:15 +00:00
Bump llama-cpp-python to 0.2.69, add --flash-attn option
parent 0476f9fe70
commit e61055253c
15 changed files with 66 additions and 58 deletions
modules/llamacpp_hf.py
@@ -217,7 +217,8 @@ class LlamacppHF(PreTrainedModel):
             'rope_freq_scale': 1.0 / shared.args.compress_pos_emb,
             'logits_all': shared.args.logits_all,
             'offload_kqv': not shared.args.no_offload_kqv,
-            'split_mode': 1 if not shared.args.row_split else 2
+            'split_mode': 1 if not shared.args.row_split else 2,
+            'flash_attn': shared.args.flash_attn
         }

         Llama = llama_cpp_lib().Llama
modules/llamacpp_model.py
@@ -96,7 +96,8 @@ class LlamaCppModel:
             'tensor_split': tensor_split_list,
             'rope_freq_scale': 1.0 / shared.args.compress_pos_emb,
             'offload_kqv': not shared.args.no_offload_kqv,
-            'split_mode': 1 if not shared.args.row_split else 2
+            'split_mode': 1 if not shared.args.row_split else 2,
+            'flash_attn': shared.args.flash_attn
         }

         result.model = Llama(**params)
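For context, the params dict above is passed straight to llama_cpp.Llama, so the new key becomes a constructor argument. A hedged standalone sketch (not webui code; the model path is a placeholder) of the same call against llama-cpp-python 0.2.69:

# Standalone sketch: --flash-attn ultimately toggles this constructor flag.
from llama_cpp import Llama

llm = Llama(
    model_path='/path/to/model.gguf',  # placeholder path
    n_gpu_layers=-1,                   # offload all layers to the GPU
    offload_kqv=True,                  # mirrors `not shared.args.no_offload_kqv`
    flash_attn=True,                   # mirrors `shared.args.flash_attn`
)
print(llm('Hello', max_tokens=16)['choices'][0]['text'])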
modules/loaders.py
@@ -46,6 +46,7 @@ loaders_and_params = OrderedDict({
         'no_offload_kqv',
         'row_split',
         'tensorcores',
+        'flash_attn',
         'streaming_llm',
         'attention_sink_size',
     ],
@@ -71,6 +72,7 @@ loaders_and_params = OrderedDict({
         'no_offload_kqv',
         'row_split',
         'tensorcores',
+        'flash_attn',
         'streaming_llm',
         'attention_sink_size',
         'llamacpp_HF_info',
modules/shared.py
@@ -114,6 +114,7 @@ group.add_argument('--quant_type', type=str, default='nf4', help='quant_type for

 # llama.cpp
 group = parser.add_argument_group('llama.cpp')
+group.add_argument('--flash-attn', action='store_true', help='Use flash-attention.')
 group.add_argument('--tensorcores', action='store_true', help='Use llama-cpp-python compiled with tensor cores support. This increases performance on RTX cards. NVIDIA only.')
 group.add_argument('--n_ctx', type=int, default=2048, help='Size of the prompt context.')
 group.add_argument('--threads', type=int, default=0, help='Number of threads to use.')
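A note on naming: argparse replaces hyphens with underscores when building attribute names, so the new --flash-attn flag surfaces as shared.args.flash_attn, which is what the loader changes above read. A minimal standalone illustration:

# Standalone illustration (not webui code) of argparse's
# hyphen-to-underscore conversion for option destinations.
import argparse

parser = argparse.ArgumentParser()
group = parser.add_argument_group('llama.cpp')
group.add_argument('--flash-attn', action='store_true', help='Use flash-attention.')

args = parser.parse_args(['--flash-attn'])
assert args.flash_attn is True  # destination is 'flash_attn', not 'flash-attn'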
modules/ui.py
@@ -104,6 +104,7 @@ def list_model_elements():
         'no_offload_kqv',
         'row_split',
         'tensorcores',
+        'flash_attn',
         'streaming_llm',
         'attention_sink_size',
         'hqq_backend',
modules/ui_model_menu.py
@@ -118,6 +118,7 @@ def create_ui():
         shared.gradio['load_in_4bit'] = gr.Checkbox(label="load-in-4bit", value=shared.args.load_in_4bit)
         shared.gradio['use_double_quant'] = gr.Checkbox(label="use_double_quant", value=shared.args.use_double_quant)
         shared.gradio['use_flash_attention_2'] = gr.Checkbox(label="use_flash_attention_2", value=shared.args.use_flash_attention_2, info='Set use_flash_attention_2=True while loading the model.')
+        shared.gradio['flash_attn'] = gr.Checkbox(label="flash_attn", value=shared.args.flash_attn, info='Use flash-attention.')
         shared.gradio['auto_devices'] = gr.Checkbox(label="auto-devices", value=shared.args.auto_devices)
         shared.gradio['tensorcores'] = gr.Checkbox(label="tensorcores", value=shared.args.tensorcores, info='NVIDIA only: use llama-cpp-python compiled with tensor cores support. This increases performance on RTX cards.')
         shared.gradio['streaming_llm'] = gr.Checkbox(label="streaming_llm", value=shared.args.streaming_llm, info='(experimental) Activate StreamingLLM to avoid re-evaluating the entire prompt when old messages are removed.')
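The checkbox just seeds the UI from the CLI value, so toggling it mirrors passing --flash-attn. A minimal gradio sketch of the same wiring (illustrative only; the real layout lives in create_ui()):

# Illustrative sketch: a checkbox seeded from a CLI-style default,
# standing in for shared.args.flash_attn. Not the webui's actual layout.
import gradio as gr

flash_attn_default = False  # stands in for shared.args.flash_attn

with gr.Blocks() as demo:
    gr.Checkbox(label="flash_attn", value=flash_attn_default,
                info='Use flash-attention.')

demo.launch()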