Mirror of https://github.com/oobabooga/text-generation-webui.git
Remove quanto for now (requires messy compilation)
parent b3666e140d
commit 6291e72129
@@ -11,7 +11,7 @@ def get_quantization_config(quant_method):
     Get the appropriate quantization config based on the selected method.
 
     Args:
-        quant_method: One of 'none', 'bnb-8bit', 'bnb-4bit', 'quanto-8bit', 'quanto-4bit', 'quanto-2bit'
+        quant_method: One of 'none', 'bnb-8bit', 'bnb-4bit'
 
     Returns:
         PipelineQuantizationConfig or None
@@ -46,30 +46,6 @@ def get_quantization_config(quant_method):
             }
         )
 
-    # Quanto 8-bit quantization
-    elif quant_method == 'quanto-8bit':
-        return PipelineQuantizationConfig(
-            quant_mapping={
-                "transformer": QuantoConfig(weights_dtype="int8")
-            }
-        )
-
-    # Quanto 4-bit quantization
-    elif quant_method == 'quanto-4bit':
-        return PipelineQuantizationConfig(
-            quant_mapping={
-                "transformer": QuantoConfig(weights_dtype="int4")
-            }
-        )
-
-    # Quanto 2-bit quantization
-    elif quant_method == 'quanto-2bit':
-        return PipelineQuantizationConfig(
-            quant_mapping={
-                "transformer": QuantoConfig(weights_dtype="int2")
-            }
-        )
-
     else:
         logger.warning(f"Unknown quantization method: {quant_method}. Loading without quantization.")
        return None
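For orientation, the bitsandbytes branches that survive this commit presumably mirror the removed quanto ones; the sketch below is an assumption based on the closing "}" / ")" context lines visible above and on diffusers' documented BitsAndBytesConfig / PipelineQuantizationConfig API, not the file's verbatim contents (the helper name get_bnb_config is hypothetical):

    # Hedged sketch of the surviving bnb path; only the closing braces of
    # this branch are visible in the hunk above.
    import torch
    from diffusers import BitsAndBytesConfig
    from diffusers.quantizers import PipelineQuantizationConfig

    def get_bnb_config(quant_method):  # hypothetical helper name
        if quant_method == 'bnb-8bit':
            return PipelineQuantizationConfig(
                quant_mapping={"transformer": BitsAndBytesConfig(load_in_8bit=True)}
            )
        elif quant_method == 'bnb-4bit':
            return PipelineQuantizationConfig(
                quant_mapping={
                    "transformer": BitsAndBytesConfig(
                        load_in_4bit=True,
                        bnb_4bit_quant_type="nf4",
                        bnb_4bit_compute_dtype=torch.bfloat16,
                    )
                }
            )
        return None  # 'none' or unknown: load unquantized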
@@ -101,7 +77,7 @@ def load_image_model(model_name, dtype='bfloat16', attn_backend='sdpa', cpu_offl
         attn_backend: 'sdpa', 'flash_attention_2', or 'flash_attention_3'
         cpu_offload: Enable CPU offloading for low VRAM
         compile_model: Compile the model for faster inference (slow first run)
-        quant_method: Quantization method - 'none', 'bnb-8bit', 'bnb-4bit', 'quanto-8bit', 'quanto-4bit', 'quanto-2bit'
+        quant_method: Quantization method - 'none', 'bnb-8bit', 'bnb-4bit'
     """
     import torch
     from diffusers import DiffusionPipeline
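Such a config is consumed at pipeline load time. The following is a minimal sketch of diffusers' documented pattern; only the two imports appear in the hunk above, the model id is a placeholder, and get_bnb_config is the hypothetical helper from the earlier sketch:

    # Minimal consumption sketch; "some/diffusion-model" is a placeholder id.
    import torch
    from diffusers import DiffusionPipeline

    pipe = DiffusionPipeline.from_pretrained(
        "some/diffusion-model",
        quantization_config=get_bnb_config('bnb-4bit'),
        torch_dtype=torch.bfloat16,
    )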
@@ -60,7 +60,7 @@ group.add_argument('--image-attn-backend', type=str, default=None, choices=['sdp
 group.add_argument('--image-cpu-offload', action='store_true', help='Enable CPU offloading for image model.')
 group.add_argument('--image-compile', action='store_true', help='Compile the image model for faster inference.')
 group.add_argument('--image-quant', type=str, default=None,
-                   choices=['none', 'bnb-8bit', 'bnb-4bit', 'quanto-8bit', 'quanto-4bit', 'quanto-2bit'],
+                   choices=['none', 'bnb-8bit', 'bnb-4bit'],
                    help='Quantization method for image model.')
 
 # Model loader
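With the narrowed choices list, an invocation such as python server.py --image-quant bnb-4bit (server.py being this repository's usual entry point) still parses, while any former quanto-* value is now rejected by argparse's choices check before the loader runs.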
@@ -471,7 +471,7 @@ def create_ui():
         with gr.Column():
             shared.gradio['image_quant'] = gr.Dropdown(
                 label='Quantization',
-                choices=['none', 'bnb-8bit', 'bnb-4bit', 'quanto-8bit', 'quanto-4bit', 'quanto-2bit'],
+                choices=['none', 'bnb-8bit', 'bnb-4bit'],
                 value=shared.settings['image_quant'],
                 info='Quantization method for reduced VRAM usage. Quanto supports lower precisions (2-bit, 4-bit, 8-bit).'
             )
@@ -11,7 +11,6 @@ huggingface-hub==0.36.0
 jinja2==3.1.6
 markdown
 numpy==2.2.*
-optimum-quanto==0.2.7
 pandas
 peft==0.18.*
 Pillow>=9.5.0
@@ -9,7 +9,6 @@ huggingface-hub==0.36.0
 jinja2==3.1.6
 markdown
 numpy==2.2.*
-optimum-quanto==0.2.7
 pandas
 peft==0.18.*
 Pillow>=9.5.0
@@ -9,7 +9,6 @@ huggingface-hub==0.36.0
 jinja2==3.1.6
 markdown
 numpy==2.2.*
-optimum-quanto==0.2.7
 pandas
 peft==0.18.*
 Pillow>=9.5.0
@@ -9,7 +9,6 @@ huggingface-hub==0.36.0
 jinja2==3.1.6
 markdown
 numpy==2.2.*
-optimum-quanto==0.2.7
 pandas
 peft==0.18.*
 Pillow>=9.5.0
@@ -9,7 +9,6 @@ huggingface-hub==0.36.0
 jinja2==3.1.6
 markdown
 numpy==2.2.*
-optimum-quanto==0.2.7
 pandas
 peft==0.18.*
 Pillow>=9.5.0
@@ -9,7 +9,6 @@ huggingface-hub==0.36.0
 jinja2==3.1.6
 markdown
 numpy==2.2.*
-optimum-quanto==0.2.7
 pandas
 peft==0.18.*
 Pillow>=9.5.0
@@ -9,7 +9,6 @@ huggingface-hub==0.36.0
 jinja2==3.1.6
 markdown
 numpy==2.2.*
-optimum-quanto==0.2.7
 pandas
 peft==0.18.*
 Pillow>=9.5.0
@@ -11,7 +11,6 @@ huggingface-hub==0.36.0
 jinja2==3.1.6
 markdown
 numpy==2.2.*
-optimum-quanto==0.2.7
 pandas
 peft==0.18.*
 Pillow>=9.5.0
@@ -9,7 +9,6 @@ huggingface-hub==0.36.0
 jinja2==3.1.6
 markdown
 numpy==2.2.*
-optimum-quanto==0.2.7
 pandas
 peft==0.18.*
 Pillow>=9.5.0