Mirror of https://github.com/oobabooga/text-generation-webui.git
Synced 2025-12-06 07:12:10 +01:00
Code formatting

commit e0f5905a97
parent 5b06284a8a
@@ -236,11 +236,12 @@ class Exllamav3Model:
         """
         Generate text with streaming using native ExLlamaV3 API
         """
-        image_embeddings = []
 
         if shared.is_multimodal:
             # Process images and modify prompt (ExLlamaV3-specific)
             prompt, image_embeddings = self._process_images_for_generation(prompt, state)
+        else:
+            image_embeddings = []
 
         # Greedy decoding is a special case
         if state['temperature'] == 0:
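The effect of this hunk: `image_embeddings` was previously initialized to an empty list unconditionally and then overwritten on the multimodal path; it is now set in an explicit `else` branch, so each path assigns it exactly once. A minimal sketch of the resulting flow (the method name, signature, and trailing `...` are assumptions; the body lines come from the hunk above):

    # Sketch only: method name and signature are assumptions, not taken from the diff.
    def generate_with_streaming(self, prompt, state):
        """
        Generate text with streaming using native ExLlamaV3 API
        """
        if shared.is_multimodal:
            # Process images and modify prompt (ExLlamaV3-specific)
            prompt, image_embeddings = self._process_images_for_generation(prompt, state)
        else:
            image_embeddings = []

        # Greedy decoding is a special case
        if state['temperature'] == 0:
            ...  # greedy path continues beyond the hunk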
@@ -155,7 +155,7 @@ def get_pytorch_update_command(gpu_choice):
     base_cmd = f"python -m pip install --upgrade torch=={TORCH_VERSION} "
 
     if gpu_choice == "NVIDIA_CUDA128":
-        return "python -m pip install --upgrade torch==2.7.1 --index-url https://download.pytorch.org/whl/cu128"
+        return f"{base_cmd} --index-url https://download.pytorch.org/whl/cu128"
     elif gpu_choice == "AMD":
         return f"{base_cmd} --index-url https://download.pytorch.org/whl/rocm6.2.4"
     elif gpu_choice in ["APPLE", "NONE"]:
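This hunk removes the hardcoded `torch==2.7.1` from the CUDA branch and derives the command from `base_cmd`, as the AMD branch already did. A minimal sketch of the resulting function, assuming `TORCH_VERSION` is a module-level string constant and that the `APPLE`/`NONE` branch returns the bare command (both assumptions; only the lines shown in the hunk are confirmed):

    TORCH_VERSION = "2.7.1"  # assumption: module-level constant referenced by the f-string

    def get_pytorch_update_command(gpu_choice):
        # base_cmd ends with a trailing space, so the f-strings below yield a
        # double space before --index-url; the shell collapses this harmlessly.
        base_cmd = f"python -m pip install --upgrade torch=={TORCH_VERSION} "

        if gpu_choice == "NVIDIA_CUDA128":
            return f"{base_cmd} --index-url https://download.pytorch.org/whl/cu128"
        elif gpu_choice == "AMD":
            return f"{base_cmd} --index-url https://download.pytorch.org/whl/rocm6.2.4"
        elif gpu_choice in ["APPLE", "NONE"]:
            return base_cmd  # assumption: CPU/Metal installs use the default PyPI index

With `TORCH_VERSION = "2.7.1"`, `get_pytorch_update_command("NVIDIA_CUDA128")` produces an equivalent command to the one it replaced, now built from the shared template instead of a duplicated literal.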