diff --git a/extensions/openai/completions.py b/extensions/openai/completions.py
index ff64527a..f4944060 100644
--- a/extensions/openai/completions.py
+++ b/extensions/openai/completions.py
@@ -7,7 +7,6 @@ import tiktoken
 from pydantic import ValidationError
 
 from extensions.openai.errors import InvalidRequestError
-from extensions.openai.image_utils import convert_openai_messages_to_images
 from extensions.openai.typing import ToolDefinition
 from extensions.openai.utils import debug_msg, getToolCallId, parseToolCall
 from modules import shared
@@ -17,6 +16,7 @@ from modules.chat import (
     load_character_memoized,
     load_instruction_template_memoized
 )
+from modules.image_utils import convert_openai_messages_to_images
 from modules.logging_colors import logger
 from modules.presets import load_preset_memoized
 from modules.text_generation import decode, encode, generate_reply
diff --git a/modules/exllamav3.py b/modules/exllamav3.py
index 9201801c..9d597ce7 100644
--- a/modules/exllamav3.py
+++ b/modules/exllamav3.py
@@ -18,11 +18,11 @@ from exllamav3.generator.sampler import (
     SS_TopP
 )
 
-from extensions.openai.image_utils import (
+from modules import shared
+from modules.image_utils import (
     convert_image_attachments_to_pil,
     convert_openai_messages_to_images
 )
-from modules import shared
 from modules.logging_colors import logger
 from modules.text_generation import get_max_prompt_length
 from modules.torch_utils import clear_torch_cache
diff --git a/extensions/openai/image_utils.py b/modules/image_utils.py
similarity index 100%
rename from extensions/openai/image_utils.py
rename to modules/image_utils.py
diff --git a/modules/llama_cpp_server.py b/modules/llama_cpp_server.py
index 072ff83b..3e8127ab 100644
--- a/modules/llama_cpp_server.py
+++ b/modules/llama_cpp_server.py
@@ -12,11 +12,11 @@ from pathlib import Path
 import llama_cpp_binaries
 import requests
 
-from extensions.openai.image_utils import (
+from modules import shared
+from modules.image_utils import (
     convert_image_attachments_to_pil,
     convert_pil_to_base64
 )
-from modules import shared
 from modules.logging_colors import logger
 
 llamacpp_valid_cache_types = {"fp16", "q8_0", "q4_0"}