"""
OpenAI-compatible image generation using local diffusion models.
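
Example request (a minimal client-side sketch; the endpoint path and port are
assumptions about how this extension is typically mounted, not guarantees made
by this module):

    import requests

    r = requests.post(
        "http://127.0.0.1:5000/v1/images/generations",
        json={"prompt": "a watercolor fox", "response_format": "b64_json"},
    )
    print(r.json()["data"][0]["revised_prompt"])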
"""
import base64
import io
import time

from extensions.openai.errors import ServiceUnavailableError
from modules import shared


def generations(request):
    """
    Generate images using the loaded diffusion model.

    Returns a dict with a 'created' timestamp and a 'data' list of image objects.
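
    The response mirrors the OpenAI images API shape; roughly (values are
    illustrative, not real output):

        {
            'created': 1733300000,
            'data': [
                {'revised_prompt': '...', 'b64_json': '<base64-encoded PNG>'}
            ]
        }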
    """
    from modules.ui_image_generation import generate

    if shared.image_model is None:
        raise ServiceUnavailableError("No image model loaded. Load a model via the UI first.")
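
    # Assumption: get_width_height() resolves the requested size (e.g. an
    # OpenAI-style '1024x1024' string) into integer pixel dimensions.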
    width, height = request.get_width_height()

    # Build state dict: GenerationOptions fields + image-specific keys
    state = request.model_dump()
    state.update({
        'image_model_menu': shared.image_model_name,
        'image_prompt': request.prompt,
        'image_neg_prompt': request.negative_prompt,
        'image_width': width,
        'image_height': height,
        'image_steps': request.steps,
        'image_seed': request.image_seed,
        'image_batch_size': request.batch_size,
        'image_batch_count': request.batch_count,
        'image_cfg_scale': request.cfg_scale,
        'image_llm_variations': request.llm_variations,
        'image_llm_variations_prompt': request.llm_variations_prompt or shared.settings.get('image_llm_variations_prompt', ''),
    })

    # Exhaust generator, keep final result
    images = []
    for images, _ in generate(state, save_images=False):
        pass

    # Build response
    resp = {'created': int(time.time()), 'data': []}
    for img in images:
        b64 = _image_to_base64(img)
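        # Prefer the prompt recorded in the image metadata if the pipeline revised
        # it, falling back to the prompt as submitted.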
        image_obj = {
            'revised_prompt': img.info.get('revised_prompt', request.prompt)
        }

        if request.response_format == 'b64_json':
            image_obj['b64_json'] = b64
        else:
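            # Nothing is hosted on disk or a server here, so 'url' is returned
            # as an inline data URI rather than a link.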
            image_obj['url'] = f'data:image/png;base64,{b64}'

        resp['data'].append(image_obj)

    return resp


def _image_to_base64(image) -> str:
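    """Encode an image (anything with a PIL-style save()) as a base64 PNG string."""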
    buffered = io.BytesIO()
    image.save(buffered, format="PNG")
    return base64.b64encode(buffered.getvalue()).decode('utf-8')