diff --git a/extensions/openai/images.py b/extensions/openai/images.py index 0bb91a1e..e60470c3 100644 --- a/extensions/openai/images.py +++ b/extensions/openai/images.py @@ -48,10 +48,16 @@ def generations(request): resp = {'created': int(time.time()), 'data': []} for img in images: b64 = _image_to_base64(img) + image_obj = { + 'revised_prompt': img.info.get('revised_prompt', request.prompt) + } + if request.response_format == 'b64_json': - resp['data'].append({'b64_json': b64}) + image_obj['b64_json'] = b64 else: - resp['data'].append({'url': f'data:image/png;base64,{b64}'}) + image_obj['url'] = f'data:image/png;base64,{b64}' + + resp['data'].append(image_obj) return resp diff --git a/modules/ui_image_generation.py b/modules/ui_image_generation.py index 2cad5dc4..424589b6 100644 --- a/modules/ui_image_generation.py +++ b/modules/ui_image_generation.py @@ -856,7 +856,13 @@ def generate(state, save_images=True): if magic_suffix.strip(", ") not in clean_prompt: gen_kwargs["prompt"] = clean_prompt + magic_suffix - result_holder.extend(shared.image_model(**gen_kwargs).images) + batch_results = shared.image_model(**gen_kwargs).images + + # Store the prompt (without the magic suffix) in the image metadata so the API can report it as revised_prompt + for img in batch_results: + img.info["revised_prompt"] = clean_prompt + + result_holder.extend(batch_results) gen_kwargs["prompt"] = clean_prompt # restore except Exception as e: error_holder.append(e)