diff --git a/extensions/openai/images.py b/extensions/openai/images.py
index f46d549d..ef3f4169 100644
--- a/extensions/openai/images.py
+++ b/extensions/openai/images.py
@@ -47,16 +47,10 @@ def generations(request):
     resp = {'created': int(time.time()), 'data': []}
     for img in images:
         b64 = _image_to_base64(img)
-        image_obj = {
-            'revised_prompt': img.info.get('revised_prompt', request.prompt)
-        }
-
         if request.response_format == 'b64_json':
-            image_obj['b64_json'] = b64
+            resp['data'].append({'b64_json': b64})
         else:
-            image_obj['url'] = f'data:image/png;base64,{b64}'
-
-        resp['data'].append(image_obj)
+            resp['data'].append({'url': f'data:image/png;base64,{b64}'})
 
     return resp
 
diff --git a/modules/ui_image_generation.py b/modules/ui_image_generation.py
index 424589b6..2cad5dc4 100644
--- a/modules/ui_image_generation.py
+++ b/modules/ui_image_generation.py
@@ -856,13 +856,7 @@ def generate(state, save_images=True):
                     if magic_suffix.strip(", ") not in clean_prompt:
                         gen_kwargs["prompt"] = clean_prompt + magic_suffix
 
-                    batch_results = shared.image_model(**gen_kwargs).images
-
-                    # Store the modified prompt in the metadata
-                    for img in batch_results:
-                        img.info["revised_prompt"] = clean_prompt
-
-                    result_holder.extend(batch_results)
+                    result_holder.extend(shared.image_model(**gen_kwargs).images)
                     gen_kwargs["prompt"] = clean_prompt  # restore
                 except Exception as e:
                     error_holder.append(e)