Revert "Image: Add the LLM-generated prompt to the API result"

This reverts commit c7ad28a4cd.
This commit is contained in:
oobabooga 2025-12-04 17:34:27 -08:00
parent 3ef428efaa
commit 56f2a9512f
2 changed files with 3 additions and 15 deletions

View file

@@ -47,16 +47,10 @@ def generations(request):
resp = {'created': int(time.time()), 'data': []}
for img in images:
b64 = _image_to_base64(img)
image_obj = {
'revised_prompt': img.info.get('revised_prompt', request.prompt)
}
if request.response_format == 'b64_json':
image_obj['b64_json'] = b64
resp['data'].append({'b64_json': b64})
else:
image_obj['url'] = f'data:image/png;base64,{b64}'
resp['data'].append(image_obj)
resp['data'].append({'url': f'data:image/png;base64,{b64}'})
return resp

View file

@@ -856,13 +856,7 @@ def generate(state, save_images=True):
if magic_suffix.strip(", ") not in clean_prompt:
gen_kwargs["prompt"] = clean_prompt + magic_suffix
batch_results = shared.image_model(**gen_kwargs).images
# Store the modified prompt in the metadata
for img in batch_results:
img.info["revised_prompt"] = clean_prompt
result_holder.extend(batch_results)
result_holder.extend(shared.image_model(**gen_kwargs).images)
gen_kwargs["prompt"] = clean_prompt # restore
except Exception as e:
error_holder.append(e)