Image: Add the LLM-generated prompt to the API result

oobabooga 2025-12-04 17:20:50 -08:00
parent b451bac082
commit c7ad28a4cd
2 changed files with 15 additions and 3 deletions


@@ -48,10 +48,16 @@ def generations(request):
     resp = {'created': int(time.time()), 'data': []}
     for img in images:
         b64 = _image_to_base64(img)
+        image_obj = {
+            'revised_prompt': img.info.get('revised_prompt', request.prompt)
+        }
+
         if request.response_format == 'b64_json':
-            resp['data'].append({'b64_json': b64})
+            image_obj['b64_json'] = b64
         else:
-            resp['data'].append({'url': f'data:image/png;base64,{b64}'})
+            image_obj['url'] = f'data:image/png;base64,{b64}'
+
+        resp['data'].append(image_obj)
     return resp
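
The API-facing effect: every entry in data now carries a revised_prompt field alongside b64_json or url, falling back to the original request prompt when no rewrite happened, which mirrors the shape of OpenAI's images responses. A minimal client sketch; the route and port are assumptions (the project's OpenAI-compatible /v1/images/generations on the default API port), not something this diff confirms:

    # Sketch of a client reading the new field; route/port are assumptions.
    import base64
    import requests

    resp = requests.post(
        "http://127.0.0.1:5000/v1/images/generations",
        json={
            "prompt": "a watercolor fox",
            "response_format": "b64_json",
        },
    )
    resp.raise_for_status()

    for i, item in enumerate(resp.json()["data"]):
        # New in this commit: the prompt actually used for generation.
        print("revised_prompt:", item["revised_prompt"])
        with open(f"out_{i}.png", "wb") as f:
            f.write(base64.b64decode(item["b64_json"]))

Because the new key is additive, clients that only read b64_json or url are unaffected.
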


@@ -856,7 +856,13 @@ def generate(state, save_images=True):
             if magic_suffix.strip(", ") not in clean_prompt:
                 gen_kwargs["prompt"] = clean_prompt + magic_suffix
-            result_holder.extend(shared.image_model(**gen_kwargs).images)
+            batch_results = shared.image_model(**gen_kwargs).images
+
+            # Store the modified prompt in the metadata
+            for img in batch_results:
+                img.info["revised_prompt"] = clean_prompt
+
+            result_holder.extend(batch_results)
             gen_kwargs["prompt"] = clean_prompt  # restore
         except Exception as e:
             error_holder.append(e)
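
Note that the stored value is clean_prompt, i.e. the LLM-rewritten prompt before magic_suffix is appended, and that is what the first hunk later surfaces as revised_prompt. The hand-off works because Pillow's Image.info is an ordinary in-memory dict attached to the image object; a standalone sketch using only Pillow, with no project code:

    # Image.info is a plain dict, so a value set on a generated image is
    # visible to any later code holding the same object. It is NOT saved
    # to disk automatically; persisting it in a PNG would require text
    # chunks (PIL.PngImagePlugin.PngInfo), which this commit does not do.
    from PIL import Image

    img = Image.new("RGB", (64, 64))  # stand-in for a generated image
    img.info["revised_prompt"] = "a watercolor fox, highly detailed"

    # ...later, in the API layer:
    print(img.info.get("revised_prompt", "original prompt"))

Since the metadata only lives in memory, the revised prompt is available to the API response built in the same process but is lost once the image is saved and reloaded.
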