mirror of
https://github.com/oobabooga/text-generation-webui.git
synced 2026-02-04 23:04:49 +01:00
Image: Remove llm_variations from the API
This commit is contained in:
parent
c7ad28a4cd
commit
3ef428efaa
|
|
@@ -35,8 +35,7 @@ def generations(request):
|
|||
'image_batch_size': request.batch_size,
|
||||
'image_batch_count': request.batch_count,
|
||||
'image_cfg_scale': request.cfg_scale,
|
||||
'image_llm_variations': request.llm_variations,
|
||||
'image_llm_variations_prompt': request.llm_variations_prompt or shared.settings.get('image_llm_variations_prompt', ''),
|
||||
'image_llm_variations': False,
|
||||
})
|
||||
|
||||
# Exhaust generator, keep final result
|
||||
|
|
|
|||
|
|
@@ -264,7 +264,7 @@ class LoadLorasRequest(BaseModel):
|
|||
lora_names: List[str]
|
||||
|
||||
|
||||
class ImageGenerationRequestParams(BaseModel):
|
||||
class ImageGenerationRequest(BaseModel):
|
||||
"""Image-specific parameters for generation."""
|
||||
prompt: str
|
||||
negative_prompt: str = ""
|
||||
|
|
@@ -275,8 +275,6 @@ class ImageGenerationRequestParams(BaseModel):
|
|||
batch_size: int | None = Field(default=None, ge=1, description="Parallel batch size (VRAM heavy)")
|
||||
n: int = Field(default=1, ge=1, description="Alias for batch_size (OpenAI compatibility)")
|
||||
batch_count: int = Field(default=1, ge=1, description="Sequential batch count")
|
||||
llm_variations: bool = False
|
||||
llm_variations_prompt: str | None = None
|
||||
|
||||
# OpenAI compatibility (unused)
|
||||
model: str | None = None
|
||||
|
|
@@ -297,10 +295,6 @@ class ImageGenerationRequestParams(BaseModel):
|
|||
return 1024, 1024
|
||||
|
||||
|
||||
class ImageGenerationRequest(GenerationOptions, ImageGenerationRequestParams):
|
||||
pass
|
||||
|
||||
|
||||
class ImageGenerationResponse(BaseModel):
|
||||
created: int = int(time.time())
|
||||
data: List[dict]
|
||||
|
|
|
|||
Loading…
Reference in a new issue