# Top-level command-line parser for the Text Generation Web UI.
# conflict_handler='resolve' lets a later add_argument with the same flag
# override an earlier one instead of raising; the custom formatter widens
# the help column so the long option descriptions stay readable.
# BUG FIX: the original read "lambdaprog:" (missing space after "lambda"),
# which is a SyntaxError.
parser = argparse.ArgumentParser(
    description="Text Generation Web UI",
    conflict_handler='resolve',
    add_help=True,
    formatter_class=lambda prog: argparse.HelpFormatter(
        prog, max_help_position=55, indent_increment=2, width=200
    ),
)
# General flags registered on the current argument group (`group` is an
# argparse argument group object created earlier in the file).
group.add_argument('--multi-user', action='store_true', help='Multi-user mode. Chat histories are not saved or automatically loaded. Warning: this is likely not safe for sharing publicly.')
group.add_argument('--model', type=str, help='Name of the model to load by default.')
group.add_argument('--lora', type=str, nargs='+', help='The list of LoRAs to load. If you want to load more than one LoRA, write the names separated by spaces.')
group.add_argument('--settings', type=str, help='Load the default interface settings from this yaml file. See user_data/settings-template.yaml for an example. If you create a file called user_data/settings.yaml, this file will be loaded by default without the need to use the --settings flag.')
group.add_argument('--extensions', type=str, nargs='+', help='The list of extensions to load. If you want to load more than one extension, write the names separated by spaces.')
group.add_argument('--verbose', action='store_true', help='Print the prompts to the terminal.')
group.add_argument('--idle-timeout', type=int, default=0, help='Unload model after this many minutes of inactivity. It will be automatically reloaded when you try to use it again.')

# Image-model specific options.
group.add_argument('--image-dtype', type=str, default='bfloat16', choices=['bfloat16', 'float16'], help='Data type for image model.')
group.add_argument('--image-attn-backend', type=str, default='sdpa', choices=['sdpa', 'flash_attention_2', 'flash_attention_3'], help='Attention backend for image model.')
group.add_argument('--image-cpu-offload', action='store_true', help='Enable CPU offloading for image model.')

# Loader selection and speculative-decoding (draft model) options.
group.add_argument('--loader', type=str, help='Choose the model loader manually, otherwise, it will get autodetected. Valid options: Transformers, llama.cpp, ExLlamav3_HF, ExLlamav2_HF, ExLlamav2, TensorRT-LLM.')
group.add_argument('--model-draft', type=str, default=None, help='Path to the draft model for speculative decoding.')
group.add_argument('--draft-max', type=int, default=4, help='Number of tokens to draft for speculative decoding.')
group.add_argument('--gpu-layers-draft', type=int, default=256, help='Number of layers to offload to the GPU for the draft model.')
group.add_argument('--device-draft', type=str, default=None, help='Comma-separated list of devices to use for offloading the draft model. Example: CUDA0,CUDA1')
group.add_argument('--ctx-size-draft', type=int, default=0, help='Size of the prompt context for the draft model. If 0, uses the same as the main model.')
# llama.cpp backend options (a few generic loader/UI flags are also
# registered on this group further down).
group = parser.add_argument_group('llama.cpp')
group.add_argument('--gpu-layers', '--n-gpu-layers', type=int, default=256, metavar='N', help='Number of layers to offload to the GPU.')
group.add_argument('--mmproj', type=str, default=None, help='Path to the mmproj file for vision models.')
group.add_argument('--streaming-llm', action='store_true', help='Activate StreamingLLM to avoid re-evaluating the entire prompt when old messages are removed.')
group.add_argument('--tensor-split', type=str, default=None, help='Split the model across multiple GPUs. Comma-separated list of proportions. Example: 60,40.')
group.add_argument('--row-split', action='store_true', help='Split the model by rows across GPUs. This may improve multi-gpu performance.')
group.add_argument('--no-mmap', action='store_true', help='Prevent mmap from being used.')
group.add_argument('--mlock', action='store_true', help='Force the system to keep the model in RAM.')
group.add_argument('--no-kv-offload', action='store_true', help='Do not offload the K, Q, V to the GPU. This saves VRAM but reduces the performance.')
group.add_argument('--batch-size', type=int, default=1024, help='Maximum number of prompt tokens to batch together when calling llama-server. This is the application level batch size')
group.add_argument('--ubatch-size', type=int, default=1024, help='Maximum number of prompt tokens to batch together when calling llama-server. This is the max physical batch size for computation (device level).')
group.add_argument('--threads', type=int, default=0, help='Number of threads to use.')
group.add_argument('--threads-batch', type=int, default=0, help='Number of threads to use for batches/prompt processing.')
group.add_argument('--numa', action='store_true', help='Activate NUMA task allocation for llama.cpp.')
group.add_argument('--extra-flags', type=str, default=None, help='Extra flags to pass to llama-server. Format: "flag1=value1,flag2,flag3=value3". Example: "override-tensor=exps=CPU"')
group.add_argument('--disk', action='store_true', help='If the model is too large for your GPU(s) and CPU combined, send the remaining layers to the disk.')

# Transformers / quantization related flags.
group.add_argument('--load-in-8bit', action='store_true', help='Load the model with 8-bit precision (using bitsandbytes).')
group.add_argument('--bf16', action='store_true', help='Load the model with bfloat16 precision. Requires NVIDIA Ampere GPU.')
group.add_argument('--no-cache', action='store_true', help='Set use_cache to False while generating text. This reduces VRAM usage slightly, but it comes at a performance cost.')
group.add_argument('--trust-remote-code', action='store_true', help='Set trust_remote_code=True while loading the model. Necessary for some models.')
group.add_argument('--force-safetensors', action='store_true', help='Set use_safetensors=True while loading the model. This prevents arbitrary code execution.')
group.add_argument('--no_use_fast', action='store_true', help='Set use_fast=False while loading the tokenizer (it\'s True by default). Use this if you have any problems related to use_fast.')

# ExLlama / TensorRT-LLM / RoPE scaling flags.
group.add_argument('--autosplit', action='store_true', help='Autosplit the model tensors across the available GPUs. This causes --gpu-split to be ignored.')
group.add_argument('--cfg-cache', action='store_true', help='ExLlamav2_HF: Create an additional cache for CFG negative prompts. Necessary to use CFG with that loader.')
group.add_argument('--num_experts_per_token', type=int, default=2, metavar='N', help='Number of experts to use for generation. Applies to MoE models like Mixtral.')
group.add_argument('--cpp-runner', action='store_true', help='Use the ModelRunnerCpp runner, which is faster than the default ModelRunner but doesn\'t support streaming yet.')
group.add_argument('--alpha_value', type=float, default=1, help='Positional embeddings alpha factor for NTK RoPE scaling. Use either this or compress_pos_emb, not both.')
group.add_argument('--rope_freq_base', type=int, default=0, help='If greater than 0, will be used instead of alpha_value. Those two are related by rope_freq_base = 10000 * alpha_value ^ (64 / 63).')
group.add_argument('--compress_pos_emb', type=int, default=1, help="Positional embeddings compression factor. Should be set to (context length) / (model\'s original context length). Equal to 1/rope_freq_scale.")

# Web-server / Gradio flags.
group.add_argument('--listen', action='store_true', help='Make the web UI reachable from your local network.')
group.add_argument('--listen-port', type=int, help='The listening port that the server will use.')
group.add_argument('--listen-host', type=str, help='The hostname that the server will use.')
group.add_argument('--share', action='store_true', help='Create a public URL. This is useful for running the web UI on Google Colab or similar.')
group.add_argument('--auto-launch', action='store_true', default=False, help='Open the web UI in the default browser upon launch.')
group.add_argument('--gradio-auth', type=str, default=None, help='Set Gradio authentication password in the format "username:password". Multiple credentials can also be supplied with "u1:p1,u2:p2,u3:p3".')
group.add_argument('--gradio-auth-path', type=str, default=None, help='Set the Gradio authentication file path. The file should contain one or more user:password pairs in the same format as above.')
group.add_argument('--ssl-keyfile', type=str, default=None, help='The path to the SSL certificate key file.')
group.add_argument('--ssl-certfile', type=str, default=None, help='The path to the SSL certificate cert file.')
group.add_argument('--admin-key', type=str, default='', help='API authentication key for admin tasks like loading and unloading models. If not set, will be the same as --api-key.')
'chat-instruct_command':'Continue the chat dialogue below. Write a single reply for the character "<|character|>". Reply directly, without starting the reply with the character name.\n\n<|prompt|>',
'context':'The following is a conversation with an AI Large Language Model. The AI has been trained to answer questions, provide recommendations, and help with decision making. The AI follows user requests. The AI thinks outside the box.',
'greeting':'How can I help you today?',
'custom_system_message':'',
'instruction_template_str':"{%- set ns = namespace(found=false) -%}\n{%- for message in messages -%}\n{%- if message['role'] == 'system' -%}\n{%- set ns.found = true -%}\n{%- endif -%}\n{%- endfor -%}\n{%- if not ns.found -%}\n{{- '' + 'Below is an instruction that describes a task. Write a response that appropriately completes the request.' + '\\n\\n' -}}\n{%- endif %}\n{%- for message in messages %}\n{%- if message['role'] == 'system' -%}\n{{- '' + message['content'] + '\\n\\n' -}}\n{%- else -%}\n{%- if message['role'] == 'user' -%}\n{{-'### Instruction:\\n' + message['content'] + '\\n\\n'-}}\n{%- else -%}\n{{-'### Response:\\n' + message['content'] + '\\n\\n' -}}\n{%- endif -%}\n{%- endif -%}\n{%- endfor -%}\n{%- if add_generation_prompt -%}\n{{-'### Response:\\n'-}}\n{%- endif -%}",
# NOTE(review): this warning appears to belong inside a conditional that
# guards exposing the UI without authentication (e.g. --listen/--share with
# no --gradio-auth); the guarding `if` is not visible in this chunk —
# confirm the enclosing condition and indentation before relocating it.
logger.warning("\nYou are potentially exposing the web UI to the entire internet without any access password.\nYou can create one with the \"--gradio-auth\" flag like this:\n\n--gradio-auth username:password\n\nMake sure to replace username:password with your own.")
# Warn when multi-user mode is enabled (the --multi-user flag documents that
# chat histories are neither saved nor auto-loaded in this mode).
# BUG FIX: the original read "ifargs.multi_user:" (missing space after "if"),
# a SyntaxError, and the warning call had lost its indentation.
if args.multi_user:
    logger.warning('\nThe multi-user mode is highly experimental and should not be shared publicly.')