# Loader filter for the sampler-parameter list. Portable builds ship only the
# llama.cpp backend, so the dropdown collapses to that single choice there.
# Fix: the original line had the spaces stripped from "if not ... else ...",
# which is a syntax error.
# NOTE(review): in the portable branch value="All" is not among the choices
# (['llama.cpp']) — confirm Gradio tolerates this / whether "All" should be
# prepended there too.
shared.gradio['filter_by_loader'] = gr.Dropdown(
    label=t("Filter by loader"),
    choices=(["All"] + list(loaders.loaders_and_params.keys())) if not shared.args.portable else ['llama.cpp'],
    value="All",
    elem_classes='slim-dropdown'
)
# --- Sampler parameters -------------------------------------------------
# Each widget is registered in shared.gradio under the key matching the
# generation-settings name in shared.settings.
# Consistency fix: the labels on smoothing_curve, penalty_alpha and
# guidance_scale were the only ones not wrapped in the t() translation
# helper; they are now wrapped like every sibling label.

# Quadratic sampling curve shape.
shared.gradio['smoothing_curve'] = gr.Slider(1.0, 10.0, value=shared.settings['smoothing_curve'], step=0.01, label=t('smoothing_curve'), info=t('Adjusts the dropoff curve of Quadratic Sampling.'))

# XTC (exclude top choices) sampler.
shared.gradio['xtc_threshold'] = gr.Slider(0, 0.5, value=shared.settings['xtc_threshold'], step=0.01, label=t('xtc_threshold'), info=t('If 2 or more tokens have probability above this threshold, consider removing all but the last one.'))
shared.gradio['xtc_probability'] = gr.Slider(0, 1, value=shared.settings['xtc_probability'], step=0.01, label=t('xtc_probability'), info=t('Probability that the removal will actually happen. 0 disables the sampler. 1 makes it always happen.'))

# DRY (don't-repeat-yourself) repetition penalty.
shared.gradio['dry_multiplier'] = gr.Slider(0, 5, value=shared.settings['dry_multiplier'], step=0.01, label=t('dry_multiplier'), info=t('Set to greater than 0 to enable DRY. Recommended value: 0.8.'))
shared.gradio['dry_allowed_length'] = gr.Slider(1, 20, value=shared.settings['dry_allowed_length'], step=1, label=t('dry_allowed_length'), info=t('Longest sequence that can be repeated without being penalized.'))
shared.gradio['dry_base'] = gr.Slider(1, 4, value=shared.settings['dry_base'], step=0.01, label=t('dry_base'), info=t('Controls how fast the penalty grows with increasing sequence length.'))

# Contrastive search / CFG / Mirostat.
shared.gradio['penalty_alpha'] = gr.Slider(0, 5, value=shared.settings['penalty_alpha'], label=t('penalty_alpha'), info=t('For Contrastive Search. do_sample must be unchecked.'))
shared.gradio['guidance_scale'] = gr.Slider(-0.5, 2.5, step=0.05, value=shared.settings['guidance_scale'], label=t('guidance_scale'), info=t('For CFG. 1.5 is a good value.'))
shared.gradio['mirostat_mode'] = gr.Slider(0, 2, step=1, value=shared.settings['mirostat_mode'], label=t('mirostat_mode'), info=t('mode=1 is for llama.cpp only.'))

# Sampler-stack ordering controls.
shared.gradio['temperature_last'] = gr.Checkbox(value=shared.settings['temperature_last'], label=t('temperature_last'), info=t('Moves temperature/dynamic temperature/quadratic sampling to the end of the sampler stack, ignoring their positions in "Sampler priority".'))
shared.gradio['sampler_priority'] = gr.Textbox(value=shared.settings['sampler_priority'], lines=10, label=t('Sampler priority'), info=t('Parameter names separated by new lines or commas.'), elem_classes=['add_scrollbar'])
shared.gradio['dry_sequence_breakers'] = gr.Textbox(value=shared.settings['dry_sequence_breakers'], label=t('dry_sequence_breakers'), info=t('Tokens across which sequence matching is not continued. Specified as a comma-separated list of quoted strings.'))
# --- Generation length, streaming and misc output controls --------------
# Consistency fix: the 'Skip special tokens' label was the only user-facing
# label in this group not wrapped in the t() translation helper.

# Token budget; bounds come from settings so deployments can cap them.
shared.gradio['max_new_tokens'] = gr.Slider(minimum=shared.settings['max_new_tokens_min'], maximum=shared.settings['max_new_tokens_max'], value=shared.settings['max_new_tokens'], step=1, label=t('max_new_tokens'), info=t('⚠️ Setting this too high can cause prompt truncation.'))
shared.gradio['max_tokens_second'] = gr.Slider(value=shared.settings['max_tokens_second'], minimum=0, maximum=20, step=1, label=t('Maximum tokens/second'), info=t('To make text readable in real time.'))
shared.gradio['auto_max_new_tokens'] = gr.Checkbox(value=shared.settings['auto_max_new_tokens'], label=t('auto_max_new_tokens'), info=t('Expand max_new_tokens to the available context length.'))

# Special-token handling.
shared.gradio['ban_eos_token'] = gr.Checkbox(value=shared.settings['ban_eos_token'], label=t('Ban the eos_token'), info=t('Forces the model to never end the generation prematurely.'))
shared.gradio['add_bos_token'] = gr.Checkbox(value=shared.settings['add_bos_token'], label=t('Add the bos_token to the beginning of prompts'), info=t('Only applies to text completion (notebook). In chat mode, templates control BOS tokens.'))
shared.gradio['skip_special_tokens'] = gr.Checkbox(value=shared.settings['skip_special_tokens'], label=t('Skip special tokens'), info=t('Some specific models need this unset.'))

# Streaming / caching / truncation / seed.
shared.gradio['stream'] = gr.Checkbox(value=shared.settings['stream'], label=t('Activate text streaming'))
shared.gradio['static_cache'] = gr.Checkbox(value=shared.settings['static_cache'], label=t('Static KV cache'), info=t('Use a static cache for improved performance.'))
shared.gradio['truncation_length'] = gr.Number(precision=0, step=256, value=get_truncation_length(), label=t('Truncate the prompt up to this length'), info=t('The leftmost tokens are removed if the prompt exceeds this length.'))
shared.gradio['seed'] = gr.Number(value=shared.settings['seed'], label=t('Seed (-1 for random)'))

# Free-form system-message override.
shared.gradio['custom_system_message'] = gr.Textbox(value=shared.settings['custom_system_message'], lines=2, label=t('Custom system message'), info=t('If not empty, will be used instead of the default one.'), elem_classes=['add_scrollbar'])
# Fix: both lines had the spaces stripped from "... or None", producing the
# invalid token "orNone" (a syntax error). "or None" turns an empty string
# setting into None so the Textbox renders its placeholder instead.
shared.gradio['custom_stopping_strings'] = gr.Textbox(lines=2, value=shared.settings["custom_stopping_strings"] or None, label=t('Custom stopping strings'), info=t('Written between \"\" and separated by commas.'), placeholder='"\\n", "\\nYou:"')
shared.gradio['custom_token_bans'] = gr.Textbox(value=shared.settings['custom_token_bans'] or None, label=t('Token bans'), info=t('Token IDs to ban, separated by commas. The IDs can be found in the Default or Notebook tab.'))
# CFG negative prompt — only consulted when guidance_scale != 1.
shared.gradio['negative_prompt'] = gr.Textbox(
    value=shared.settings['negative_prompt'],
    label=t('Negative prompt'),
    info=t('For CFG. Only used when guidance_scale is different than 1.'),
    lines=3,
    elem_classes=['add_scrollbar'],
)

# GBNF grammar picker, populated from the grammars available on disk.
shared.gradio['grammar_file'] = gr.Dropdown(
    value='None',
    choices=utils.get_available_grammars(),
    label=t('Load grammar from file (.gbnf)'),
    elem_classes='slim-dropdown',
)