diff --git a/.gitignore b/.gitignore
index 318e147d..bd69c941 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,26 +1,8 @@
-/cache
-/characters
 /css
 /extensions
-/grammars
 /installer_files
-/logs
-/loras
-/models
-/presets
-/prompts
 /repositories
-/softprompts
-/torch-dumps
-/training/datasets
-
-/CMD_FLAGS.txt
-/img_bot*
-/img_me*
-/models/config-user.yaml
-/notification.mp3
-/settings*.json
-/settings*.yaml
+/user_data
 
 .chroma
 .DS_Store
diff --git a/README.md b/README.md
index f62e3508..58f77786 100644
--- a/README.md
+++ b/README.md
@@ -182,131 +182,140 @@ List of command-line flags
 
 ```txt
-usage: server.py [-h] [--multi-user] [--character CHARACTER] [--model MODEL] [--lora LORA [LORA ...]] [--model-dir MODEL_DIR] [--lora-dir LORA_DIR] [--settings SETTINGS]
-                 [--extensions EXTENSIONS [EXTENSIONS ...]] [--verbose] [--idle-timeout IDLE_TIMEOUT] [--loader LOADER] [--cpu] [--auto-devices] [--gpu-memory GPU_MEMORY [GPU_MEMORY ...]]
-                 [--cpu-memory CPU_MEMORY] [--disk] [--disk-cache-dir DISK_CACHE_DIR] [--load-in-8bit] [--bf16] [--no-cache] [--trust-remote-code] [--force-safetensors] [--no_use_fast]
-                 [--use_flash_attention_2] [--use_eager_attention] [--torch-compile] [--load-in-4bit] [--use_double_quant] [--compute_dtype COMPUTE_DTYPE] [--quant_type QUANT_TYPE] [--flash-attn]
-                 [--n_ctx N_CTX] [--threads THREADS] [--threads-batch THREADS_BATCH] [--batch-size BATCH_SIZE] [--no-mmap] [--mlock] [--n-gpu-layers N_GPU_LAYERS] [--tensor-split TENSOR_SPLIT]
-                 [--numa] [--no-kv-offload] [--row-split] [--gpu-split GPU_SPLIT] [--autosplit] [--max_seq_len MAX_SEQ_LEN] [--cfg-cache] [--no_flash_attn] [--no_xformers] [--no_sdpa]
-                 [--num_experts_per_token NUM_EXPERTS_PER_TOKEN] [--enable_tp] [--hqq-backend HQQ_BACKEND] [--cpp-runner] [--cache_type CACHE_TYPE] [--deepspeed] [--nvme-offload-dir NVME_OFFLOAD_DIR]
-                 [--local_rank LOCAL_RANK] [--alpha_value ALPHA_VALUE] [--rope_freq_base ROPE_FREQ_BASE] [--compress_pos_emb COMPRESS_POS_EMB] [--listen] [--listen-port LISTEN_PORT]
-                 [--listen-host LISTEN_HOST] [--share] [--auto-launch] [--gradio-auth GRADIO_AUTH] [--gradio-auth-path GRADIO_AUTH_PATH] [--ssl-keyfile SSL_KEYFILE] [--ssl-certfile SSL_CERTFILE]
-                 [--subpath SUBPATH] [--old-colors] [--api] [--public-api] [--public-api-id PUBLIC_API_ID] [--api-port API_PORT] [--api-key API_KEY] [--admin-key ADMIN_KEY] [--api-enable-ipv6]
-                 [--api-disable-ipv4] [--nowebui]
+usage: server.py [-h] [--multi-user] [--character CHARACTER] [--model MODEL] [--lora LORA [LORA ...]] [--model-dir MODEL_DIR] [--lora-dir LORA_DIR] [--model-menu] [--settings SETTINGS]
+                 [--extensions EXTENSIONS [EXTENSIONS ...]] [--verbose] [--idle-timeout IDLE_TIMEOUT] [--loader LOADER] [--cpu] [--cpu-memory CPU_MEMORY] [--disk] [--disk-cache-dir DISK_CACHE_DIR]
+                 [--load-in-8bit] [--bf16] [--no-cache] [--trust-remote-code] [--force-safetensors] [--no_use_fast] [--use_flash_attention_2] [--use_eager_attention] [--torch-compile] [--load-in-4bit]
+                 [--use_double_quant] [--compute_dtype COMPUTE_DTYPE] [--quant_type QUANT_TYPE] [--flash-attn] [--threads THREADS] [--threads-batch THREADS_BATCH] [--batch-size BATCH_SIZE] [--no-mmap]
+                 [--mlock] [--n-gpu-layers N_GPU_LAYERS] [--tensor-split TENSOR_SPLIT] [--numa] [--no-kv-offload] [--row-split] [--extra-flags EXTRA_FLAGS] [--streaming-llm] [--ctx-size CTX_SIZE]
+                 [--model-draft MODEL_DRAFT] [--draft-max DRAFT_MAX] [--gpu-layers-draft GPU_LAYERS_DRAFT] [--device-draft DEVICE_DRAFT] [--ctx-size-draft CTX_SIZE_DRAFT] [--gpu-split GPU_SPLIT]
+                 [--autosplit] [--cfg-cache] [--no_flash_attn] [--no_xformers] [--no_sdpa] [--num_experts_per_token NUM_EXPERTS_PER_TOKEN] [--enable_tp] [--hqq-backend HQQ_BACKEND] [--cpp-runner]
+                 [--cache_type CACHE_TYPE] [--deepspeed] [--nvme-offload-dir NVME_OFFLOAD_DIR] [--local_rank LOCAL_RANK] [--alpha_value ALPHA_VALUE] [--rope_freq_base ROPE_FREQ_BASE]
+                 [--compress_pos_emb COMPRESS_POS_EMB] [--listen] [--listen-port LISTEN_PORT] [--listen-host LISTEN_HOST] [--share] [--auto-launch] [--gradio-auth GRADIO_AUTH]
+                 [--gradio-auth-path GRADIO_AUTH_PATH] [--ssl-keyfile SSL_KEYFILE] [--ssl-certfile SSL_CERTFILE] [--subpath SUBPATH] [--old-colors] [--api] [--public-api]
+                 [--public-api-id PUBLIC_API_ID] [--api-port API_PORT] [--api-key API_KEY] [--admin-key ADMIN_KEY] [--api-enable-ipv6] [--api-disable-ipv4] [--nowebui]
 
 Text generation web UI
 
 options:
-  -h, --help                                      show this help message and exit
+  -h, --help                                      show this help message and exit
 
 Basic settings:
-  --multi-user                                    Multi-user mode. Chat histories are not saved or automatically loaded. Warning: this is likely not safe for sharing publicly.
-  --character CHARACTER                           The name of the character to load in chat mode by default.
-  --model MODEL                                   Name of the model to load by default.
-  --lora LORA [LORA ...]                          The list of LoRAs to load. If you want to load more than one LoRA, write the names separated by spaces.
-  --model-dir MODEL_DIR                           Path to directory with all the models.
-  --lora-dir LORA_DIR                             Path to directory with all the loras.
-  --settings SETTINGS                             Load the default interface settings from this yaml file. See settings-template.yaml for an example. If you create a file called settings.yaml, this
-                                                  file will be loaded by default without the need to use the --settings flag.
-  --extensions EXTENSIONS [EXTENSIONS ...]        The list of extensions to load. If you want to load more than one extension, write the names separated by spaces.
-  --verbose                                       Print the prompts to the terminal.
-  --idle-timeout IDLE_TIMEOUT                     Unload model after this many minutes of inactivity. It will be automatically reloaded when you try to use it again.
+  --multi-user                                    Multi-user mode. Chat histories are not saved or automatically loaded. Warning: this is likely not safe for sharing publicly.
+  --character CHARACTER                           The name of the character to load in chat mode by default.
+  --model MODEL                                   Name of the model to load by default.
+  --lora LORA [LORA ...]                          The list of LoRAs to load. If you want to load more than one LoRA, write the names separated by spaces.
+  --model-dir MODEL_DIR                           Path to directory with all the models.
+  --lora-dir LORA_DIR                             Path to directory with all the loras.
+  --model-menu                                    Show a model menu in the terminal when the web UI is first launched.
+  --settings SETTINGS                             Load the default interface settings from this yaml file. See settings-template.yaml for an example. If you create a file called settings.yaml,
+                                                  this file will be loaded by default without the need to use the --settings flag.
+  --extensions EXTENSIONS [EXTENSIONS ...]        The list of extensions to load. If you want to load more than one extension, write the names separated by spaces.
+  --verbose                                       Print the prompts to the terminal.
+  --idle-timeout IDLE_TIMEOUT                     Unload model after this many minutes of inactivity. It will be automatically reloaded when you try to use it again.
 
 Model loader:
-  --loader LOADER                                 Choose the model loader manually, otherwise, it will get autodetected. Valid options: Transformers, llama.cpp, ExLlamav3_HF, ExLlamav2_HF, ExLlamav2,
-                                                  HQQ, TensorRT-LLM.
+  --loader LOADER                                 Choose the model loader manually, otherwise, it will get autodetected. Valid options: Transformers, llama.cpp, ExLlamav3_HF, ExLlamav2_HF,
+                                                  ExLlamav2, HQQ, TensorRT-LLM.
 
 Transformers/Accelerate:
-  --cpu                                           Use the CPU to generate text. Warning: Training on CPU is extremely slow.
-  --auto-devices                                  Automatically split the model across the available GPU(s) and CPU.
-  --gpu-memory GPU_MEMORY [GPU_MEMORY ...]        Maximum GPU memory in GiB to be allocated per GPU. Example: --gpu-memory 10 for a single GPU, --gpu-memory 10 5 for two GPUs. You can also set values
-                                                  in MiB like --gpu-memory 3500MiB.
-  --cpu-memory CPU_MEMORY                         Maximum CPU memory in GiB to allocate for offloaded weights. Same as above.
-  --disk                                          If the model is too large for your GPU(s) and CPU combined, send the remaining layers to the disk.
-  --disk-cache-dir DISK_CACHE_DIR                 Directory to save the disk cache to. Defaults to "cache".
-  --load-in-8bit                                  Load the model with 8-bit precision (using bitsandbytes).
-  --bf16                                          Load the model with bfloat16 precision. Requires NVIDIA Ampere GPU.
-  --no-cache                                      Set use_cache to False while generating text. This reduces VRAM usage slightly, but it comes at a performance cost.
-  --trust-remote-code                             Set trust_remote_code=True while loading the model. Necessary for some models.
-  --force-safetensors                             Set use_safetensors=True while loading the model. This prevents arbitrary code execution.
-  --no_use_fast                                   Set use_fast=False while loading the tokenizer (it's True by default). Use this if you have any problems related to use_fast.
-  --use_flash_attention_2                         Set use_flash_attention_2=True while loading the model.
-  --use_eager_attention                           Set attn_implementation= eager while loading the model.
-  --torch-compile                                 Compile the model with torch.compile for improved performance.
+  --cpu                                           Use the CPU to generate text. Warning: Training on CPU is extremely slow.
+  --cpu-memory CPU_MEMORY                         Maximum CPU memory in GiB. Use this for CPU offloading.
+  --disk                                          If the model is too large for your GPU(s) and CPU combined, send the remaining layers to the disk.
+  --disk-cache-dir DISK_CACHE_DIR                 Directory to save the disk cache to. Defaults to "user_data/cache".
+  --load-in-8bit                                  Load the model with 8-bit precision (using bitsandbytes).
+  --bf16                                          Load the model with bfloat16 precision. Requires NVIDIA Ampere GPU.
+  --no-cache                                      Set use_cache to False while generating text. This reduces VRAM usage slightly, but it comes at a performance cost.
+  --trust-remote-code                             Set trust_remote_code=True while loading the model. Necessary for some models.
+  --force-safetensors                             Set use_safetensors=True while loading the model. This prevents arbitrary code execution.
+  --no_use_fast                                   Set use_fast=False while loading the tokenizer (it's True by default). Use this if you have any problems related to use_fast.
+  --use_flash_attention_2                         Set use_flash_attention_2=True while loading the model.
+  --use_eager_attention                           Set attn_implementation= eager while loading the model.
+  --torch-compile                                 Compile the model with torch.compile for improved performance.
 
 bitsandbytes 4-bit:
-  --load-in-4bit                                  Load the model with 4-bit precision (using bitsandbytes).
-  --use_double_quant                              use_double_quant for 4-bit.
-  --compute_dtype COMPUTE_DTYPE                   compute dtype for 4-bit. Valid options: bfloat16, float16, float32.
-  --quant_type QUANT_TYPE                         quant_type for 4-bit. Valid options: nf4, fp4.
+  --load-in-4bit                                  Load the model with 4-bit precision (using bitsandbytes).
+  --use_double_quant                              use_double_quant for 4-bit.
+  --compute_dtype COMPUTE_DTYPE                   compute dtype for 4-bit. Valid options: bfloat16, float16, float32.
+  --quant_type QUANT_TYPE                         quant_type for 4-bit. Valid options: nf4, fp4.
 
 llama.cpp:
-  --flash-attn                                    Use flash-attention.
-  --n_ctx N_CTX                                   Size of the prompt context.
-  --threads THREADS                               Number of threads to use.
-  --threads-batch THREADS_BATCH                   Number of threads to use for batches/prompt processing.
-  --batch-size BATCH_SIZE                         Maximum number of prompt tokens to batch together when calling llama_eval.
-  --no-mmap                                       Prevent mmap from being used.
-  --mlock                                         Force the system to keep the model in RAM.
-  --n-gpu-layers N_GPU_LAYERS                     Number of layers to offload to the GPU.
-  --tensor-split TENSOR_SPLIT                     Split the model across multiple GPUs. Comma-separated list of proportions. Example: 60,40.
-  --numa                                          Activate NUMA task allocation for llama.cpp.
-  --no-kv-offload                                 Do not offload the K, Q, V to the GPU. This saves VRAM but reduces the performance.
-  --row-split                                     Split the model by rows across GPUs. This may improve multi-gpu performance.
+  --flash-attn                                    Use flash-attention.
+  --threads THREADS                               Number of threads to use.
+  --threads-batch THREADS_BATCH                   Number of threads to use for batches/prompt processing.
+  --batch-size BATCH_SIZE                         Maximum number of prompt tokens to batch together when calling llama_eval.
+  --no-mmap                                       Prevent mmap from being used.
+  --mlock                                         Force the system to keep the model in RAM.
+  --n-gpu-layers N_GPU_LAYERS                     Number of layers to offload to the GPU.
+  --tensor-split TENSOR_SPLIT                     Split the model across multiple GPUs. Comma-separated list of proportions. Example: 60,40.
+  --numa                                          Activate NUMA task allocation for llama.cpp.
+  --no-kv-offload                                 Do not offload the K, Q, V to the GPU. This saves VRAM but reduces the performance.
+  --row-split                                     Split the model by rows across GPUs. This may improve multi-gpu performance.
+  --extra-flags EXTRA_FLAGS                       Extra flags to pass to llama-server. Format: "flag1=value1;flag2;flag3=value3". Example: "override-tensor=exps=CPU"
+  --streaming-llm                                 Activate StreamingLLM to avoid re-evaluating the entire prompt when old messages are removed.
+
+Context and cache management:
+  --ctx-size CTX_SIZE, --n_ctx CTX_SIZE, --max_seq_len CTX_SIZE
+                                                  Context size in tokens.
+
+Speculative decoding:
+  --model-draft MODEL_DRAFT                       Path to the draft model for speculative decoding.
+  --draft-max DRAFT_MAX                           Number of tokens to draft for speculative decoding.
+  --gpu-layers-draft GPU_LAYERS_DRAFT             Number of layers to offload to the GPU for the draft model.
+  --device-draft DEVICE_DRAFT                     Comma-separated list of devices to use for offloading the draft model. Example: CUDA0,CUDA1
+  --ctx-size-draft CTX_SIZE_DRAFT                 Size of the prompt context for the draft model. If 0, uses the same as the main model.
 
 ExLlamaV2:
-  --gpu-split GPU_SPLIT                           Comma-separated list of VRAM (in GB) to use per GPU device for model layers. Example: 20,7,7.
-  --autosplit                                     Autosplit the model tensors across the available GPUs. This causes --gpu-split to be ignored.
-  --max_seq_len MAX_SEQ_LEN                       Maximum sequence length.
-  --cfg-cache                                     ExLlamav2_HF: Create an additional cache for CFG negative prompts. Necessary to use CFG with that loader.
-  --no_flash_attn                                 Force flash-attention to not be used.
-  --no_xformers                                   Force xformers to not be used.
-  --no_sdpa                                       Force Torch SDPA to not be used.
-  --num_experts_per_token NUM_EXPERTS_PER_TOKEN   Number of experts to use for generation. Applies to MoE models like Mixtral.
-  --enable_tp                                     Enable Tensor Parallelism (TP) in ExLlamaV2.
+  --gpu-split GPU_SPLIT                           Comma-separated list of VRAM (in GB) to use per GPU device for model layers. Example: 20,7,7.
+  --autosplit                                     Autosplit the model tensors across the available GPUs. This causes --gpu-split to be ignored.
+  --cfg-cache                                     ExLlamav2_HF: Create an additional cache for CFG negative prompts. Necessary to use CFG with that loader.
+  --no_flash_attn                                 Force flash-attention to not be used.
+  --no_xformers                                   Force xformers to not be used.
+  --no_sdpa                                       Force Torch SDPA to not be used.
+  --num_experts_per_token NUM_EXPERTS_PER_TOKEN   Number of experts to use for generation. Applies to MoE models like Mixtral.
+  --enable_tp                                     Enable Tensor Parallelism (TP) in ExLlamaV2.
 
 HQQ:
-  --hqq-backend HQQ_BACKEND                       Backend for the HQQ loader. Valid options: PYTORCH, PYTORCH_COMPILE, ATEN.
+  --hqq-backend HQQ_BACKEND                       Backend for the HQQ loader. Valid options: PYTORCH, PYTORCH_COMPILE, ATEN.
 
 TensorRT-LLM:
-  --cpp-runner                                    Use the ModelRunnerCpp runner, which is faster than the default ModelRunner but doesn't support streaming yet.
+  --cpp-runner                                    Use the ModelRunnerCpp runner, which is faster than the default ModelRunner but doesn't support streaming yet.
 
 Cache:
-  --cache_type CACHE_TYPE                         KV cache type; valid options: llama.cpp - fp16, q8_0, q4_0; ExLlamaV2 - fp16, fp8, q8, q6, q4.
+  --cache_type CACHE_TYPE                         KV cache type; valid options: llama.cpp - fp16, q8_0, q4_0; ExLlamaV2 - fp16, fp8, q8, q6, q4.
 
 DeepSpeed:
-  --deepspeed                                     Enable the use of DeepSpeed ZeRO-3 for inference via the Transformers integration.
-  --nvme-offload-dir NVME_OFFLOAD_DIR             DeepSpeed: Directory to use for ZeRO-3 NVME offloading.
-  --local_rank LOCAL_RANK                         DeepSpeed: Optional argument for distributed setups.
+  --deepspeed                                     Enable the use of DeepSpeed ZeRO-3 for inference via the Transformers integration.
+  --nvme-offload-dir NVME_OFFLOAD_DIR             DeepSpeed: Directory to use for ZeRO-3 NVME offloading.
+  --local_rank LOCAL_RANK                         DeepSpeed: Optional argument for distributed setups.
 
 RoPE:
-  --alpha_value ALPHA_VALUE                       Positional embeddings alpha factor for NTK RoPE scaling. Use either this or compress_pos_emb, not both.
-  --rope_freq_base ROPE_FREQ_BASE                 If greater than 0, will be used instead of alpha_value. Those two are related by rope_freq_base = 10000 * alpha_value ^ (64 / 63).
-  --compress_pos_emb COMPRESS_POS_EMB             Positional embeddings compression factor. Should be set to (context length) / (model's original context length). Equal to 1/rope_freq_scale.
+  --alpha_value ALPHA_VALUE                       Positional embeddings alpha factor for NTK RoPE scaling. Use either this or compress_pos_emb, not both.
+  --rope_freq_base ROPE_FREQ_BASE                 If greater than 0, will be used instead of alpha_value. Those two are related by rope_freq_base = 10000 * alpha_value ^ (64 / 63).
+  --compress_pos_emb COMPRESS_POS_EMB             Positional embeddings compression factor. Should be set to (context length) / (model's original context length). Equal to 1/rope_freq_scale.
 
 Gradio:
-  --listen                                        Make the web UI reachable from your local network.
-  --listen-port LISTEN_PORT                       The listening port that the server will use.
-  --listen-host LISTEN_HOST                       The hostname that the server will use.
-  --share                                         Create a public URL. This is useful for running the web UI on Google Colab or similar.
-  --auto-launch                                   Open the web UI in the default browser upon launch.
-  --gradio-auth GRADIO_AUTH                       Set Gradio authentication password in the format "username:password". Multiple credentials can also be supplied with "u1:p1,u2:p2,u3:p3".
-  --gradio-auth-path GRADIO_AUTH_PATH             Set the Gradio authentication file path. The file should contain one or more user:password pairs in the same format as above.
-  --ssl-keyfile SSL_KEYFILE                       The path to the SSL certificate key file.
-  --ssl-certfile SSL_CERTFILE                     The path to the SSL certificate cert file.
-  --subpath SUBPATH                               Customize the subpath for gradio, use with reverse proxy
-  --old-colors                                    Use the legacy Gradio colors, before the December/2024 update.
+  --listen                                        Make the web UI reachable from your local network.
+  --listen-port LISTEN_PORT                       The listening port that the server will use.
+  --listen-host LISTEN_HOST                       The hostname that the server will use.
+  --share                                         Create a public URL. This is useful for running the web UI on Google Colab or similar.
+  --auto-launch                                   Open the web UI in the default browser upon launch.
+  --gradio-auth GRADIO_AUTH                       Set Gradio authentication password in the format "username:password". Multiple credentials can also be supplied with "u1:p1,u2:p2,u3:p3".
+  --gradio-auth-path GRADIO_AUTH_PATH             Set the Gradio authentication file path. The file should contain one or more user:password pairs in the same format as above.
+  --ssl-keyfile SSL_KEYFILE                       The path to the SSL certificate key file.
+  --ssl-certfile SSL_CERTFILE                     The path to the SSL certificate cert file.
+  --subpath SUBPATH                               Customize the subpath for gradio, use with reverse proxy
+  --old-colors                                    Use the legacy Gradio colors, before the December/2024 update.
 
 API:
-  --api                                           Enable the API extension.
-  --public-api                                    Create a public URL for the API using Cloudfare.
-  --public-api-id PUBLIC_API_ID                   Tunnel ID for named Cloudflare Tunnel. Use together with public-api option.
-  --api-port API_PORT                             The listening port for the API.
-  --api-key API_KEY                               API authentication key.
-  --admin-key ADMIN_KEY                           API authentication key for admin tasks like loading and unloading models. If not set, will be the same as --api-key.
-  --api-enable-ipv6                               Enable IPv6 for the API
-  --api-disable-ipv4                              Disable IPv4 for the API
-  --nowebui                                       Do not launch the Gradio UI. Useful for launching the API in standalone mode.
+  --api                                           Enable the API extension.
+  --public-api                                    Create a public URL for the API using Cloudfare.
+  --public-api-id PUBLIC_API_ID                   Tunnel ID for named Cloudflare Tunnel. Use together with public-api option.
+  --api-port API_PORT                             The listening port for the API.
+  --api-key API_KEY                               API authentication key.
+  --admin-key ADMIN_KEY                           API authentication key for admin tasks like loading and unloading models. If not set, will be the same as --api-key.
+  --api-enable-ipv6                               Enable IPv6 for the API
+  --api-disable-ipv4                              Disable IPv4 for the API
+  --nowebui                                       Do not launch the Gradio UI. Useful for launching the API in standalone mode.
 ```
@@ -317,35 +326,37 @@ https://github.com/oobabooga/text-generation-webui/wiki
 
 ## Downloading models
 
-Models should be placed in the folder `text-generation-webui/models`. They are usually downloaded from [Hugging Face](https://huggingface.co/models?pipeline_tag=text-generation&sort=downloads).
+Models should be placed in the folder `text-generation-webui/user_data/models`. They are usually downloaded from [Hugging Face](https://huggingface.co/models?pipeline_tag=text-generation&sort=downloads).
 
-* GGUF models are a single file and should be placed directly into `models`. Example:
+* GGUF models are a single file and should be placed directly into `user_data/models`. Example:
 
 ```
 text-generation-webui
-└── models
-    └── llama-2-13b-chat.Q4_K_M.gguf
+└── user_data
+    └── models
+        └── llama-2-13b-chat.Q4_K_M.gguf
 ```
 
 * The remaining model types (like 16-bit Transformers models and EXL2 models) are made of several files and must be placed in a subfolder. Example:
 
 ```
 text-generation-webui
-├── models
-│   ├── lmsys_vicuna-33b-v1.3
-│   │   ├── config.json
-│   │   ├── generation_config.json
-│   │   ├── pytorch_model-00001-of-00007.bin
-│   │   ├── pytorch_model-00002-of-00007.bin
-│   │   ├── pytorch_model-00003-of-00007.bin
-│   │   ├── pytorch_model-00004-of-00007.bin
-│   │   ├── pytorch_model-00005-of-00007.bin
-│   │   ├── pytorch_model-00006-of-00007.bin
-│   │   ├── pytorch_model-00007-of-00007.bin
-│   │   ├── pytorch_model.bin.index.json
-│   │   ├── special_tokens_map.json
-│   │   ├── tokenizer_config.json
-│   │   └── tokenizer.model
+└── user_data
+    └── models
+        └── lmsys_vicuna-33b-v1.3
+            ├── config.json
+            ├── generation_config.json
+            ├── pytorch_model-00001-of-00007.bin
+            ├── pytorch_model-00002-of-00007.bin
+            ├── pytorch_model-00003-of-00007.bin
+            ├── pytorch_model-00004-of-00007.bin
+            ├── pytorch_model-00005-of-00007.bin
+            ├── pytorch_model-00006-of-00007.bin
+            ├── pytorch_model-00007-of-00007.bin
+            ├── pytorch_model.bin.index.json
+            ├── special_tokens_map.json
+            ├── tokenizer_config.json
+            └── tokenizer.model
 ```
 
 In both cases, you can use the "Model" tab of the UI to download the model from Hugging Face automatically. It is also possible to download it via the command-line with:
diff --git a/download-model.py b/download-model.py
index 8ff1d69c..25517491 100644
--- a/download-model.py
+++ b/download-model.py
@@ -1,5 +1,5 @@
 '''
-Downloads models from Hugging Face to models/username_modelname.
+Downloads models from Hugging Face to user_data/models/username_modelname.
 
 Example:
 python download-model.py facebook/opt-1.3b
@@ -175,7 +175,7 @@ class ModelDownloader:
         if model_dir:
             base_folder = model_dir
         else:
-            base_folder = 'models' if not is_lora else 'loras'
+            base_folder = 'user_data/models' if not is_lora else 'user_data/loras'
 
         # If the model is of type GGUF, save directly in the base_folder
         if is_llamacpp:
@@ -356,7 +356,7 @@ if __name__ == '__main__':
     parser.add_argument('--specific-file', type=str, default=None, help='Name of the specific file to download (if not provided, downloads all).')
     parser.add_argument('--exclude-pattern', type=str, default=None, help='Regex pattern to exclude files from download.')
     parser.add_argument('--output', type=str, default=None, help='Save the model files to this folder.')
-    parser.add_argument('--model-dir', type=str, default=None, help='Save the model files to a subfolder of this folder instead of the default one (text-generation-webui/models).')
+    parser.add_argument('--model-dir', type=str, default=None, help='Save the model files to a subfolder of this folder instead of the default one (text-generation-webui/user_data/models).')
     parser.add_argument('--clean', action='store_true', help='Does not resume the previous download.')
     parser.add_argument('--check', action='store_true', help='Validates the checksums of model files.')
     parser.add_argument('--max-retries', type=int, default=7, help='Max retries count when get error in download time.')
diff --git a/extensions/Training_PRO/matplotgraph.py b/extensions/Training_PRO/matplotgraph.py
index 348fc01a..b30bee83 100644
--- a/extensions/Training_PRO/matplotgraph.py
+++ b/extensions/Training_PRO/matplotgraph.py
@@ -59,4 +59,4 @@ def create_graph(lora_path, lora_name):
             print(f"File 'training_graph.json' does not exist in the {lora_path}")
 
     except ImportError:
-        print("matplotlib is not installed. Please install matplotlib to create PNG graphs")
\ No newline at end of file
+        print("matplotlib is not installed. Please install matplotlib to create PNG graphs")
diff --git a/extensions/Training_PRO/script.py b/extensions/Training_PRO/script.py
index f553e482..cb11a8df 100644
--- a/extensions/Training_PRO/script.py
+++ b/extensions/Training_PRO/script.py
@@ -175,23 +175,23 @@ def ui():
     with gr.Row():
         with gr.Column():
             with gr.Row():
-                dataset = gr.Dropdown(choices=get_datasets('training/datasets', 'json'), value='None', label='Dataset', info='The dataset file to use for training.', elem_classes=['slim-dropdown'])
-                create_refresh_button(dataset, lambda: None, lambda: {'choices': get_datasets('training/datasets', 'json')}, 'refresh-button')
+                dataset = gr.Dropdown(choices=get_datasets('user_data/training/datasets', 'json'), value='None', label='Dataset', info='The dataset file to use for training.', elem_classes=['slim-dropdown'])
+                create_refresh_button(dataset, lambda: None, lambda: {'choices': get_datasets('user_data/training/datasets', 'json')}, 'refresh-button')
 
             with gr.Row():
-                eval_dataset = gr.Dropdown(choices=get_datasets('training/datasets', 'json'), value='None', label='Evaluation Dataset', info='The (optional) dataset file used to evaluate the model after training.', elem_classes=['slim-dropdown'])
-                create_refresh_button(eval_dataset, lambda: None, lambda: {'choices': get_datasets('training/datasets', 'json')}, 'refresh-button')
+                eval_dataset = gr.Dropdown(choices=get_datasets('user_data/training/datasets', 'json'), value='None', label='Evaluation Dataset', info='The (optional) dataset file used to evaluate the model after training.', elem_classes=['slim-dropdown'])
+                create_refresh_button(eval_dataset, lambda: None, lambda: {'choices': get_datasets('user_data/training/datasets', 'json')}, 'refresh-button')
 
         with gr.Column():
            with gr.Row():
-                format = gr.Dropdown(choices=get_datasets('training/formats', 'json'), value='None', label='Data Format', info='The format file used to decide how to format the dataset input.', elem_classes=['slim-dropdown'])
-                create_refresh_button(format, lambda: None, lambda: {'choices': get_datasets('training/formats', 'json')}, 'refresh-button')
+                format = gr.Dropdown(choices=get_datasets('user_data/training/formats', 'json'), value='None', label='Data Format', info='The format file used to decide how to format the dataset input.', elem_classes=['slim-dropdown'])
+                create_refresh_button(format, lambda: None, lambda: {'choices': get_datasets('user_data/training/formats', 'json')}, 'refresh-button')
 
            with gr.Row():
                 eval_steps = gr.Number(label='Evaluate every n steps', value=100, info='If an evaluation dataset is given, test it every time this many steps pass.')
 
     with gr.Tab(label="Text file"):
         with gr.Row():
-            raw_text_file = gr.Dropdown(choices=get_datasets('training/datasets', 'txt'), value='None', label='Text file', info='The text file to use for training.', elem_classes=['slim-dropdown'])
-            create_refresh_button(raw_text_file, lambda: None, lambda: {'choices': get_datasets('training/datasets', 'txt')}, 'refresh-button')
+            raw_text_file = gr.Dropdown(choices=get_datasets('user_data/training/datasets', 'txt'), value='None', label='Text file', info='The text file to use for training.', elem_classes=['slim-dropdown'])
+            create_refresh_button(raw_text_file, lambda: None, lambda: {'choices': get_datasets('user_data/training/datasets', 'txt')}, 'refresh-button')
 
     with gr.Row():
         with gr.Column():
@@ -208,7 +208,7 @@ def ui():
             download_file_url = gr.Textbox(label='Download JSON or txt file to datasets (or formats) folder', value='',info='The URL of a file to download. If on github, make sure you get url of the raw file (https://raw.githubusercontent.com/...). If huggin face, make sure the url has /resolve/ in it not /blob/')
             with gr.Row():
                 download_check_overwrite = gr.Checkbox(label='Overwrite', value=False, info='Overwrite if file exist')
-                download_folder = gr.Radio(label="Destination", value='training/datasets', choices=['training/datasets', 'training/formats'], interactive=True)
+                download_folder = gr.Radio(label="Destination", value='user_data/training/datasets', choices=['user_data/training/datasets', 'user_data/training/formats'], interactive=True)
                 download_button = gr.Button('Download')
                 download_status = gr.Textbox(label='Download Status', value='', interactive=False)
             with gr.Row():
@@ -235,7 +235,7 @@ def ui():
         with gr.Row():
             with gr.Column():
                 models = gr.Dropdown(utils.get_available_models(), label='Models', multiselect=True)
-                evaluate_text_file = gr.Dropdown(choices=['wikitext', 'ptb', 'ptb_new'] + get_datasets('training/datasets', 'txt')[1:], value='wikitext', label='Input dataset', info='The text file on which the model will be evaluated. The first options are automatically downloaded: wikitext, ptb, and ptb_new. The next options are your local text files under training/datasets.')
+                evaluate_text_file = gr.Dropdown(choices=['wikitext', 'ptb', 'ptb_new'] + get_datasets('user_data/training/datasets', 'txt')[1:], value='wikitext', label='Input dataset', info='The text file on which the model will be evaluated. The first options are automatically downloaded: wikitext, ptb, and ptb_new. The next options are your local text files under user_data/training/datasets.')
             with gr.Row():
                 with gr.Column():
                     stride_length = gr.Slider(label='Stride', minimum=1, maximum=2048, value=512, step=1, info='Used to make the evaluation faster at the cost of accuracy. 1 = slowest but most accurate. 512 is a common value.')
@@ -310,7 +310,7 @@ def ui():
 
         if raw_text_file not in ['None', '']:
             logger.info("Loading Text file...")
-            fullpath = clean_path('training/datasets', f'{raw_text_file}')
+            fullpath = clean_path('user_data/training/datasets', f'{raw_text_file}')
             fullpath = Path(fullpath)
             if fullpath.is_dir():
                 logger.info('Training path directory {}'.format(raw_text_file))
@@ -324,10 +324,10 @@ def ui():
                     logger.info(f"Loaded training file: {file_path.name}")
         else:
             try:
-                with open(clean_path('training/datasets', f'{raw_text_file}.txt'), 'r', encoding='utf-8') as file:
+                with open(clean_path('user_data/training/datasets', f'{raw_text_file}.txt'), 'r', encoding='utf-8') as file:
                     raw_text = file.read().replace('\r', '')
             except:
-                yield f"{raw_text_file}.txt doesn't seem to exsist anymore... check your training/datasets folder"
+                yield f"{raw_text_file}.txt doesn't seem to exsist anymore... check your user_data/training/datasets folder"
                 return
 
@@ -353,7 +353,7 @@ def ui():
                 yield "Select format choice for dataset."
                 return
 
-            with open(clean_path('training/formats', f'{format}.json'), 'r', encoding='utf-8-sig') as formatFile:
+            with open(clean_path('user_data/training/formats', f'{format}.json'), 'r', encoding='utf-8-sig') as formatFile:
                 format_data: dict[str, str] = json.load(formatFile)
 
             def generate_prompt(data_point: dict[str, str]):
@@ -381,7 +381,7 @@ def ui():
                 return tokenize_dummy(prompt)
 
             logger.info("Loading JSON datasets...")
-            data = load_dataset("json", data_files=clean_path('training/datasets', f'{dataset}.json'))
+            data = load_dataset("json", data_files=clean_path('user_data/training/datasets', f'{dataset}.json'))
 
             data_keys = []
 
@@ -456,7 +456,7 @@ def ui():
     #debug_slicer.change(lambda x: non_serialized_params.update({"debug_slicer": x}), debug_slicer, None)
 
     def update_dataset():
-        return gr.update(choices=get_datasets('training/datasets', 'json')), gr.update(choices=get_datasets('training/datasets', 'txt'))
+        return gr.update(choices=get_datasets('user_data/training/datasets', 'json')), gr.update(choices=get_datasets('user_data/training/datasets', 'txt'))
 
     download_button.click(download_file_from_url, [download_file_url,download_check_overwrite,download_folder] , download_status).then(update_dataset,None,[dataset , raw_text_file])
@@ -670,7 +670,7 @@ def do_train(lora_name: str, always_override: bool, save_steps: int, micro_batch
     if raw_text_file not in ['None', '']:
         train_template["template_type"] = "raw_text"
         logger.info("Loading text file...")
-        fullpath = clean_path('training/datasets', f'{raw_text_file}')
+        fullpath = clean_path('user_data/training/datasets', f'{raw_text_file}')
         fullpath = Path(fullpath)
         if fullpath.is_dir():
             logger.info('Training path directory {}'.format(raw_text_file))
@@ -683,7 +683,7 @@ def do_train(lora_name: str, always_override: bool, save_steps: int, micro_batch
                 logger.info(f"Loaded training file: {file_path.name}")
         else:
-            with open(clean_path('training/datasets', f'{raw_text_file}.txt'), 'r', encoding='utf-8') as file:
+            with open(clean_path('user_data/training/datasets', f'{raw_text_file}.txt'), 'r', encoding='utf-8') as file:
                 raw_text = file.read().replace('\r', '')
 
         # FPHAM PRECISE SLICING
@@ -720,7 +720,7 @@ def do_train(lora_name: str, always_override: bool, save_steps: int, micro_batch
 
         train_template["template_type"] = "dataset"
 
-        with open(clean_path('training/formats', f'{format}.json'), 'r', encoding='utf-8-sig') as formatFile:
+        with open(clean_path('user_data/training/formats', f'{format}.json'), 'r', encoding='utf-8-sig') as formatFile:
             format_data: dict[str, str] = json.load(formatFile)
 
         # == store training prompt ==
@@ -742,7 +742,7 @@ def do_train(lora_name: str, always_override: bool, save_steps: int, micro_batch
             return tokenize(prompt, add_eos_token, add_bos_token)
 
         logger.info("Loading JSON datasets...")
-        data = load_dataset("json", data_files=clean_path('training/datasets', f'{dataset}.json'))
+        data = load_dataset("json", data_files=clean_path('user_data/training/datasets', f'{dataset}.json'))
         train_data = data['train'].map(generate_and_tokenize_prompt, new_fingerprint='%030x' % random.randrange(16**30))
 
         print(f"BOS: {add_bos_token} EOS: {add_eos_token}")
@@ -751,7 +751,7 @@ def do_train(lora_name: str, always_override: bool, save_steps: int, micro_batch
         if eval_dataset == 'None':
             eval_data = None
         else:
-            eval_data = load_dataset("json", data_files=clean_path('training/datasets', f'{eval_dataset}.json'))
+            eval_data = load_dataset("json", data_files=clean_path('user_data/training/datasets', f'{eval_dataset}.json'))
             eval_data = eval_data['train'].map(generate_and_tokenize_prompt, new_fingerprint='%030x' % random.randrange(16**30))
 
     # == We MUST reload model if it went through any previous training, even failed one ==
@@ -1157,11 +1157,11 @@ def do_train(lora_name: str, always_override: bool, save_steps: int, micro_batch
                     decoded_entries.append({"value": decoded_text})
 
                 # Write the log file
-                Path('logs').mkdir(exist_ok=True)
-                with open(Path('logs/train_dataset_sample.json'), 'w') as json_file:
+                Path('user_data/logs').mkdir(exist_ok=True)
+                with open(Path('user_data/logs/train_dataset_sample.json'), 'w') as json_file:
                     json.dump(decoded_entries, json_file, indent=4)
 
-                logger.info("Log file 'train_dataset_sample.json' created in the 'logs' directory.")
+                logger.info("Log file 'train_dataset_sample.json' created in the 'user_data/logs' directory.")
             except Exception as e:
                 logger.error(f"Failed to create log file due to error: {e}")
diff --git a/extensions/Training_PRO/train_utils.py b/extensions/Training_PRO/train_utils.py
index 18686144..79994880 100644
--- a/extensions/Training_PRO/train_utils.py
+++ b/extensions/Training_PRO/train_utils.py
@@ -194,13 +194,13 @@ def precise_cut(text: str, overlap: bool, min_chars_cut: int, eos_to_hc: bool, c
 
     if debug_slicer:
         # Write the log file
-        Path('logs').mkdir(exist_ok=True)
+        Path('user_data/logs').mkdir(exist_ok=True)
         sentencelist_dict = {index: sentence for index, sentence in enumerate(sentencelist)}
-        output_file = "logs/sentencelist.json"
+        output_file = "user_data/logs/sentencelist.json"
         with open(output_file, 'w') as f:
             json.dump(sentencelist_dict, f,indent=2)
-        print("Saved sentencelist.json in logs folder")
+        print("Saved sentencelist.json in user_data/logs folder")
 
     return sentencelist
 
@@ -281,13 +281,13 @@ def sliding_block_cut(text: str, min_chars_cut: int, eos_to_hc: bool, cutoff_len
 
     if debug_slicer:
         # Write the log file
-        Path('logs').mkdir(exist_ok=True)
+        Path('user_data/logs').mkdir(exist_ok=True)
         sentencelist_dict = {index: sentence for index, sentence in enumerate(sentencelist)}
-        output_file = "logs/sentencelist.json"
+        output_file = "user_data/logs/sentencelist.json"
         with open(output_file, 'w') as f:
             json.dump(sentencelist_dict, f,indent=2)
-        print("Saved sentencelist.json in logs folder")
+        print("Saved sentencelist.json in user_data/logs folder")
 
     return sentencelist
 
diff --git a/extensions/gallery/script.py b/extensions/gallery/script.py
index 76be4a58..8b242fb6 100644
--- a/extensions/gallery/script.py
+++ b/extensions/gallery/script.py
@@ -72,13 +72,13 @@ def generate_html():
     global cards
     cards = []
     # Iterate through files in image folder
-    for file in sorted(Path("characters").glob("*")):
+    for file in sorted(Path("user_data/characters").glob("*")):
         if file.suffix in [".json", ".yml", ".yaml"]:
             character = file.stem
             container_html = '<div class="character-container">'
             image_html = "<div class='placeholder'></div>"
 
-            for path in [Path(f"characters/{character}.{extension}") for extension in ['png', 'jpg', 'jpeg']]:
+            for path in [Path(f"user_data/characters/{character}.{extension}") for extension in ['png', 'jpg', 'jpeg']]:
                 if path.exists():
                     image_html = f'<img src="file/{get_image_cache(path)}">'
                     break
diff --git a/modules/html_generator.py b/modules/html_generator.py
--- a/modules/html_generator.py
+++ b/modules/html_generator.py
@@ ... @@ def generate_cai_chat_html(history, name1, name2, style, character, reset_cache=False):
     img_me = (
-        f'<img src="file/cache/pfp_me.png?{time.time() if reset_cache else ""}">'
-        if Path("cache/pfp_me.png").exists() else ''
+        f'<img src="file/user_data/cache/pfp_me.png?{time.time() if reset_cache else ""}">'
+        if Path("user_data/cache/pfp_me.png").exists() else ''
     )
 
     for i in range(len(history['visible'])):
diff --git a/modules/models_settings.py b/modules/models_settings.py
index d3ecd51f..ae589bb3 100644
--- a/modules/models_settings.py
+++ b/modules/models_settings.py
@@ -25,7 +25,7 @@ def get_fallback_settings():
def get_model_metadata(model):
model_settings = {}
- # Get settings from models/config.yaml and models/config-user.yaml
+ # Get settings from user_data/models/config.yaml and user_data/models/config-user.yaml
settings = shared.model_config
for pat in settings:
if re.match(pat.lower(), Path(model).name.lower()):
@@ -144,7 +144,7 @@ def get_model_metadata(model):
if 'rope_freq_base' in model_settings and model_settings['rope_freq_base'] == 10000:
model_settings.pop('rope_freq_base')
- # Apply user settings from models/config-user.yaml
+ # Apply user settings from user_data/models/config-user.yaml
settings = shared.user_config
for pat in settings:
if re.match(pat.lower(), Path(model).name.lower()):
@@ -223,7 +223,7 @@ def apply_model_settings_to_state(model, state):
def save_model_settings(model, state):
'''
- Save the settings for this model to models/config-user.yaml
+ Save the settings for this model to user_data/models/config-user.yaml
'''
if model == 'None':
yield ("Not saving the settings because no model is selected in the menu.")
diff --git a/modules/presets.py b/modules/presets.py
index 7cab2af0..a432bf52 100644
--- a/modules/presets.py
+++ b/modules/presets.py
@@ -58,7 +58,7 @@ def presets_params():
def load_preset(name, verbose=False):
generate_params = default_preset()
if name not in ['None', None, '']:
- path = Path(f'presets/{name}.yaml')
+ path = Path(f'user_data/presets/{name}.yaml')
if path.exists():
with open(path, 'r') as infile:
preset = yaml.safe_load(infile)
diff --git a/modules/prompts.py b/modules/prompts.py
index 565c2450..8f00cac2 100644
--- a/modules/prompts.py
+++ b/modules/prompts.py
@@ -7,7 +7,7 @@ def load_prompt(fname):
if fname in ['None', '']:
return ''
else:
- file_path = Path(f'prompts/{fname}.txt')
+ file_path = Path(f'user_data/prompts/{fname}.txt')
if not file_path.exists():
return ''
diff --git a/modules/shared.py b/modules/shared.py
index 96f65929..21e6dd00 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -78,8 +78,8 @@ group.add_argument('--multi-user', action='store_true', help='Multi-user mode. C
group.add_argument('--character', type=str, help='The name of the character to load in chat mode by default.')
group.add_argument('--model', type=str, help='Name of the model to load by default.')
group.add_argument('--lora', type=str, nargs='+', help='The list of LoRAs to load. If you want to load more than one LoRA, write the names separated by spaces.')
-group.add_argument('--model-dir', type=str, default='models/', help='Path to directory with all the models.')
-group.add_argument('--lora-dir', type=str, default='loras/', help='Path to directory with all the loras.')
+group.add_argument('--model-dir', type=str, default='user_data/models', help='Path to directory with all the models.')
+group.add_argument('--lora-dir', type=str, default='user_data/loras', help='Path to directory with all the loras.')
group.add_argument('--model-menu', action='store_true', help='Show a model menu in the terminal when the web UI is first launched.')
group.add_argument('--settings', type=str, help='Load the default interface settings from this yaml file. See settings-template.yaml for an example. If you create a file called settings.yaml, this file will be loaded by default without the need to use the --settings flag.')
group.add_argument('--extensions', type=str, nargs='+', help='The list of extensions to load. If you want to load more than one extension, write the names separated by spaces.')
@@ -95,7 +95,7 @@ group = parser.add_argument_group('Transformers/Accelerate')
group.add_argument('--cpu', action='store_true', help='Use the CPU to generate text. Warning: Training on CPU is extremely slow.')
group.add_argument('--cpu-memory', type=float, default=0, help='Maximum CPU memory in GiB. Use this for CPU offloading.')
group.add_argument('--disk', action='store_true', help='If the model is too large for your GPU(s) and CPU combined, send the remaining layers to the disk.')
-group.add_argument('--disk-cache-dir', type=str, default='cache', help='Directory to save the disk cache to. Defaults to "cache".')
+group.add_argument('--disk-cache-dir', type=str, default='user_data/cache', help='Directory to save the disk cache to. Defaults to "user_data/cache".')
group.add_argument('--load-in-8bit', action='store_true', help='Load the model with 8-bit precision (using bitsandbytes).')
group.add_argument('--bf16', action='store_true', help='Load the model with bfloat16 precision. Requires NVIDIA Ampere GPU.')
group.add_argument('--no-cache', action='store_true', help='Set use_cache to False while generating text. This reduces VRAM usage slightly, but it comes at a performance cost.')
@@ -207,7 +207,7 @@ group.add_argument('--nowebui', action='store_true', help='Do not launch the Gra
group = parser.add_argument_group('Deprecated')
# Handle CMD_FLAGS.txt
-cmd_flags_path = Path(__file__).parent.parent / "CMD_FLAGS.txt"
+cmd_flags_path = Path(__file__).parent.parent / "user_data" / "CMD_FLAGS.txt"
if cmd_flags_path.exists():
with cmd_flags_path.open('r', encoding='utf-8') as f:
cmd_flags = ' '.join(
diff --git a/modules/training.py b/modules/training.py
index 69142463..2354c39d 100644
--- a/modules/training.py
+++ b/modules/training.py
@@ -106,23 +106,23 @@ def create_ui():
with gr.Column():
with gr.Tab(label='Formatted Dataset'):
with gr.Row():
- format = gr.Dropdown(choices=utils.get_datasets('training/formats', 'json'), value='None', label='Data Format', info='The format file used to decide how to format the dataset input.', elem_classes=['slim-dropdown'], interactive=not mu)
- ui.create_refresh_button(format, lambda: None, lambda: {'choices': utils.get_datasets('training/formats', 'json')}, 'refresh-button', interactive=not mu)
+ format = gr.Dropdown(choices=utils.get_datasets('user_data/training/formats', 'json'), value='None', label='Data Format', info='The format file used to decide how to format the dataset input.', elem_classes=['slim-dropdown'], interactive=not mu)
+ ui.create_refresh_button(format, lambda: None, lambda: {'choices': utils.get_datasets('user_data/training/formats', 'json')}, 'refresh-button', interactive=not mu)
with gr.Row():
- dataset = gr.Dropdown(choices=utils.get_datasets('training/datasets', 'json'), value='None', label='Dataset', info='The dataset file to use for training.', elem_classes=['slim-dropdown'], interactive=not mu)
- ui.create_refresh_button(dataset, lambda: None, lambda: {'choices': utils.get_datasets('training/datasets', 'json')}, 'refresh-button', interactive=not mu)
+ dataset = gr.Dropdown(choices=utils.get_datasets('user_data/training/datasets', 'json'), value='None', label='Dataset', info='The dataset file to use for training.', elem_classes=['slim-dropdown'], interactive=not mu)
+ ui.create_refresh_button(dataset, lambda: None, lambda: {'choices': utils.get_datasets('user_data/training/datasets', 'json')}, 'refresh-button', interactive=not mu)
with gr.Row():
- eval_dataset = gr.Dropdown(choices=utils.get_datasets('training/datasets', 'json'), value='None', label='Evaluation Dataset', info='The (optional) dataset file used to evaluate the model after training.', elem_classes=['slim-dropdown'], interactive=not mu)
- ui.create_refresh_button(eval_dataset, lambda: None, lambda: {'choices': utils.get_datasets('training/datasets', 'json')}, 'refresh-button', interactive=not mu)
+ eval_dataset = gr.Dropdown(choices=utils.get_datasets('user_data/training/datasets', 'json'), value='None', label='Evaluation Dataset', info='The (optional) dataset file used to evaluate the model after training.', elem_classes=['slim-dropdown'], interactive=not mu)
+ ui.create_refresh_button(eval_dataset, lambda: None, lambda: {'choices': utils.get_datasets('user_data/training/datasets', 'json')}, 'refresh-button', interactive=not mu)
eval_steps = gr.Number(label='Evaluate every n steps', value=100, info='If an evaluation dataset is given, test it every time this many steps pass.')
with gr.Tab(label="Raw text file"):
with gr.Row():
- raw_text_file = gr.Dropdown(choices=utils.get_datasets('training/datasets', 'txt'), value='None', label='Text file', info='The raw text file to use for training.', elem_classes=['slim-dropdown'], interactive=not mu)
- ui.create_refresh_button(raw_text_file, lambda: None, lambda: {'choices': utils.get_datasets('training/datasets', 'txt')}, 'refresh-button', interactive=not mu)
+ raw_text_file = gr.Dropdown(choices=utils.get_datasets('user_data/training/datasets', 'txt'), value='None', label='Text file', info='The raw text file to use for training.', elem_classes=['slim-dropdown'], interactive=not mu)
+ ui.create_refresh_button(raw_text_file, lambda: None, lambda: {'choices': utils.get_datasets('user_data/training/datasets', 'txt')}, 'refresh-button', interactive=not mu)
with gr.Row():
with gr.Column():
@@ -143,7 +143,7 @@ def create_ui():
with gr.Row():
with gr.Column():
models = gr.Dropdown(utils.get_available_models(), label='Models', multiselect=True, interactive=not mu)
- evaluate_text_file = gr.Dropdown(choices=['wikitext', 'ptb', 'ptb_new'] + utils.get_datasets('training/datasets', 'txt')[1:], value='wikitext', label='Input dataset', info='The raw text file on which the model will be evaluated. The first options are automatically downloaded: wikitext, ptb, and ptb_new. The next options are your local text files under training/datasets.', interactive=not mu)
+ evaluate_text_file = gr.Dropdown(choices=['wikitext', 'ptb', 'ptb_new'] + utils.get_datasets('user_data/training/datasets', 'txt')[1:], value='wikitext', label='Input dataset', info='The raw text file on which the model will be evaluated. The first options are automatically downloaded: wikitext, ptb, and ptb_new. The next options are your local text files under user_data/training/datasets.', interactive=not mu)
with gr.Row():
with gr.Column():
stride_length = gr.Slider(label='Stride', minimum=0, maximum=32768, value=512, step=256, info='Used to make the evaluation faster at the cost of accuracy. 1 = slowest but most accurate. 512 is a common value.')
@@ -402,7 +402,7 @@ def do_train(lora_name: str, always_override: bool, q_proj_en: bool, v_proj_en:
if raw_text_file not in ['None', '']:
train_template["template_type"] = "raw_text"
logger.info("Loading raw text file dataset")
- fullpath = clean_path('training/datasets', f'{raw_text_file}')
+ fullpath = clean_path('user_data/training/datasets', f'{raw_text_file}')
fullpath = Path(fullpath)
if fullpath.is_dir():
logger.info('Training path directory {}'.format(raw_text_file))
@@ -415,7 +415,7 @@ def do_train(lora_name: str, always_override: bool, q_proj_en: bool, v_proj_en:
logger.info(f"Loaded training file: {file_path.name}")
else:
- with open(clean_path('training/datasets', f'{raw_text_file}.txt'), 'r', encoding='utf-8') as file:
+ with open(clean_path('user_data/training/datasets', f'{raw_text_file}.txt'), 'r', encoding='utf-8') as file:
raw_text = file.read().replace('\r', '')
cut_string = hard_cut_string.replace('\\n', '\n')
@@ -460,7 +460,7 @@ def do_train(lora_name: str, always_override: bool, q_proj_en: bool, v_proj_en:
train_template["template_type"] = "dataset"
- with open(clean_path('training/formats', f'{format}.json'), 'r', encoding='utf-8-sig') as formatFile:
+ with open(clean_path('user_data/training/formats', f'{format}.json'), 'r', encoding='utf-8-sig') as formatFile:
format_data: dict[str, str] = json.load(formatFile)
# == store training prompt ==
@@ -482,13 +482,13 @@ def do_train(lora_name: str, always_override: bool, q_proj_en: bool, v_proj_en:
return tokenize(prompt, add_eos_token)
logger.info("Loading JSON datasets")
- data = load_dataset("json", data_files=clean_path('training/datasets', f'{dataset}.json'))
+ data = load_dataset("json", data_files=clean_path('user_data/training/datasets', f'{dataset}.json'))
train_data = data['train'].map(generate_and_tokenize_prompt, new_fingerprint='%030x' % random.randrange(16**30))
if eval_dataset == 'None':
eval_data = None
else:
- eval_data = load_dataset("json", data_files=clean_path('training/datasets', f'{eval_dataset}.json'))
+ eval_data = load_dataset("json", data_files=clean_path('user_data/training/datasets', f'{eval_dataset}.json'))
eval_data = eval_data['train'].map(generate_and_tokenize_prompt, new_fingerprint='%030x' % random.randrange(16**30))
# == We MUST reload model if it went through any previous training, even failed one ==
@@ -676,11 +676,11 @@ def do_train(lora_name: str, always_override: bool, q_proj_en: bool, v_proj_en:
decoded_entries.append({"value": decoded_text})
# Write the log file
- Path('logs').mkdir(exist_ok=True)
- with open(Path('logs/train_dataset_sample.json'), 'w') as json_file:
+ Path('user_data/logs').mkdir(exist_ok=True)
+ with open(Path('user_data/logs/train_dataset_sample.json'), 'w') as json_file:
json.dump(decoded_entries, json_file, indent=4)
- logger.info("Log file 'train_dataset_sample.json' created in the 'logs' directory.")
+ logger.info("Log file 'train_dataset_sample.json' created in the 'user_data/logs' directory.")
except Exception as e:
logger.error(f"Failed to create log file due to error: {e}")
diff --git a/modules/transformers_loader.py b/modules/transformers_loader.py
index add3be66..905f5c47 100644
--- a/modules/transformers_loader.py
+++ b/modules/transformers_loader.py
@@ -249,7 +249,7 @@ def load_model_HF(model_name):
)
if shared.args.disk:
- params['offload_folder'] = shared.args.disk_cache_dir
+ params['offload_folder'] = str(Path(shared.args.disk_cache_dir))
if shared.args.compress_pos_emb > 1:
params['rope_scaling'] = {'type': 'linear', 'factor': shared.args.compress_pos_emb}
diff --git a/modules/ui.py b/modules/ui.py
index 68cb76a6..ef5ed0e6 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -94,7 +94,7 @@ if not shared.args.old_colors:
input_radius='0.375rem',
)
-if Path("notification.mp3").exists():
+if Path("user_data/notification.mp3").exists():
audio_notification_js = "document.querySelector('#audio_notification audio')?.play();"
else:
audio_notification_js = ""
diff --git a/modules/ui_chat.py b/modules/ui_chat.py
index b823b8e5..0d588549 100644
--- a/modules/ui_chat.py
+++ b/modules/ui_chat.py
@@ -146,7 +146,7 @@ def create_chat_settings_ui():
with gr.Column(scale=1):
shared.gradio['character_picture'] = gr.Image(label='Character picture', type='pil', interactive=not mu)
- shared.gradio['your_picture'] = gr.Image(label='Your picture', type='pil', value=Image.open(Path('cache/pfp_me.png')) if Path('cache/pfp_me.png').exists() else None, interactive=not mu)
+ shared.gradio['your_picture'] = gr.Image(label='Your picture', type='pil', value=Image.open(Path('user_data/cache/pfp_me.png')) if Path('user_data/cache/pfp_me.png').exists() else None, interactive=not mu)
with gr.Tab('Instruction template'):
with gr.Row():
diff --git a/modules/ui_default.py b/modules/ui_default.py
index ccae9a5e..c2946b37 100644
--- a/modules/ui_default.py
+++ b/modules/ui_default.py
@@ -102,7 +102,7 @@ def handle_save_prompt(text):
return [
text,
utils.current_time() + ".txt",
- "prompts/",
+ "user_data/prompts/",
gr.update(visible=True)
]
@@ -110,6 +110,6 @@ def handle_save_prompt(text):
def handle_delete_prompt(prompt):
return [
prompt + ".txt",
- "prompts/",
+ "user_data/prompts/",
gr.update(visible=True)
]
diff --git a/modules/ui_file_saving.py b/modules/ui_file_saving.py
index 3a27e1b9..d1f9379b 100644
--- a/modules/ui_file_saving.py
+++ b/modules/ui_file_saving.py
@@ -28,7 +28,7 @@ def create_ui():
# Character saver/deleter
with gr.Group(visible=False, elem_classes='file-saver') as shared.gradio['character_saver']:
- shared.gradio['save_character_filename'] = gr.Textbox(lines=1, label='File name', info='The character will be saved to your characters/ folder with this base filename.')
+ shared.gradio['save_character_filename'] = gr.Textbox(lines=1, label='File name', info='The character will be saved to your user_data/characters folder with this base filename.')
with gr.Row():
shared.gradio['save_character_cancel'] = gr.Button('Cancel', elem_classes="small-button")
shared.gradio['save_character_confirm'] = gr.Button('Save', elem_classes="small-button", variant='primary', interactive=not mu)
@@ -41,7 +41,7 @@ def create_ui():
# Preset saver
with gr.Group(visible=False, elem_classes='file-saver') as shared.gradio['preset_saver']:
- shared.gradio['save_preset_filename'] = gr.Textbox(lines=1, label='File name', info='The preset will be saved to your presets/ folder with this base filename.')
+ shared.gradio['save_preset_filename'] = gr.Textbox(lines=1, label='File name', info='The preset will be saved to your user_data/presets folder with this base filename.')
shared.gradio['save_preset_contents'] = gr.Textbox(lines=10, label='File contents')
with gr.Row():
shared.gradio['save_preset_cancel'] = gr.Button('Cancel', elem_classes="small-button")
@@ -72,7 +72,7 @@ def create_event_handlers():
def handle_save_preset_confirm_click(filename, contents):
try:
- utils.save_file(f"presets/{filename}.yaml", contents)
+ utils.save_file(f"user_data/presets/{filename}.yaml", contents)
available_presets = utils.get_available_presets()
output = gr.update(choices=available_presets, value=filename)
except Exception:
@@ -145,7 +145,7 @@ def handle_save_preset_click(state):
def handle_delete_preset_click(preset):
return [
f"{preset}.yaml",
- "presets/",
+ "user_data/presets/",
gr.update(visible=True)
]
@@ -154,7 +154,7 @@ def handle_save_grammar_click(grammar_string):
return [
grammar_string,
"My Fancy Grammar.gbnf",
- "grammars/",
+ "user_data/grammars/",
gr.update(visible=True)
]
@@ -162,6 +162,6 @@ def handle_save_grammar_click(grammar_string):
def handle_delete_grammar_click(grammar_file):
return [
grammar_file,
- "grammars/",
+ "user_data/grammars/",
gr.update(visible=True)
]
diff --git a/modules/ui_model_menu.py b/modules/ui_model_menu.py
index 6bd647c6..dc09c899 100644
--- a/modules/ui_model_menu.py
+++ b/modules/ui_model_menu.py
@@ -223,9 +223,9 @@ def download_model_wrapper(repo_id, specific_file, progress=gr.Progress(), retur
model_dir=shared.args.model_dir if shared.args.model_dir != shared.args_defaults.model_dir else None
)
- if output_folder == Path("models"):
+ if output_folder == Path("user_data/models"):
output_folder = Path(shared.args.model_dir)
- elif output_folder == Path("loras"):
+ elif output_folder == Path("user_data/loras"):
output_folder = Path(shared.args.lora_dir)
if check:
diff --git a/modules/ui_parameters.py b/modules/ui_parameters.py
index 156e4128..6c2715af 100644
--- a/modules/ui_parameters.py
+++ b/modules/ui_parameters.py
@@ -128,7 +128,7 @@ def get_truncation_length():
def load_grammar(name):
- p = Path(f'grammars/{name}')
+ p = Path(f'user_data/grammars/{name}')
if p.exists():
return open(p, 'r', encoding='utf-8').read()
else:
diff --git a/modules/ui_session.py b/modules/ui_session.py
index 66386d12..42434e51 100644
--- a/modules/ui_session.py
+++ b/modules/ui_session.py
@@ -48,7 +48,7 @@ def handle_save_settings(state, preset, extensions, show_controls, theme):
return [
contents,
"settings.yaml",
- "./",
+ "./user_data",
gr.update(visible=True)
]
diff --git a/modules/utils.py b/modules/utils.py
index 269561aa..77324139 100644
--- a/modules/utils.py
+++ b/modules/utils.py
@@ -98,7 +98,7 @@ def get_available_models():
dirs_with_gguf = set()
for gguf_path in gguf_files:
path = Path(gguf_path)
-        if len(path.parts) > 0:
+        if path.parts:
dirs_with_gguf.add(path.parts[0])
# Find directories with safetensors files
@@ -141,11 +141,11 @@ def get_available_ggufs():
def get_available_presets():
- return sorted(set((k.stem for k in Path('presets').glob('*.yaml'))), key=natural_keys)
+ return sorted(set((k.stem for k in Path('user_data/presets').glob('*.yaml'))), key=natural_keys)
def get_available_prompts():
- prompt_files = list(Path('prompts').glob('*.txt'))
+ prompt_files = list(Path('user_data/prompts').glob('*.txt'))
sorted_files = sorted(prompt_files, key=lambda x: x.stat().st_mtime, reverse=True)
prompts = [file.stem for file in sorted_files]
prompts.append('None')
@@ -153,12 +153,12 @@ def get_available_prompts():
def get_available_characters():
- paths = (x for x in Path('characters').iterdir() if x.suffix in ('.json', '.yaml', '.yml'))
+ paths = (x for x in Path('user_data/characters').iterdir() if x.suffix in ('.json', '.yaml', '.yml'))
return sorted(set((k.stem for k in paths)), key=natural_keys)
def get_available_instruction_templates():
- path = "instruction-templates"
+ path = "user_data/instruction-templates"
paths = []
if os.path.exists(path):
paths = (x for x in Path(path).iterdir() if x.suffix in ('.json', '.yaml', '.yml'))
@@ -189,4 +189,4 @@ def get_available_chat_styles():
def get_available_grammars():
- return ['None'] + sorted([item.name for item in list(Path('grammars').glob('*.gbnf'))], key=natural_keys)
+ return ['None'] + sorted([item.name for item in list(Path('user_data/grammars').glob('*.gbnf'))], key=natural_keys)
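All of these `get_available_*` helpers follow the same pattern: glob a `user_data/` subfolder, take the stems, and sort with `natural_keys`. A self-contained rendition of one of them (this `natural_keys` re-implementation is an assumption based on its name; the repo defines its own in modules/utils.py):

```python
# Standalone version of the listing pattern above.
import re
from pathlib import Path

def natural_keys(text):
    # Assumed behavior: sort 'preset2' before 'preset10' by comparing
    # digit runs numerically and everything else case-insensitively.
    return [int(tok) if tok.isdigit() else tok.lower()
            for tok in re.split(r'(\d+)', text)]

def get_available_presets():
    return sorted({p.stem for p in Path('user_data/presets').glob('*.yaml')},
                  key=natural_keys)
```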
diff --git a/one_click.py b/one_click.py
index 5e3d691b..065afd99 100644
--- a/one_click.py
+++ b/one_click.py
@@ -293,10 +293,10 @@ def install_webui():
# Write a flag to CMD_FLAGS.txt for CPU mode
if selected_gpu == "NONE":
- cmd_flags_path = os.path.join(script_dir, "CMD_FLAGS.txt")
+ cmd_flags_path = os.path.join(script_dir, "user_data", "CMD_FLAGS.txt")
with open(cmd_flags_path, 'r+') as cmd_flags_file:
if "--cpu" not in cmd_flags_file.read():
- print_big_message("Adding the --cpu flag to CMD_FLAGS.txt.")
+ print_big_message("Adding the --cpu flag to user_data/CMD_FLAGS.txt.")
cmd_flags_file.write("\n--cpu\n")
# Handle CUDA version display
@@ -532,7 +532,7 @@ if __name__ == "__main__":
flags_list = re.split(' +(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)|=', flags)
model_dir = [flags_list[(flags_list.index(flag) + 1)] for flag in flags_list if flag == '--model-dir'][0].strip('"\'')
else:
- model_dir = 'models'
+ model_dir = 'user_data/models'
if len([item for item in glob.glob(f'{model_dir}/*') if not item.endswith(('.txt', '.yaml'))]) == 0:
print_big_message("You haven't downloaded any model yet.\nOnce the web UI launches, head over to the \"Model\" tab and download one.")
diff --git a/server.py b/server.py
index 41a5660d..01d40ac4 100644
--- a/server.py
+++ b/server.py
@@ -94,8 +94,8 @@ def create_interface():
'filter_by_loader': shared.args.loader or 'All'
})
- if Path("cache/pfp_character.png").exists():
- Path("cache/pfp_character.png").unlink()
+ if Path("user_data/cache/pfp_character.png").exists():
+ Path("user_data/cache/pfp_character.png").unlink()
# css/js strings
css = ui.css
@@ -112,8 +112,8 @@ def create_interface():
shared.gradio['interface_state'] = gr.State({k: None for k in shared.input_elements})
# Audio notification
- if Path("notification.mp3").exists():
- shared.gradio['audio_notification'] = gr.Audio(interactive=False, value="notification.mp3", elem_id="audio_notification", visible=False)
+ if Path("user_data/notification.mp3").exists():
+ shared.gradio['audio_notification'] = gr.Audio(interactive=False, value="user_data/notification.mp3", elem_id="audio_notification", visible=False)
# Floating menus for saving/deleting files
ui_file_saving.create_ui()
@@ -179,7 +179,7 @@ def create_interface():
ssl_keyfile=shared.args.ssl_keyfile,
ssl_certfile=shared.args.ssl_certfile,
root_path=shared.args.subpath,
- allowed_paths=["cache", "css", "extensions", "js"]
+ allowed_paths=["css", "js", "extensions", "user_data/cache"]
)
@@ -192,10 +192,10 @@ if __name__ == "__main__":
settings_file = None
if shared.args.settings is not None and Path(shared.args.settings).exists():
settings_file = Path(shared.args.settings)
- elif Path('settings.yaml').exists():
- settings_file = Path('settings.yaml')
- elif Path('settings.json').exists():
- settings_file = Path('settings.json')
+ elif Path('user_data/settings.yaml').exists():
+ settings_file = Path('user_data/settings.yaml')
+ elif Path('user_data/settings.json').exists():
+ settings_file = Path('user_data/settings.json')
if settings_file is not None:
logger.info(f"Loading settings from \"{settings_file}\"")
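The settings lookup order after the migration, restated as a standalone sketch: an explicit `--settings` path wins, then `user_data/settings.yaml`, then `user_data/settings.json` (the function name is illustrative; server.py does this inline):

```python
# Sketch of the settings resolution above.
from pathlib import Path
from typing import Optional

def find_settings_file(cli_settings: Optional[str]) -> Optional[Path]:
    if cli_settings is not None and Path(cli_settings).exists():
        return Path(cli_settings)
    for candidate in ('user_data/settings.yaml', 'user_data/settings.json'):
        if Path(candidate).exists():
            return Path(candidate)
    return None
```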
diff --git a/CMD_FLAGS.txt b/user_data/CMD_FLAGS.txt
similarity index 100%
rename from CMD_FLAGS.txt
rename to user_data/CMD_FLAGS.txt
diff --git a/characters/Assistant.yaml b/user_data/characters/Assistant.yaml
similarity index 100%
rename from characters/Assistant.yaml
rename to user_data/characters/Assistant.yaml
diff --git a/characters/Example.png b/user_data/characters/Example.png
similarity index 100%
rename from characters/Example.png
rename to user_data/characters/Example.png
diff --git a/characters/Example.yaml b/user_data/characters/Example.yaml
similarity index 100%
rename from characters/Example.yaml
rename to user_data/characters/Example.yaml
diff --git a/grammars/arithmetic.gbnf b/user_data/grammars/arithmetic.gbnf
similarity index 100%
rename from grammars/arithmetic.gbnf
rename to user_data/grammars/arithmetic.gbnf
diff --git a/grammars/c.gbnf b/user_data/grammars/c.gbnf
similarity index 100%
rename from grammars/c.gbnf
rename to user_data/grammars/c.gbnf
diff --git a/grammars/chess.gbnf b/user_data/grammars/chess.gbnf
similarity index 100%
rename from grammars/chess.gbnf
rename to user_data/grammars/chess.gbnf
diff --git a/grammars/json.gbnf b/user_data/grammars/json.gbnf
similarity index 100%
rename from grammars/json.gbnf
rename to user_data/grammars/json.gbnf
diff --git a/grammars/json_w_trailing_space.gbnf b/user_data/grammars/json_w_trailing_space.gbnf
similarity index 100%
rename from grammars/json_w_trailing_space.gbnf
rename to user_data/grammars/json_w_trailing_space.gbnf
diff --git a/grammars/list.gbnf b/user_data/grammars/list.gbnf
similarity index 100%
rename from grammars/list.gbnf
rename to user_data/grammars/list.gbnf
diff --git a/grammars/roleplay.gbnf b/user_data/grammars/roleplay.gbnf
similarity index 100%
rename from grammars/roleplay.gbnf
rename to user_data/grammars/roleplay.gbnf
diff --git a/grammars/simple_arithmetic.gbnf b/user_data/grammars/simple_arithmetic.gbnf
similarity index 100%
rename from grammars/simple_arithmetic.gbnf
rename to user_data/grammars/simple_arithmetic.gbnf
diff --git a/instruction-templates/Airoboros-v1.2.yaml b/user_data/instruction-templates/Airoboros-v1.2.yaml
similarity index 100%
rename from instruction-templates/Airoboros-v1.2.yaml
rename to user_data/instruction-templates/Airoboros-v1.2.yaml
diff --git a/instruction-templates/Alpaca.yaml b/user_data/instruction-templates/Alpaca.yaml
similarity index 100%
rename from instruction-templates/Alpaca.yaml
rename to user_data/instruction-templates/Alpaca.yaml
diff --git a/instruction-templates/Bactrian.yaml b/user_data/instruction-templates/Bactrian.yaml
similarity index 100%
rename from instruction-templates/Bactrian.yaml
rename to user_data/instruction-templates/Bactrian.yaml
diff --git a/instruction-templates/Baichuan Chat.yaml b/user_data/instruction-templates/Baichuan Chat.yaml
similarity index 100%
rename from instruction-templates/Baichuan Chat.yaml
rename to user_data/instruction-templates/Baichuan Chat.yaml
diff --git a/instruction-templates/Baize.yaml b/user_data/instruction-templates/Baize.yaml
similarity index 100%
rename from instruction-templates/Baize.yaml
rename to user_data/instruction-templates/Baize.yaml
diff --git a/instruction-templates/Bluemoon.yaml b/user_data/instruction-templates/Bluemoon.yaml
similarity index 100%
rename from instruction-templates/Bluemoon.yaml
rename to user_data/instruction-templates/Bluemoon.yaml
diff --git a/instruction-templates/ChatGLM.yaml b/user_data/instruction-templates/ChatGLM.yaml
similarity index 100%
rename from instruction-templates/ChatGLM.yaml
rename to user_data/instruction-templates/ChatGLM.yaml
diff --git a/instruction-templates/ChatML.yaml b/user_data/instruction-templates/ChatML.yaml
similarity index 100%
rename from instruction-templates/ChatML.yaml
rename to user_data/instruction-templates/ChatML.yaml
diff --git a/instruction-templates/Chinese-Vicuna-Chat.yaml b/user_data/instruction-templates/Chinese-Vicuna-Chat.yaml
similarity index 100%
rename from instruction-templates/Chinese-Vicuna-Chat.yaml
rename to user_data/instruction-templates/Chinese-Vicuna-Chat.yaml
diff --git a/instruction-templates/Command-R.yaml b/user_data/instruction-templates/Command-R.yaml
similarity index 100%
rename from instruction-templates/Command-R.yaml
rename to user_data/instruction-templates/Command-R.yaml
diff --git a/instruction-templates/Galactica Cite.yaml b/user_data/instruction-templates/Galactica Cite.yaml
similarity index 100%
rename from instruction-templates/Galactica Cite.yaml
rename to user_data/instruction-templates/Galactica Cite.yaml
diff --git a/instruction-templates/Galactica Finetuned.yaml b/user_data/instruction-templates/Galactica Finetuned.yaml
similarity index 100%
rename from instruction-templates/Galactica Finetuned.yaml
rename to user_data/instruction-templates/Galactica Finetuned.yaml
diff --git a/instruction-templates/Galactica Q.yaml b/user_data/instruction-templates/Galactica Q.yaml
similarity index 100%
rename from instruction-templates/Galactica Q.yaml
rename to user_data/instruction-templates/Galactica Q.yaml
diff --git a/instruction-templates/Galactica Summary.yaml b/user_data/instruction-templates/Galactica Summary.yaml
similarity index 100%
rename from instruction-templates/Galactica Summary.yaml
rename to user_data/instruction-templates/Galactica Summary.yaml
diff --git a/instruction-templates/Galactica Work.yaml b/user_data/instruction-templates/Galactica Work.yaml
similarity index 100%
rename from instruction-templates/Galactica Work.yaml
rename to user_data/instruction-templates/Galactica Work.yaml
diff --git a/instruction-templates/Galactica v2.yaml b/user_data/instruction-templates/Galactica v2.yaml
similarity index 100%
rename from instruction-templates/Galactica v2.yaml
rename to user_data/instruction-templates/Galactica v2.yaml
diff --git a/instruction-templates/Galactica.yaml b/user_data/instruction-templates/Galactica.yaml
similarity index 100%
rename from instruction-templates/Galactica.yaml
rename to user_data/instruction-templates/Galactica.yaml
diff --git a/instruction-templates/Gorilla.yaml b/user_data/instruction-templates/Gorilla.yaml
similarity index 100%
rename from instruction-templates/Gorilla.yaml
rename to user_data/instruction-templates/Gorilla.yaml
diff --git a/instruction-templates/Guanaco non-chat.yaml b/user_data/instruction-templates/Guanaco non-chat.yaml
similarity index 100%
rename from instruction-templates/Guanaco non-chat.yaml
rename to user_data/instruction-templates/Guanaco non-chat.yaml
diff --git a/instruction-templates/Guanaco-QLoRA.yaml b/user_data/instruction-templates/Guanaco-QLoRA.yaml
similarity index 100%
rename from instruction-templates/Guanaco-QLoRA.yaml
rename to user_data/instruction-templates/Guanaco-QLoRA.yaml
diff --git a/instruction-templates/H2O-prompt_answer.yaml b/user_data/instruction-templates/H2O-prompt_answer.yaml
similarity index 100%
rename from instruction-templates/H2O-prompt_answer.yaml
rename to user_data/instruction-templates/H2O-prompt_answer.yaml
diff --git a/instruction-templates/Hippogriff.yaml b/user_data/instruction-templates/Hippogriff.yaml
similarity index 100%
rename from instruction-templates/Hippogriff.yaml
rename to user_data/instruction-templates/Hippogriff.yaml
diff --git a/instruction-templates/INCITE-Chat.yaml b/user_data/instruction-templates/INCITE-Chat.yaml
similarity index 100%
rename from instruction-templates/INCITE-Chat.yaml
rename to user_data/instruction-templates/INCITE-Chat.yaml
diff --git a/instruction-templates/INCITE-Instruct.yaml b/user_data/instruction-templates/INCITE-Instruct.yaml
similarity index 100%
rename from instruction-templates/INCITE-Instruct.yaml
rename to user_data/instruction-templates/INCITE-Instruct.yaml
diff --git a/instruction-templates/KoAlpaca.yaml b/user_data/instruction-templates/KoAlpaca.yaml
similarity index 100%
rename from instruction-templates/KoAlpaca.yaml
rename to user_data/instruction-templates/KoAlpaca.yaml
diff --git a/instruction-templates/Koala.yaml b/user_data/instruction-templates/Koala.yaml
similarity index 100%
rename from instruction-templates/Koala.yaml
rename to user_data/instruction-templates/Koala.yaml
diff --git a/instruction-templates/LLaVA.yaml b/user_data/instruction-templates/LLaVA.yaml
similarity index 100%
rename from instruction-templates/LLaVA.yaml
rename to user_data/instruction-templates/LLaVA.yaml
diff --git a/instruction-templates/Llama-v2.yaml b/user_data/instruction-templates/Llama-v2.yaml
similarity index 100%
rename from instruction-templates/Llama-v2.yaml
rename to user_data/instruction-templates/Llama-v2.yaml
diff --git a/instruction-templates/Llama-v3.yaml b/user_data/instruction-templates/Llama-v3.yaml
similarity index 100%
rename from instruction-templates/Llama-v3.yaml
rename to user_data/instruction-templates/Llama-v3.yaml
diff --git a/instruction-templates/MOSS.yaml b/user_data/instruction-templates/MOSS.yaml
similarity index 100%
rename from instruction-templates/MOSS.yaml
rename to user_data/instruction-templates/MOSS.yaml
diff --git a/instruction-templates/Manticore Chat.yaml b/user_data/instruction-templates/Manticore Chat.yaml
similarity index 100%
rename from instruction-templates/Manticore Chat.yaml
rename to user_data/instruction-templates/Manticore Chat.yaml
diff --git a/instruction-templates/Metharme.yaml b/user_data/instruction-templates/Metharme.yaml
similarity index 100%
rename from instruction-templates/Metharme.yaml
rename to user_data/instruction-templates/Metharme.yaml
diff --git a/instruction-templates/Mistral.yaml b/user_data/instruction-templates/Mistral.yaml
similarity index 100%
rename from instruction-templates/Mistral.yaml
rename to user_data/instruction-templates/Mistral.yaml
diff --git a/instruction-templates/NVIDIA-ChatQA.yaml b/user_data/instruction-templates/NVIDIA-ChatQA.yaml
similarity index 100%
rename from instruction-templates/NVIDIA-ChatQA.yaml
rename to user_data/instruction-templates/NVIDIA-ChatQA.yaml
diff --git a/instruction-templates/NewHope.yaml b/user_data/instruction-templates/NewHope.yaml
similarity index 100%
rename from instruction-templates/NewHope.yaml
rename to user_data/instruction-templates/NewHope.yaml
diff --git a/instruction-templates/Open Assistant.yaml b/user_data/instruction-templates/Open Assistant.yaml
similarity index 100%
rename from instruction-templates/Open Assistant.yaml
rename to user_data/instruction-templates/Open Assistant.yaml
diff --git a/instruction-templates/OpenBuddy.yaml b/user_data/instruction-templates/OpenBuddy.yaml
similarity index 100%
rename from instruction-templates/OpenBuddy.yaml
rename to user_data/instruction-templates/OpenBuddy.yaml
diff --git a/instruction-templates/OpenChat.yaml b/user_data/instruction-templates/OpenChat.yaml
similarity index 100%
rename from instruction-templates/OpenChat.yaml
rename to user_data/instruction-templates/OpenChat.yaml
diff --git a/instruction-templates/OpenOrca-Platypus2.yaml b/user_data/instruction-templates/OpenOrca-Platypus2.yaml
similarity index 100%
rename from instruction-templates/OpenOrca-Platypus2.yaml
rename to user_data/instruction-templates/OpenOrca-Platypus2.yaml
diff --git a/instruction-templates/Orca Mini.yaml b/user_data/instruction-templates/Orca Mini.yaml
similarity index 100%
rename from instruction-templates/Orca Mini.yaml
rename to user_data/instruction-templates/Orca Mini.yaml
diff --git a/instruction-templates/Orca-Vicuna.yaml b/user_data/instruction-templates/Orca-Vicuna.yaml
similarity index 100%
rename from instruction-templates/Orca-Vicuna.yaml
rename to user_data/instruction-templates/Orca-Vicuna.yaml
diff --git a/instruction-templates/RWKV-Raven.yaml b/user_data/instruction-templates/RWKV-Raven.yaml
similarity index 100%
rename from instruction-templates/RWKV-Raven.yaml
rename to user_data/instruction-templates/RWKV-Raven.yaml
diff --git a/instruction-templates/RWKV-World.yaml b/user_data/instruction-templates/RWKV-World.yaml
similarity index 100%
rename from instruction-templates/RWKV-World.yaml
rename to user_data/instruction-templates/RWKV-World.yaml
diff --git a/instruction-templates/Samantha.yaml b/user_data/instruction-templates/Samantha.yaml
similarity index 100%
rename from instruction-templates/Samantha.yaml
rename to user_data/instruction-templates/Samantha.yaml
diff --git a/instruction-templates/StableBeluga2.yaml b/user_data/instruction-templates/StableBeluga2.yaml
similarity index 100%
rename from instruction-templates/StableBeluga2.yaml
rename to user_data/instruction-templates/StableBeluga2.yaml
diff --git a/instruction-templates/StableLM.yaml b/user_data/instruction-templates/StableLM.yaml
similarity index 100%
rename from instruction-templates/StableLM.yaml
rename to user_data/instruction-templates/StableLM.yaml
diff --git a/instruction-templates/StableVicuna.yaml b/user_data/instruction-templates/StableVicuna.yaml
similarity index 100%
rename from instruction-templates/StableVicuna.yaml
rename to user_data/instruction-templates/StableVicuna.yaml
diff --git a/instruction-templates/Starchat-Beta.yaml b/user_data/instruction-templates/Starchat-Beta.yaml
similarity index 100%
rename from instruction-templates/Starchat-Beta.yaml
rename to user_data/instruction-templates/Starchat-Beta.yaml
diff --git a/instruction-templates/Synthia-CoT.yaml b/user_data/instruction-templates/Synthia-CoT.yaml
similarity index 100%
rename from instruction-templates/Synthia-CoT.yaml
rename to user_data/instruction-templates/Synthia-CoT.yaml
diff --git a/instruction-templates/Synthia.yaml b/user_data/instruction-templates/Synthia.yaml
similarity index 100%
rename from instruction-templates/Synthia.yaml
rename to user_data/instruction-templates/Synthia.yaml
diff --git a/instruction-templates/Tulu.yaml b/user_data/instruction-templates/Tulu.yaml
similarity index 100%
rename from instruction-templates/Tulu.yaml
rename to user_data/instruction-templates/Tulu.yaml
diff --git a/instruction-templates/Vicuna-v0.yaml b/user_data/instruction-templates/Vicuna-v0.yaml
similarity index 100%
rename from instruction-templates/Vicuna-v0.yaml
rename to user_data/instruction-templates/Vicuna-v0.yaml
diff --git a/instruction-templates/Vicuna-v1.1.yaml b/user_data/instruction-templates/Vicuna-v1.1.yaml
similarity index 100%
rename from instruction-templates/Vicuna-v1.1.yaml
rename to user_data/instruction-templates/Vicuna-v1.1.yaml
diff --git a/instruction-templates/Vigogne-Chat.yaml b/user_data/instruction-templates/Vigogne-Chat.yaml
similarity index 100%
rename from instruction-templates/Vigogne-Chat.yaml
rename to user_data/instruction-templates/Vigogne-Chat.yaml
diff --git a/instruction-templates/Vigogne-Instruct.yaml b/user_data/instruction-templates/Vigogne-Instruct.yaml
similarity index 100%
rename from instruction-templates/Vigogne-Instruct.yaml
rename to user_data/instruction-templates/Vigogne-Instruct.yaml
diff --git a/instruction-templates/Wizard-Mega ShareGPT.yaml b/user_data/instruction-templates/Wizard-Mega ShareGPT.yaml
similarity index 100%
rename from instruction-templates/Wizard-Mega ShareGPT.yaml
rename to user_data/instruction-templates/Wizard-Mega ShareGPT.yaml
diff --git a/instruction-templates/Wizard-Mega.yaml b/user_data/instruction-templates/Wizard-Mega.yaml
similarity index 100%
rename from instruction-templates/Wizard-Mega.yaml
rename to user_data/instruction-templates/Wizard-Mega.yaml
diff --git a/instruction-templates/Ziya.yaml b/user_data/instruction-templates/Ziya.yaml
similarity index 100%
rename from instruction-templates/Ziya.yaml
rename to user_data/instruction-templates/Ziya.yaml
diff --git a/loras/place-your-loras-here.txt b/user_data/loras/place-your-loras-here.txt
similarity index 100%
rename from loras/place-your-loras-here.txt
rename to user_data/loras/place-your-loras-here.txt
diff --git a/models/config.yaml b/user_data/models/config.yaml
similarity index 100%
rename from models/config.yaml
rename to user_data/models/config.yaml
diff --git a/models/place-your-models-here.txt b/user_data/models/place-your-models-here.txt
similarity index 100%
rename from models/place-your-models-here.txt
rename to user_data/models/place-your-models-here.txt
diff --git a/presets/Contrastive Search.yaml b/user_data/presets/Contrastive Search.yaml
similarity index 100%
rename from presets/Contrastive Search.yaml
rename to user_data/presets/Contrastive Search.yaml
diff --git a/presets/Creative.yaml b/user_data/presets/Creative.yaml
similarity index 100%
rename from presets/Creative.yaml
rename to user_data/presets/Creative.yaml
diff --git a/presets/Deterministic.yaml b/user_data/presets/Deterministic.yaml
similarity index 100%
rename from presets/Deterministic.yaml
rename to user_data/presets/Deterministic.yaml
diff --git a/presets/Instruct.yaml b/user_data/presets/Instruct.yaml
similarity index 100%
rename from presets/Instruct.yaml
rename to user_data/presets/Instruct.yaml
diff --git a/presets/Null preset.yaml b/user_data/presets/Null preset.yaml
similarity index 100%
rename from presets/Null preset.yaml
rename to user_data/presets/Null preset.yaml
diff --git a/presets/min_p.yaml b/user_data/presets/min_p.yaml
similarity index 100%
rename from presets/min_p.yaml
rename to user_data/presets/min_p.yaml
diff --git a/prompts/Alpaca-with-Input.txt b/user_data/prompts/Alpaca-with-Input.txt
similarity index 100%
rename from prompts/Alpaca-with-Input.txt
rename to user_data/prompts/Alpaca-with-Input.txt
diff --git a/prompts/QA.txt b/user_data/prompts/QA.txt
similarity index 100%
rename from prompts/QA.txt
rename to user_data/prompts/QA.txt
diff --git a/training/datasets/put-trainer-datasets-here.txt b/user_data/training/datasets/put-trainer-datasets-here.txt
similarity index 100%
rename from training/datasets/put-trainer-datasets-here.txt
rename to user_data/training/datasets/put-trainer-datasets-here.txt
diff --git a/training/formats/ChatML-format.json b/user_data/training/formats/ChatML-format.json
similarity index 100%
rename from training/formats/ChatML-format.json
rename to user_data/training/formats/ChatML-format.json
diff --git a/training/formats/alpaca-chatbot-format.json b/user_data/training/formats/alpaca-chatbot-format.json
similarity index 100%
rename from training/formats/alpaca-chatbot-format.json
rename to user_data/training/formats/alpaca-chatbot-format.json
diff --git a/training/formats/alpaca-format.json b/user_data/training/formats/alpaca-format.json
similarity index 100%
rename from training/formats/alpaca-format.json
rename to user_data/training/formats/alpaca-format.json
diff --git a/training/formats/llama2-chat-format.json b/user_data/training/formats/llama2-chat-format.json
similarity index 100%
rename from training/formats/llama2-chat-format.json
rename to user_data/training/formats/llama2-chat-format.json
diff --git a/training/formats/vicuna-format.json b/user_data/training/formats/vicuna-format.json
similarity index 100%
rename from training/formats/vicuna-format.json
rename to user_data/training/formats/vicuna-format.json