Merge pull request #7129 from oobabooga/dev

Merge dev branch
This commit is contained in:
oobabooga 2025-07-09 00:10:16 -03:00 committed by GitHub
commit 6338dc0051
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
13 changed files with 106 additions and 91 deletions

View file

@ -107,7 +107,7 @@ jobs:
cp -r text-generation-webui "text-generation-webui-${VERSION_CLEAN}"
cd "text-generation-webui-${VERSION_CLEAN}"
rm -rf .git cmd* update_wizard* Colab-TextGen-GPU.ipynb docker setup.cfg .github .gitignore
# Remove extensions that need additional requirements
allowed=("character_bias" "gallery" "openai" "sd_api_pictures")
find extensions/ -mindepth 1 -maxdepth 1 -type d | grep -v -E "$(printf '%s|' "${allowed[@]}" | sed 's/|$//')" | xargs rm -rf
@ -163,6 +163,7 @@ jobs:
if [[ "$CUDA_VERSION" == "11.7" ]]; then
rm requirements_cuda_temp.txt
fi
rm -rf .git cmd* update_wizard* Colab-TextGen-GPU.ipynb docker setup.cfg .github .gitignore requirements/ one_click.py
# 6. Create ZIP file
cd ..

View file

@ -106,7 +106,7 @@ jobs:
cp -r text-generation-webui "text-generation-webui-${VERSION_CLEAN}"
cd "text-generation-webui-${VERSION_CLEAN}"
rm -rf .git cmd* update_wizard* Colab-TextGen-GPU.ipynb docker setup.cfg .github .gitignore
# Remove extensions that need additional requirements
allowed=("character_bias" "gallery" "openai" "sd_api_pictures")
find extensions/ -mindepth 1 -maxdepth 1 -type d | grep -v -E "$(printf '%s|' "${allowed[@]}" | sed 's/|$//')" | xargs rm -rf
@ -150,6 +150,9 @@ jobs:
echo "Installing Python packages from $REQ_FILE..."
$PIP_PATH install --target="./$PACKAGES_PATH" -r "$REQ_FILE"
# 5. Clean up
rm -rf .git cmd* update_wizard* Colab-TextGen-GPU.ipynb docker setup.cfg .github .gitignore requirements/ one_click.py
# 6. Create ZIP file
cd ..
ZIP_NAME="textgen-portable-${VERSION_CLEAN}-${PLATFORM}-vulkan.zip"

View file

@ -106,7 +106,7 @@ jobs:
cp -r text-generation-webui "text-generation-webui-${VERSION_CLEAN}"
cd "text-generation-webui-${VERSION_CLEAN}"
rm -rf .git cmd* update_wizard* Colab-TextGen-GPU.ipynb docker setup.cfg .github .gitignore
# Remove extensions that need additional requirements
allowed=("character_bias" "gallery" "openai" "sd_api_pictures")
find extensions/ -mindepth 1 -maxdepth 1 -type d | grep -v -E "$(printf '%s|' "${allowed[@]}" | sed 's/|$//')" | xargs rm -rf
@ -176,7 +176,10 @@ jobs:
echo "Installing Python packages from $REQ_FILE..."
$PIP_PATH install --target="./$PACKAGES_PATH" -r "$REQ_FILE"
# 5. Create ZIP file
# 5. Clean up
rm -rf .git cmd* update_wizard* Colab-TextGen-GPU.ipynb docker setup.cfg .github .gitignore requirements/ one_click.py
# 6. Create ZIP file
cd ..
ZIP_NAME="textgen-portable-${VERSION_CLEAN}-${PLATFORM}.zip"
echo "Creating archive: $ZIP_NAME"

View file

@ -18,6 +18,7 @@ Its goal is to become the [AUTOMATIC1111/stable-diffusion-webui](https://github.
- **File attachments**: Upload text files, PDF documents, and .docx documents to talk about their contents.
- **Web search**: Optionally search the internet with LLM-generated queries to add context to the conversation.
- Aesthetic UI with dark and light themes.
- Syntax highlighting for code blocks and LaTeX rendering for mathematical expressions.
- `instruct` mode for instruction-following (like ChatGPT), and `chat-instruct`/`chat` modes for talking to custom characters.
- Automatic prompt formatting using Jinja2 templates. You don't need to ever worry about prompt formats.
- Edit messages, navigate between message versions, and branch conversations at any point.

View file

@ -35,10 +35,6 @@
color: #f0f0f0; /* Light text color for readability */
}
.text p {
margin-top: 2px;
}
.username {
padding-left: 10px;
font-size: 20px;
@ -91,7 +87,6 @@
}
.message-body p {
margin-bottom: 0 !important;
font-size: 16px !important;
line-height: 1.5 !important;
color: #e0e0e0 !important; /* Light color for text */
@ -129,3 +124,16 @@
font-size: 18px; /* Smaller username for mobile */
}
}
/* Standard spacing from instruct style */
.chat .message-body :is(p, ul, ol) {
margin: 1.25em 0 !important;
}
.chat .message-body :is(p, ul, ol):first-child {
margin-top: 0 !important;
}
.chat .message-body :is(p, ul, ol):last-child {
margin-bottom: 0 !important;
}

View file

@ -38,10 +38,6 @@
text-shadow: 2px 2px 2px rgb(0 0 0 / 40%);
}
.text p {
margin-top: 2px;
}
.username {
padding-left: 10px;
font-size: 22px;
@ -87,7 +83,6 @@
}
.message-body p {
margin-bottom: 0 !important;
font-size: 18px !important;
line-height: 1.428571429 !important;
color: rgb(243 244 246) !important;
@ -135,3 +130,16 @@
font-size: 20px;
}
}
/* Standard spacing from instruct style */
.chat .message-body :is(p, ul, ol) {
margin: 1.25em 0 !important;
}
.chat .message-body :is(p, ul, ol):first-child {
margin-top: 0 !important;
}
.chat .message-body :is(p, ul, ol):last-child {
margin-bottom: 0 !important;
}

View file

@ -9,11 +9,6 @@
line-height: 22.5px !important;
}
.message-body {
margin-top: 3px;
font-size: 15px !important;
}
.circle-you {
width: 50px;
height: 50px;
@ -52,10 +47,6 @@
font-weight: 500;
}
.message-body p, .chat .message-body ul, .chat .message-body ol {
margin-bottom: 10px !important;
}
.dark .message-body p em {
color: rgb(138 138 138) !important;
}
@ -64,3 +55,16 @@
color: rgb(110 110 110) !important;
font-weight: 500;
}
/* Standard spacing from instruct style */
.chat .message-body :is(p, ul, ol) {
margin: 1.25em 0 !important;
}
.chat .message-body :is(p, ul, ol):first-child {
margin-top: 0 !important;
}
.chat .message-body :is(p, ul, ol):last-child {
margin-bottom: 0 !important;
}

View file

@ -68,17 +68,10 @@
max-width: 80%;
}
.text p {
margin-top: 5px;
}
.username {
font-weight: bold;
}
.message-body {
}
.message-body img {
max-width: 300px;
max-height: 300px;
@ -86,7 +79,6 @@
}
.message-body p {
margin-bottom: 0 !important;
font-size: 15px !important;
line-height: 1.428571429 !important;
font-weight: 500;
@ -99,3 +91,16 @@
.message-body p em {
color: rgb(110 110 110) !important;
}
/* Standard spacing from instruct style */
.chat .message-body :is(p, ul, ol) {
margin: 1.25em 0 !important;
}
.chat .message-body :is(p, ul, ol):first-child {
margin-top: 0 !important;
}
.chat .message-body :is(p, ul, ol):last-child {
margin-bottom: 0 !important;
}

View file

@ -83,10 +83,6 @@
font-weight: 400;
}
.message-body p:first-child {
margin-top: 0 !important;
}
.dark .message-body p em {
color: rgb(170 170 170) !important;
}
@ -100,6 +96,15 @@
margin-top: 8px;
}
.message-body p, .chat .message-body ul, .chat .message-body ol {
margin-bottom: 10px !important;
/* Standard spacing from instruct style */
.chat .message-body :is(p, ul, ol) {
margin: 1.25em 0 !important;
}
.chat .message-body :is(p, ul, ol):first-child {
margin-top: 0 !important;
}
.chat .message-body :is(p, ul, ol):last-child {
margin-bottom: 0 !important;
}

View file

@ -11,9 +11,8 @@
.readable-container p, .readable-container li {
font-size: 16px !important;
color: #efefef !important;
margin-bottom: 22px;
line-height: 1.4 !important;
color: #efefef !important;
}
.readable-container li > p {
@ -30,4 +29,17 @@
.readable-container .hoverable {
font-size: 14px;
}
}
/* Standard spacing from instruct style */
.readable-container :is(p, ul, ol) {
margin: 1.25em 0 !important;
}
.readable-container :is(p, ul, ol):first-child {
margin-top: 0 !important;
}
.readable-container :is(p, ul, ol):last-child {
margin-bottom: 0 !important;
}

View file

@ -51,10 +51,12 @@ For more information about the parameters, the [transformers documentation](http
* **guidance_scale**: The main parameter for Classifier-Free Guidance (CFG). [The paper](https://arxiv.org/pdf/2306.17806.pdf) suggests that 1.5 is a good value. It can be used in conjunction with a negative prompt or not.
* **Negative prompt**: Only used when `guidance_scale != 1`. It is most useful for instruct models and custom system messages. You place your full prompt in this field with the system message replaced with the default one for the model (like "You are Llama, a helpful assistant...") to make the model pay more attention to your custom system message.
* **penalty_alpha**: Contrastive Search is enabled by setting this to greater than zero and unchecking "do_sample". It should be used with a low value of top_k, for instance, top_k = 4.
* **mirostat_mode**: Activates the Mirostat sampling technique. It aims to control perplexity during sampling. See the [paper](https://arxiv.org/abs/2007.14966).
* **mirostat_tau**: No idea, see the paper for details. According to the Preset Arena, 8 is a good value.
* **mirostat_eta**: No idea, see the paper for details. According to the Preset Arena, 0.1 is a good value.
* **dynamic_temperature**: Activates Dynamic Temperature. This modifies temperature to range between "dynatemp_low" (minimum) and "dynatemp_high" (maximum), with an entropy-based scaling. The steepness of the curve is controlled by "dynatemp_exponent".
* **mirostat_mode**: Activates Mirostat sampling, an adaptive decoding method that dynamically controls output perplexity for higher-quality text generation. 0 is disabled. 1 is the classic Mirostat algorithm described in [the paper](https://arxiv.org/abs/2007.14966), but can be less stable, or “wobbly,” and produce less coherent text. 2 is the improved version that is more stable and has lower perplexity, recommended for most use cases.
*Note: Use either mirostat or dynamic_temperature, not both at the same time.*
* **mirostat_tau**: Target perplexity for Mirostat sampling. Controls how “surprising” the text is. Higher values = more diverse, lower = more predictable. Preset Arena suggests 8 as a good value.
* **mirostat_eta**: Learning rate for Mirostat's perplexity adjustment. Higher values = adapts faster but less stable, lower values = slower but more stable. Preset Arena suggests 0.1 as a good value.
* **dynamic_temperature**: Activates Dynamic Temperature. This modifies temperature to range between "dynatemp_low" (minimum) and "dynatemp_high" (maximum), with an entropy-based scaling. The steepness of the curve is controlled by "dynatemp_exponent".
*Note: Use either dynamic_temperature or mirostat, not both at the same time.*
* **smoothing_factor**: Activates Quadratic Sampling. When `0 < smoothing_factor < 1`, the logits distribution becomes flatter. When `smoothing_factor > 1`, it becomes more peaked.
* **temperature_last**: Makes temperature the last sampler instead of the first. With this, you can remove low probability tokens with a sampler like min_p and then use a high temperature to make the model creative without losing coherency. Note: this parameter takes precedence over "Sampler priority". That means that `temperature`/`dynamic_temperature`/`quadratic_sampling` will be removed from wherever they are and moved to the end of the stack.
* **do_sample**: When unchecked, sampling is entirely disabled, and greedy decoding is used instead (the most likely token is always picked).

View file

@ -5,7 +5,6 @@
| Shortcut | Description |
|-------------------------|--------------------------------------------------|
| Esc | Stop generation |
| Tab | Switch between current tab and Parameters tab |
#### Chat tab
@ -15,6 +14,6 @@
| Ctrl + Enter | Regenerate |
| Alt + Enter | Continue |
| Ctrl + Shift + Backspace| Remove last |
| Ctrl + Shift + K | Copy last |
| Ctrl + Shift + L | Replace last |
| Ctrl + Shift + M | Impersonate |
| ← (Left Arrow) | Navigate to previous version of last assistant message |
| → (Right Arrow) | Navigate to next version of last assistant message (or regenerate if at latest version) |

View file

@ -147,11 +147,15 @@ window.isScrolled = false;
let scrollTimeout;
targetElement.addEventListener("scroll", function() {
// Add scrolling class to disable hover effects
targetElement.classList.add("scrolling");
let diff = targetElement.scrollHeight - targetElement.clientHeight;
if(Math.abs(targetElement.scrollTop - diff) <= 10 || diff == 0) {
let isAtBottomNow = Math.abs(targetElement.scrollTop - diff) <= 10 || diff == 0;
// Add scrolling class to disable hover effects
if (window.isScrolled || !isAtBottomNow) {
targetElement.classList.add("scrolling");
}
if(isAtBottomNow) {
window.isScrolled = false;
} else {
window.isScrolled = true;
@ -163,7 +167,6 @@ targetElement.addEventListener("scroll", function() {
targetElement.classList.remove("scrolling");
doSyntaxHighlighting(); // Only run after scrolling stops
}, 150);
});
// Create a MutationObserver instance
@ -1049,45 +1052,6 @@ new MutationObserver(() => addMiniDeletes()).observe(
);
addMiniDeletes();
//------------------------------------------------
// Maintain distance from bottom when input height changes
//------------------------------------------------
let wasAtBottom = false;
let preservedDistance = 0;
function checkIfAtBottom() {
const distanceFromBottom = targetElement.scrollHeight - targetElement.scrollTop - targetElement.clientHeight;
wasAtBottom = distanceFromBottom <= 1; // Allow for rounding errors
}
function preserveScrollPosition() {
preservedDistance = targetElement.scrollHeight - targetElement.scrollTop - targetElement.clientHeight;
}
function restoreScrollPosition() {
if (wasAtBottom) {
// Force to bottom
targetElement.scrollTop = targetElement.scrollHeight - targetElement.clientHeight;
} else {
// Restore original distance
targetElement.scrollTop = targetElement.scrollHeight - targetElement.clientHeight - preservedDistance;
}
}
// Check position before input
chatInput.addEventListener("beforeinput", () => {
checkIfAtBottom();
preserveScrollPosition();
});
// Restore after input
chatInput.addEventListener("input", () => {
requestAnimationFrame(() => restoreScrollPosition());
});
// Update wasAtBottom when user scrolls
targetElement.addEventListener("scroll", checkIfAtBottom);
//------------------------------------------------
// Fix autoscroll after fonts load
//------------------------------------------------