diff --git a/modules/text_generation.py b/modules/text_generation.py
index d6a87ce8..27c5de7d 100644
--- a/modules/text_generation.py
+++ b/modules/text_generation.py
@@ -295,8 +295,6 @@ def generate_reply_HF(question, original_question, state, stopping_strings=None,
         _StopEverythingStoppingCriteria
     )
 
-    # Native ExLlamav3Model handles multimodal internally - no special routing needed
-
     if shared.args.loader == 'Transformers':
         clear_torch_cache()
 