fix: replace bare except clauses with except Exception (#7400)

This commit is contained in:
Sense_wang 2026-03-05 05:06:17 +08:00 committed by GitHub
parent 1d1f4dfc88
commit 7bf15ad933
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
13 changed files with 21 additions and 21 deletions

View file

@@ -357,7 +357,7 @@ async def handle_load_model(request_data: LoadModelRequest):
try:
OAImodels._load_model(to_dict(request_data))
return JSONResponse(content="OK")
except:
except Exception:
traceback.print_exc()
return HTTPException(status_code=400, detail="Failed to load the model.")
@@ -378,7 +378,7 @@ async def handle_load_loras(request_data: LoadLorasRequest):
try:
OAImodels.load_loras(request_data.lora_names)
return JSONResponse(content="OK")
except:
except Exception:
traceback.print_exc()
return HTTPException(status_code=400, detail="Failed to apply the LoRA(s).")

View file

@@ -264,7 +264,7 @@ def SD_api_address_update(address):
response = requests.get(url=f'{params["address"]}/sdapi/v1/sd-models')
response.raise_for_status()
# r = response.json()
except:
except Exception:
msg = "❌ No SD API endpoint on:"
return gr.Textbox.update(label=msg)
@@ -284,7 +284,7 @@ def get_checkpoints():
options_json = options.json()
params['sd_checkpoint'] = options_json['sd_model_checkpoint']
params['checkpoint_list'] = [result["title"] for result in models.json()]
except:
except Exception:
params['sd_checkpoint'] = ""
params['checkpoint_list'] = []
@@ -298,7 +298,7 @@ def load_checkpoint(checkpoint):
try:
requests.post(url=f'{params["address"]}/sdapi/v1/options', json=payload)
except:
except Exception:
pass
@@ -307,7 +307,7 @@ def get_samplers():
response = requests.get(url=f'{params["address"]}/sdapi/v1/samplers')
response.raise_for_status()
samplers = [x["name"] for x in response.json()]
except:
except Exception:
samplers = []
return samplers

View file

@@ -37,7 +37,7 @@ class Iteratorize:
ret = self.mfunc(callback=_callback, *args, **self.kwargs)
except StopNowException:
pass
except:
except Exception:
traceback.print_exc()
pass

View file

@@ -1300,7 +1300,7 @@ def load_last_chat_state():
try:
with open(state_file, 'r', encoding='utf-8') as f:
return json.loads(f.read())
except:
except Exception:
pass
return {"last_chats": {}}
@@ -1372,7 +1372,7 @@ def load_history_json(file, history):
update_message_metadata(history['metadata'], "assistant", i, timestamp="")
return history
except:
except Exception:
return history
@@ -1526,7 +1526,7 @@ def upload_character(file, img_path, tavern=False):
decoded_file = file if isinstance(file, str) else file.decode('utf-8')
try:
data = json.loads(decoded_file)
except:
except Exception:
data = yaml.safe_load(decoded_file)
if 'char_name' in data:

View file

@@ -86,7 +86,7 @@ def calculate_perplexity(models, input_dataset, stride, _max_length):
update_model_parameters(model_settings) # hijacking the command-line arguments
unload_model()
shared.model, shared.tokenizer = load_model(model)
except:
except Exception:
cumulative_log += f"Failed to load `{model}`. Moving on.\n\n"
yield cumulative_log
continue

View file

@@ -466,7 +466,7 @@ class LlamaServer:
response = self.session.get(health_url)
if response.status_code == 200:
break
except:
except Exception:
pass
time.sleep(1)
@@ -559,5 +559,5 @@ def filter_stderr_with_progress(process_stderr):
finally:
try:
process_stderr.close()
except:
except Exception:
pass

View file

@@ -121,7 +121,7 @@ def _get_next_logits(prompt, state, use_samplers, previous, top_logits=25, retur
if isinstance(key, bytes):
try:
key = key.decode()
except:
except Exception:
key = key.decode('latin')
output[key] = row[0]

View file

@@ -53,7 +53,7 @@ def get_single(value_type, file):
value = file.read(value_length)
try:
value = value.decode('utf-8')
except:
except Exception:
pass
else:
type_str = _simple_value_packing.get(value_type)

View file

@@ -33,5 +33,5 @@ def count_tokens(text):
try:
tokens = get_encoded_length(text)
return str(tokens)
except:
except Exception:
return '0'

View file

@@ -503,7 +503,7 @@ def do_train(lora_name: str, always_override: bool, q_proj_en: bool, v_proj_en:
print("Model reloaded OK, continue with training.")
else:
return f"Failed to load {selected_model}."
except:
except Exception:
exc = traceback.format_exc()
logger.error('Failed to reload the model.')
print(exc)
@@ -542,7 +542,7 @@ def do_train(lora_name: str, always_override: bool, q_proj_en: bool, v_proj_en:
logger.info("Loading existing LoRA data")
state_dict_peft = torch.load(f"{lora_file_path}/adapter_model.bin", weights_only=True)
set_peft_model_state_dict(lora_model, state_dict_peft)
except:
except Exception:
yield traceback.format_exc().replace('\n', '\n\n')
return

View file

@@ -224,7 +224,7 @@ def load_model_wrapper(selected_model, loader, autoload=False):
yield f"Successfully loaded `{selected_model}`."
else:
yield f"Failed to load `{selected_model}`."
except:
except Exception:
exc = traceback.format_exc()
logger.error('Failed to load the model.')
print(exc)

View file

@@ -71,7 +71,7 @@ def load_state():
try:
with open(state_file, 'r') as f:
return json.load(f)
except:
except Exception:
return {}
return {}

View file

@@ -75,7 +75,7 @@ def signal_handler(sig, frame):
if shared.model and shared.model.__class__.__name__ == 'LlamaServer':
try:
shared.model.stop()
except:
except Exception:
pass
sys.exit(0)