Mirror of https://github.com/neonbjb/tortoise-tts.git, synced 2026-01-27 10:54:18 +01:00
enable deepspeed on apple silicon

parent bf7976172e
commit 3d419a1cfb

setup.py | 7
@@ -28,9 +28,10 @@ setuptools.setup(
         'scipy',
         'librosa',
         'transformers==4.31.0',
-        'tokenizers==0.14.0',
-        'scipy==1.13.1'
-        # 'deepspeed==0.8.3',
+        'tokenizers',
+        'scipy==1.13.1',
+        'deepspeed',
+        'py-cpuinfo'
     ],
     classifiers=[
         "Programming Language :: Python :: 3",
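The dependency changes above unpin tokenizers, give the scipy pin its trailing comma, and replace the commented-out deepspeed==0.8.3 entry with an unpinned deepspeed plus py-cpuinfo (presumably pulled in because DeepSpeed uses it for CPU detection). A minimal sketch, not part of this commit, for checking that the newly listed packages import in the current environment; note that the PyPI package py-cpuinfo installs the module cpuinfo:

    # Illustrative check only (not code from this repo): confirm the packages added
    # to setup.py can be imported before passing --use_deepspeed to the scripts below.
    import importlib

    for module in ("deepspeed", "cpuinfo"):  # PyPI 'py-cpuinfo' provides module 'cpuinfo'
        try:
            importlib.import_module(module)
            print(f"{module}: OK")
        except ImportError as err:
            print(f"{module}: not importable ({err})")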
@@ -13,7 +13,7 @@ if __name__ == '__main__':
     parser.add_argument('--voice', type=str, help='Selects the voice to use for generation. See options in voices/ directory (and add your own!) '
                                                   'Use the & character to join two voices together. Use a comma to perform inference on multiple voices.', default='random')
     parser.add_argument('--preset', type=str, help='Which voice preset to use.', default='fast')
-    parser.add_argument('--use_deepspeed', type=str, help='Use deepspeed for speed bump.', default=False)
+    parser.add_argument('--use_deepspeed', type=bool, help='Use deepspeed for speed bump.', default=False)
     parser.add_argument('--kv_cache', type=bool, help='If you disable this please wait for a long a time to get the output', default=True)
     parser.add_argument('--half', type=bool, help="float16(half) precision inference if True it's faster and take less vram and ram", default=True)
     parser.add_argument('--output_path', type=str, help='Where to store outputs.', default='results/')
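The hunk above only changes the declared type of --use_deepspeed from str to bool. Worth noting (an observation about argparse, not something this commit changes): type=bool applies bool() to the raw command-line string, so any non-empty value, including the literal "False", parses as True; an action='store_true' flag is the usual way to get a real on/off switch. A standalone sketch of that behavior:

    # Standalone illustration of argparse's type=bool pitfall (not code from this repo).
    import argparse

    p = argparse.ArgumentParser()
    p.add_argument('--use_deepspeed', type=bool, default=False)
    print(p.parse_args([]).use_deepspeed)                            # False (default)
    print(p.parse_args(['--use_deepspeed', 'False']).use_deepspeed)  # True: bool('False') is True

    # A common alternative: an on/off switch that defaults to False.
    p2 = argparse.ArgumentParser()
    p2.add_argument('--use_deepspeed', action='store_true')
    print(p2.parse_args([]).use_deepspeed)                   # False
    print(p2.parse_args(['--use_deepspeed']).use_deepspeed)  # True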
@@ -25,8 +25,6 @@ if __name__ == '__main__':
     parser.add_argument('--cvvp_amount', type=float, help='How much the CVVP model should influence the output.'
                                                           'Increasing this can in some cases reduce the likelihood of multiple speakers. Defaults to 0 (disabled)', default=.0)
     args = parser.parse_args()
-    if torch.backends.mps.is_available():
-        args.use_deepspeed = False
     os.makedirs(args.output_path, exist_ok=True)
     tts = TextToSpeech(models_dir=args.model_dir, use_deepspeed=args.use_deepspeed, kv_cache=args.kv_cache, half=args.half)
 
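This hunk (and the three matching hunks below) removes the guard that silently forced DeepSpeed off whenever PyTorch's MPS backend, i.e. Apple Silicon, was available; that removal is what "enable deepspeed on apple silicon" refers to, since --use_deepspeed is now respected on those machines. A minimal sketch of the before/after behavior, using a hypothetical resolve_use_deepspeed helper rather than anything from the repo:

    # Sketch of the behavioral change (hypothetical helper, not code from the repo).
    import torch

    def resolve_use_deepspeed(requested: bool, honor_mps_override: bool) -> bool:
        # honor_mps_override=True models the old behavior (force-off when MPS is present);
        # honor_mps_override=False models the behavior after this commit.
        if honor_mps_override and torch.backends.mps.is_available():
            return False
        return requested

    print(resolve_use_deepspeed(True, honor_mps_override=True))   # False on Apple Silicon (old)
    print(resolve_use_deepspeed(True, honor_mps_override=False))  # True, the flag is respected (new)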
@@ -30,8 +30,6 @@ if __name__ == '__main__':
     args = parser.parse_args()
-    if torch.backends.mps.is_available():
-        args.use_deepspeed = False
     tts = TextToSpeech(models_dir=args.model_dir, use_deepspeed=args.use_deepspeed, kv_cache=args.kv_cache, half=args.half)
 
     outpath = args.output_path
@@ -28,8 +28,6 @@ if __name__ == '__main__':
     args = parser.parse_args()
-    if torch.backends.mps.is_available():
-        args.use_deepspeed = False
     tts = TextToSpeech(models_dir=args.model_dir, use_deepspeed=args.use_deepspeed, kv_cache=args.kv_cache, half=args.half)
 
     outpath = args.output_path
@@ -37,8 +37,6 @@ if __name__ == '__main__':
     args = parser.parse_args()
-    if torch.backends.mps.is_available():
-        args.use_deepspeed = False
     tts = TextToSpeech(models_dir=args.model_dir, use_deepspeed=args.use_deepspeed, kv_cache=args.kv_cache, half=args.half)
 
     outpath = args.output_path