Commit 57eb54b8 authored by Extraltodeus's avatar Extraltodeus Committed by GitHub

implement CUDA device selection by ID

parent f49c08ea
import sys, os, shlex
import contextlib
import torch
from modules import errors
# has_mps is only available in nightly pytorch (for now), `getattr` for compatibility
...@@ -9,10 +8,26 @@ has_mps = getattr(torch, 'has_mps', False) ...@@ -9,10 +8,26 @@ has_mps = getattr(torch, 'has_mps', False)
cpu = torch.device("cpu") cpu = torch.device("cpu")
def extract_device_id(args, name):
    """Return the command-line value that follows the first argument containing *name*.

    Note: matching is by substring (``name in arg``), mirroring how the
    original lookup behaved, so ``--device-id`` also matches ``--device-id=...``
    style tokens.

    Args:
        args: sequence of command-line tokens (e.g. ``sys.argv``).
        name: flag text to search for, e.g. ``'--device-id'``.

    Returns:
        The token immediately after the matching flag, or None when the flag
        is absent or is the last token (the original indexed ``args[x+1]``
        unguarded and could raise IndexError in that case).
    """
    for i, arg in enumerate(args):
        # Bounds-check the lookahead: a trailing bare flag yields None
        # instead of crashing with IndexError.
        if name in arg and i + 1 < len(args):
            return args[i + 1]
    return None
def get_optimal_device(): def get_optimal_device():
if torch.cuda.is_available(): if torch.cuda.is_available():
return torch.device("cuda") # CUDA device selection support:
if "shared" not in sys.modules:
commandline_args = os.environ.get('COMMANDLINE_ARGS', "") #re-parse the commandline arguments because using the shared.py module creates an import loop.
sys.argv += shlex.split(commandline_args)
device_id = extract_device_id(sys.argv, '--device-id')
else:
device_id = shared.cmd_opts.device_id
if device_id is not None:
cuda_device = f"cuda:{device_id}"
return torch.device(cuda_device)
else:
return torch.device("cuda")
if has_mps: if has_mps:
return torch.device("mps") return torch.device("mps")
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment