Commit 95727312 authored by Vladimir Mandic, committed by GitHub

remove bytes -> gb conversion

parent 47534577
@@ -467,26 +467,24 @@ class Api:
             return TrainResponse(info = "train embedding error: {error}".format(error = error))
 
     def get_memory(self):
-        def gb(val: float):
-            return round(val / 1024 / 1024 / 1024, 2)
         try:
             import os, psutil
             process = psutil.Process(os.getpid())
-            res = process.memory_info()
-            ram_total = 100 * res.rss / process.memory_percent()
-            ram = { 'free': gb(ram_total - res.rss), 'used': gb(res.rss), 'total': gb(ram_total) }
+            res = process.memory_info() # only rss is cross-platform guaranteed so we dont rely on other values
+            ram_total = 100 * res.rss / process.memory_percent() # and total memory is calculated as actual value is not cross-platform safe
+            ram = { 'free': ram_total - res.rss, 'used': res.rss, 'total': ram_total }
         except Exception as err:
             ram = { 'error': f'{err}' }
         try:
             import torch
             if torch.cuda.is_available():
                 s = torch.cuda.mem_get_info()
-                system = { 'free': gb(s[0]), 'used': gb(s[1] - s[0]), 'total': gb(s[1]) }
+                system = { 'free': s[0], 'used': s[1] - s[0], 'total': s[1] }
                 s = dict(torch.cuda.memory_stats(shared.device))
-                allocated = { 'current': gb(s['allocated_bytes.all.current']), 'peak': gb(s['allocated_bytes.all.peak']) }
-                reserved = { 'current': gb(s['reserved_bytes.all.current']), 'peak': gb(s['reserved_bytes.all.peak']) }
-                active = { 'current': gb(s['active_bytes.all.current']), 'peak': gb(s['active_bytes.all.peak']) }
-                inactive = { 'current': gb(s['inactive_split_bytes.all.current']), 'peak': gb(s['inactive_split_bytes.all.peak']) }
+                allocated = { 'current': s['allocated_bytes.all.current'], 'peak': s['allocated_bytes.all.peak'] }
+                reserved = { 'current': s['reserved_bytes.all.current'], 'peak': s['reserved_bytes.all.peak'] }
+                active = { 'current': s['active_bytes.all.current'], 'peak': s['active_bytes.all.peak'] }
+                inactive = { 'current': s['inactive_split_bytes.all.current'], 'peak': s['inactive_split_bytes.all.peak'] }
                 warnings = { 'retries': s['num_alloc_retries'], 'oom': s['num_ooms'] }
                 cuda = {
                     'system': system,
...
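
With the gb() helper removed, the memory endpoint now reports raw byte counts, so any GB view has to be reconstructed by the caller. The snippet below is a minimal client-side sketch of that conversion; the endpoint path, port, and response layout are assumptions inferred from this diff, not a verified API contract.

# Client-side sketch: convert the raw byte values returned by the
# memory endpoint back to GB. The URL and response shape below are
# assumptions based on this diff, not a guaranteed contract.
import requests

def to_gb(val: float) -> float:
    return round(val / 1024 / 1024 / 1024, 2)

resp = requests.get("http://127.0.0.1:7860/sdapi/v1/memory")  # assumed endpoint
data = resp.json()

ram = data.get("ram", {})
if "error" not in ram:
    print("ram:", {k: to_gb(v) for k, v in ram.items()})

cuda_system = data.get("cuda", {}).get("system", {})
if cuda_system:
    print("cuda system:", {k: to_gb(v) for k, v in cuda_system.items()})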