Add guest disk usage via virsh guestinfo --filesystem
Query each running VM's filesystem stats through the guest agent. Show the root filesystem's used/total GB with a color-coded bar in the VM table. Cached with a 30s TTL since disk usage changes slowly.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
59
app.py
59
app.py
@@ -40,6 +40,11 @@ VM_LIVE_TTL = 5
|
||||
# Previous domstats sample — presumably used to compute per-VM CPU deltas
# between polls (see get_vm_live_stats); guarded by its own lock.
_prev_domstats = {"by_name": {}, "ts": 0}
_prev_domstats_lock = threading.Lock()

# VM disk stats cache (guestinfo — per-VM calls, 30s TTL)
_vm_disk_cache = {"data": {}, "ts": 0}
_vm_disk_lock = threading.Lock()
VM_DISK_TTL = 30  # seconds; disk usage changes slowly, so a long TTL is fine
||||
def parse_proc_stat():
|
||||
cores = []
|
||||
@@ -236,6 +241,55 @@ def get_vm_live_stats():
|
||||
return live
|
||||
|
||||
|
||||
def get_vm_disk_stats(running_names):
    """Get filesystem usage for running VMs via ``virsh guestinfo --filesystem``.

    Results are cached for VM_DISK_TTL seconds: the guest-agent query is a
    per-VM subprocess call, and disk usage changes slowly.

    Args:
        running_names: iterable of libvirt domain names that are currently
            running (only running guests can answer guest-agent queries).

    Returns:
        dict mapping VM name -> list of
        ``{"mountpoint", "total_gb", "used_gb"}`` dicts, one per reported
        filesystem with a non-zero total.  VMs whose guest agent did not
        respond are simply absent from the result.
    """
    with _vm_disk_lock:
        if time.time() - _vm_disk_cache["ts"] < VM_DISK_TTL:
            return _vm_disk_cache["data"]

    # The lock is deliberately released while the slow virsh calls run so
    # readers aren't blocked; concurrent callers may race to refresh, and
    # the last writer wins, which is acceptable for a stats cache.
    disks = {}
    for name in running_names:
        # Keep the try narrow: only the subprocess call can legitimately
        # fail here (timeout, virsh missing).  Previously the whole parse
        # was inside this try, so one malformed numeric field (e.g. a blank
        # "fs.N.total-bytes") discarded every filesystem for the VM.
        try:
            result = subprocess.run(
                ["sudo", "virsh", "guestinfo", name, "--filesystem"],
                capture_output=True, text=True, timeout=5
            )
        except Exception:
            continue  # best-effort: skip this VM, keep the others
        if result.returncode != 0:
            continue  # e.g. guest agent not installed / not responding
        vm_disks = _parse_guestinfo_filesystems(result.stdout)
        if vm_disks:
            disks[name] = vm_disks

    with _vm_disk_lock:
        _vm_disk_cache["data"] = disks
        _vm_disk_cache["ts"] = time.time()
        return disks


def _parse_guestinfo_filesystems(output):
    """Parse ``virsh guestinfo --filesystem`` text into usage dicts.

    The output is "key : value" lines keyed like ``fs.count``,
    ``fs.N.mountpoint``, ``fs.N.total-bytes``, ``fs.N.used-bytes``.
    Malformed or missing numeric fields are treated as 0 so one bad entry
    cannot invalidate the rest.

    Returns a list of {"mountpoint", "total_gb", "used_gb"} dicts for every
    filesystem with a positive total size.
    """
    fields = {}
    for line in output.split("\n"):
        if ":" not in line:
            continue
        key, val = line.split(":", 1)
        fields[key.strip()] = val.strip()

    def _to_int(value):
        # Guest agents occasionally blank or omit numeric fields.
        try:
            return int(value)
        except (TypeError, ValueError):
            return 0

    vm_disks = []
    for i in range(_to_int(fields.get("fs.count", 0))):
        total_b = _to_int(fields.get(f"fs.{i}.total-bytes", 0))
        used_b = _to_int(fields.get(f"fs.{i}.used-bytes", 0))
        if total_b > 0:
            vm_disks.append({
                "mountpoint": fields.get(f"fs.{i}.mountpoint", ""),
                "total_gb": round(total_b / (1024 ** 3), 1),
                "used_gb": round(used_b / (1024 ** 3), 1),
            })
    return vm_disks
|
||||
|
||||
|
||||
def get_vm_base_info():
|
||||
"""Get base VM info (dominfo). Cached for VM_BASE_TTL seconds."""
|
||||
with _vm_base_lock:
|
||||
@@ -286,9 +340,11 @@ def get_vm_base_info():
|
||||
|
||||
|
||||
def get_vms():
|
||||
"""Get VM list with live CPU % and memory usage merged in."""
|
||||
"""Get VM list with live CPU %, memory usage, and disk usage merged in."""
|
||||
base = get_vm_base_info()
|
||||
live = get_vm_live_stats()
|
||||
running_names = [vm["name"] for vm in base if vm["state"] == "running"]
|
||||
disks = get_vm_disk_stats(running_names)
|
||||
|
||||
result = []
|
||||
for vm in base:
|
||||
@@ -306,6 +362,7 @@ def get_vms():
|
||||
vm["cpu_percent"] = 0.0
|
||||
vm["memory_used_mb"] = 0
|
||||
vm["memory_total_mb"] = vm["memory_mb"] if vm["state"] == "running" else 0
|
||||
vm["disks"] = disks.get(vm["name"], [])
|
||||
result.append(vm)
|
||||
return result
|
||||
|
||||
|
||||
@@ -160,6 +160,7 @@
|
||||
.vm-mem-fill { height: 100%; border-radius: 4px; transition: width 0.3s; }
|
||||
.vm-table td.vm-cpu { font-weight: bold; }
|
||||
.vm-table td.vm-mem { white-space: nowrap; }
|
||||
.vm-table td.vm-disk { white-space: nowrap; }
|
||||
|
||||
@media (max-width: 700px) {
|
||||
body { padding: 12px; }
|
||||
@@ -353,7 +354,7 @@ function renderDetail(srv) {
|
||||
return a.name.localeCompare(b.name);
|
||||
});
|
||||
html += '<table class="vm-table"><thead><tr>' +
|
||||
'<th>Name</th><th>State</th><th>CPU</th><th>Memory</th><th>vCPUs</th><th>Autostart</th>' +
|
||||
'<th>Name</th><th>State</th><th>CPU</th><th>Memory</th><th>Disk</th><th>vCPUs</th><th>Autostart</th>' +
|
||||
'</tr></thead><tbody>';
|
||||
for (const vm of sorted) {
|
||||
const isRunning = vm.state === 'running';
|
||||
@@ -361,6 +362,14 @@ function renderDetail(srv) {
|
||||
const memUsed = vm.memory_used_mb || 0;
|
||||
const memTotal = vm.memory_total_mb || vm.memory_mb || 0;
|
||||
const memPct = memTotal > 0 ? (memUsed / memTotal * 100) : 0;
|
||||
const vmDisks = vm.disks || [];
|
||||
const rootDisk = vmDisks.find(d => d.mountpoint === '/') || vmDisks[0];
|
||||
let diskHtml = '—';
|
||||
if (isRunning && rootDisk) {
|
||||
const diskPct = rootDisk.total_gb > 0 ? (rootDisk.used_gb / rootDisk.total_gb * 100) : 0;
|
||||
diskHtml = rootDisk.used_gb + ' / ' + rootDisk.total_gb + ' GB' +
|
||||
'<div class="vm-mem-bar"><div class="vm-mem-fill" style="width:' + diskPct + '%;background:' + usageColor(diskPct) + '"></div></div>';
|
||||
}
|
||||
html += '<tr>' +
|
||||
'<td>' + vm.name + '</td>' +
|
||||
'<td><span class="vm-state ' + vmStateClass(vm.state) + '">' + vm.state + '</span></td>' +
|
||||
@@ -370,6 +379,7 @@ function renderDetail(srv) {
|
||||
'<div class="vm-mem-bar"><div class="vm-mem-fill" style="width:' + memPct + '%;background:' + usageColor(memPct) + '"></div></div>'
|
||||
: (isRunning ? formatMB(vm.memory_mb) : '—')) +
|
||||
'</td>' +
|
||||
'<td class="vm-disk">' + diskHtml + '</td>' +
|
||||
'<td>' + vm.vcpus + '</td>' +
|
||||
'<td>' + (vm.autostart ? 'yes' : 'no') + '</td>' +
|
||||
'</tr>';
|
||||
|
||||
Reference in New Issue
Block a user