diff --git a/cluster/builder.go b/cluster/builder.go
index 3bb70c0..7b0a617 100644
--- a/cluster/builder.go
+++ b/cluster/builder.go
@@ -1,10 +1,10 @@
package cluster
import (
- "log"
"time"
"git.staur.ca/stobbsm/clustvirt/lib/host"
+ "git.staur.ca/stobbsm/clustvirt/lib/log"
)
// ClusterBuilder is used to build a Cluster object, which can then be used
@@ -42,12 +42,17 @@ func (c *ClusterBuilder) DefaultHostURI(uri *host.URI) *ClusterBuilder {
func (c *ClusterBuilder) AddHost(h string) *ClusterBuilder {
if _, ok := c.cluster.hosts[h]; ok {
- log.Println("warning: trying to add duplicate host")
+ log.Warn("cluster.AddHost").
+ Str("hostname", h).
+ Msg("won't overwrite existing host")
return c
}
hc, err := host.ConnectHost(c.cluster.defaultURI, h)
if err != nil {
- log.Printf("failed to connect to host: %s, %s", h, err)
+ log.Error("cluster.AddHost").
+ Str("hostname", h).
+ Err(err).
+ Msg("failed to connect to host")
return c
}
c.cluster.hosts[h] = hc
@@ -56,12 +61,17 @@ func (c *ClusterBuilder) AddHost(h string) *ClusterBuilder {
func (c *ClusterBuilder) AddHostWithURI(h string, uri *host.URI) *ClusterBuilder {
if _, ok := c.cluster.hosts[h]; ok {
- log.Println("warning: trying to add duplicate host")
+ log.Warn("cluster.AddHostWithURI").
+ Str("hostname", h).
+ Msg("won't overwrite existing host")
return c
}
hc, err := host.ConnectHost(uri, h)
if err != nil {
- log.Printf("failed to connect to host: %s, %s", h, err)
+ log.Error("cluster.AddHostWithURI").
+ Str("hostname", h).
+ Err(err).
+ Msg("failed to connect to host")
return c
}
c.cluster.hosts[h] = hc
diff --git a/cluster/stats.go b/cluster/stats.go
index 3c755dc..ef9aefe 100644
--- a/cluster/stats.go
+++ b/cluster/stats.go
@@ -1,27 +1,29 @@
package cluster
+import "git.staur.ca/stobbsm/clustvirt/lib/storagepool"
+
// ClusterStats is used to gather stats for the entire cluster
// Combined with StatsDiff, we can get some basic cluster wide stats tracking
type ClusterStats struct {
// CPU Statistics including number of CPUs
- CPU CPUStats
+ CPU CPUStats
// Memory provides information about the amount of memory, including free and
// allocated memory
- Memory MemoryStats
+ Memory MemoryStats
// Storage provides information about storage pools, Only get's stats for active
// pools, and will not activate pools that are not already active.
// Trys to sort out shared file systems from local filesystems using the Type parameter
// of Host.StoragePoolInfo
Storage StorageStats
// Volume provides information on allocated volumes used in the cluster
- Volume VolumeStats
+ Volume VolumeStats
// VM provides VM specific counters for the cluster
- VM VMStats
+ VM VMStats
// Host provides Host information for the cluster
- Host HostStats
+ Host HostStats
// Network provices available networks, and how many are shared between hosts
Network NetworkStats
- // NetIF provides information about Libvirt allocated networks, usable by the
+ // NetIF provides information about Libvirt allocated networks, usable by the
// libvirt cluster
NetIF NetIFStats
@@ -52,6 +54,7 @@ type MemoryStats struct {
// StorageStats provides information about the available storage pools in the cluster,
// including the amount of space available, allocated, and how many pools are shared
// between hosts
+// All sizes are in Bytes
type StorageStats struct {
Total uint64
Used uint64
@@ -81,7 +84,7 @@ type VMStats struct {
type HostStats struct {
Count uint32
Available uint32
- Nodes uint32
+ Nodes uint32
}
// NetworkStats provides informatoin about the available Host network connections,
@@ -109,7 +112,7 @@ type DeviceStats struct {
// SecretStats provides the number of secrets defined throughout the cluster.
// Shared secrets are only counted once, and are recognized by their UUID
type SecretStats struct {
- Count uint32
+ Count uint32
Shared uint32
}
@@ -117,7 +120,7 @@ type SecretStats struct {
type StatDiff struct {
CPU CPUDiff
Memory MemoryDiff
- Storage StorageStats
+ Storage StorageDiff
Volume VolumeDiff
VM VMDiff
Host HostDiff
@@ -186,9 +189,9 @@ func (cs *ClusterStats) Update() {
cs.Host.Count++
cs.Host.Available++
- cs.CPU.Sockets += h.HostInfo.Sockets
- cs.CPU.Cores += h.HostInfo.Cores
- cs.CPU.Threads += h.HostInfo.Threads
+ cs.CPU.Sockets += uint32(h.HostInfo.CPU.Topology.Sockets)
+ cs.CPU.Cores += uint32(h.HostInfo.CPU.Topology.Cores)
+ cs.CPU.Threads += uint32(h.HostInfo.CPU.Topology.Threads)
cs.Memory.Total += h.NodeMemory.Total
cs.Memory.Free += h.NodeMemory.Free
@@ -204,128 +207,71 @@ func (cs *ClusterStats) Update() {
// Already counted this shared pool, move on
continue
}
- if sp.HAEnabled {
+ if isNetworkPool(sp.Type) {
countedSharedPools[sp.Name] = struct{}{}
}
- if !sp.Active {
- cs.Storage.Inactive++
- continue
- }
cs.Storage.Active++
- cs.Storage.Total += sp.Capacity
- cs.Storage.Used += sp.Allocation
- cs.Storage.Free += sp.Capacity - sp.Allocation
- // Volumes in the pool
- cs.Volume.Total += uint32(len(sp.Volumes))
- for range sp.Volumes {
- cs.Volume.Active++
- }
+ cs.Storage.Total += sp.Capacity.Value
+ cs.Storage.Used += sp.Allocation.Value
+ cs.Storage.Free += sp.Capacity.Value - sp.Allocation.Value
}
// VM Count
cs.VM.Count += uint32(len(h.VMList))
for _, vm := range h.VMList {
- cs.CPU.Allocated += uint32(vm.VCPUs)
- cs.Memory.Allocated += uint64(vm.Memory)
- if vm.Active {
- cs.VM.Started++
- continue
- }
- cs.VM.Stopped++
+ cs.CPU.Allocated += uint32(len(vm.VCPUs.VCPU))
+ cs.Memory.Allocated += uint64(vm.Memory.Value)
}
// Network count
cs.Network.Count += uint32(len(h.NetworkList))
- for _, ni := range h.NetworkList {
- if ni.Active {
- cs.Network.Active++
- continue
- }
- cs.Network.Inactive++
- }
+ cs.Network.Inactive++
}
}
// Diff returns a map of all the field and how they changed
func (cs *ClusterStats) Diff() StatDiff {
return StatDiff{
- CPU: struct {
- Sockets int
- Cores int
- Threads int
- Allocated int
- }{
+ CPU: CPUDiff{
Sockets: int(cs.CPU.Sockets) - int(cs.old.CPU.Sockets),
Cores: int(cs.CPU.Cores) - int(cs.old.CPU.Cores),
Threads: int(cs.CPU.Threads) - int(cs.old.CPU.Threads),
Allocated: int(cs.CPU.Allocated) - int(cs.old.CPU.Allocated),
},
- Memory: struct {
- Total int
- Free int
- Buffers int
- Cached int
- Allocated int
- }{
- Total: int(cs.old.Memory.Total - cs.Memory.Total),
- Free: int(cs.old.Memory.Free - cs.Memory.Free),
- Buffers: int(cs.old.Memory.Buffers - cs.Memory.Buffers),
- Cached: int(cs.old.Memory.Cached - cs.Memory.Cached),
- Allocated: int(cs.old.Memory.Allocated - cs.Memory.Allocated),
+ Memory: MemoryDiff{
+ Total: int64(cs.old.Memory.Total) - int64(cs.Memory.Total),
+ Free: int64(cs.old.Memory.Free) - int64(cs.Memory.Free),
+ Buffers: int64(cs.old.Memory.Buffers) - int64(cs.Memory.Buffers),
+ Cached: int64(cs.old.Memory.Cached) - int64(cs.Memory.Cached),
+ Allocated: int64(cs.old.Memory.Allocated) - int64(cs.Memory.Allocated),
},
- Storage: struct {
- Total int
- Used int
- Free int
- Active int
- Inactive int
- Pools int
- Volumes struct {
- Total int
- Active int
- Inactive int
- }
- }{
- Total: int(cs.old.Storage.Total - cs.Storage.Total),
- Used: int(cs.old.Storage.Used - cs.Storage.Used),
- Free: int(cs.old.Storage.Free - cs.Storage.Free),
- Active: int(cs.old.Storage.Active - cs.Storage.Active),
- Inactive: int(cs.old.Storage.Inactive - cs.Storage.Inactive),
- Pools: int(cs.old.Storage.Pools - cs.Storage.Pools),
- Volumes: struct {
- Total int
- Active int
- Inactive int
- }{
- Total: int(cs.old.Volume.Total - cs.Volume.Total),
- Active: int(cs.old.Volume.Active - cs.Volume.Active),
- Inactive: int(cs.old.Volume.Inactive - cs.Volume.Inactive),
- },
+ Storage: StorageDiff{
+ Total: int64(cs.old.Storage.Total) - int64(cs.Storage.Total),
+ Used: int64(cs.old.Storage.Used) - int64(cs.Storage.Used),
+ Free: int64(cs.old.Storage.Free) - int64(cs.Storage.Free),
+ Active: int64(cs.old.Storage.Active) - int64(cs.Storage.Active),
+ Inactive: int64(cs.old.Storage.Inactive) - int64(cs.Storage.Inactive),
+ Pools: int(cs.old.Storage.Pools) - int(cs.Storage.Pools),
},
- VM: struct {
- Count int
- Started int
- Stopped int
- }{
- Count: int(cs.old.VM.Count - cs.VM.Count),
- Started: int(cs.old.VM.Started - cs.VM.Started),
- Stopped: int(cs.old.VM.Stopped - cs.VM.Stopped),
+ Volume: VolumeDiff{
+ Total: int(cs.old.Volume.Total) - int(cs.Volume.Total),
+ Active: int(cs.old.Volume.Active) - int(cs.Volume.Active),
+ Inactive: int(cs.old.Volume.Inactive) - int(cs.Volume.Inactive),
},
- Host: struct {
- Count int
- Available int
- }{
- Count: int(cs.old.Host.Count - cs.Host.Count),
- Available: int(cs.old.Host.Available - cs.Host.Available),
+
+ VM: VMDiff{
+ Count: int(cs.old.VM.Count) - int(cs.VM.Count),
+ Started: int(cs.old.VM.Started) - int(cs.VM.Started),
+ Stopped: int(cs.old.VM.Stopped) - int(cs.VM.Stopped),
},
- Network: struct {
- Count int
- Active int
- Inactive int
- }{
- Count: int(cs.old.Network.Count - cs.Network.Count),
- Active: int(cs.old.Network.Active - cs.Network.Active),
- Inactive: int(cs.old.Network.Inactive - cs.Network.Inactive),
+ Host: HostDiff{
+ Count: int(cs.old.Host.Count) - int(cs.Host.Count),
+ Available: int(cs.old.Host.Available) - int(cs.Host.Available),
+ },
+ Network: NetworkDiff{
+ Count: int(cs.old.Network.Count) - int(cs.Network.Count),
+ Active: int(cs.old.Network.Active) - int(cs.Network.Active),
+ Inactive: int(cs.old.Network.Inactive) - int(cs.Network.Inactive),
},
}
}
@@ -366,3 +312,12 @@ func (cs *ClusterStats) reset() {
cs.Network.Active = 0
cs.Network.Inactive = 0
}
+
+func isNetworkPool(pooltype string) bool {
+ for _, t := range storagepool.NetTypes {
+ if t == pooltype {
+ return true
+ }
+ }
+ return false
+}
diff --git a/go.mod b/go.mod
index b90e8f5..b3035a8 100644
--- a/go.mod
+++ b/go.mod
@@ -6,15 +6,14 @@ require (
github.com/a-h/templ v0.2.598
github.com/go-chi/chi/v5 v5.0.12
github.com/jaypipes/pcidb v1.0.0
- github.com/wcharczuk/go-chart/v2 v2.1.1
- golang.org/x/exp v0.0.0-20240318143956-a85f2c67cd81
+ github.com/rs/zerolog v1.32.0
libvirt.org/go/libvirt v1.10001.0
libvirt.org/go/libvirtxml v1.10001.0
)
require (
- github.com/blend/go-sdk v1.20220411.3 // indirect
- github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 // indirect
+ github.com/mattn/go-colorable v0.1.13 // indirect
+ github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mitchellh/go-homedir v1.0.0 // indirect
- golang.org/x/image v0.11.0 // indirect
+ golang.org/x/sys v0.15.0 // indirect
)
diff --git a/go.sum b/go.sum
index 46b28d1..93f53f0 100644
--- a/go.sum
+++ b/go.sum
@@ -1,54 +1,30 @@
github.com/a-h/templ v0.2.598 h1:6jMIHv6wQZvdPxTuv87erW4RqN/FPU0wk7ZHN5wVuuo=
github.com/a-h/templ v0.2.598/go.mod h1:SA7mtYwVEajbIXFRh3vKdYm/4FYyLQAtPH1+KxzGPA8=
-github.com/blend/go-sdk v1.20220411.3 h1:GFV4/FQX5UzXLPwWV03gP811pj7B8J2sbuq+GJQofXc=
-github.com/blend/go-sdk v1.20220411.3/go.mod h1:7lnH8fTi6U4i1fArEXRyOIY2E1X4MALg09qsQqY1+ak=
+github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/go-chi/chi/v5 v5.0.12 h1:9euLV5sTrTNTRUU9POmDUvfxyj6LAABLUcEWO+JJb4s=
github.com/go-chi/chi/v5 v5.0.12/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
-github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 h1:DACJavvAHhabrF08vX0COfcOBJRhZ8lUbR+ZWIs0Y5g=
-github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/jaypipes/pcidb v1.0.0 h1:vtZIfkiCUE42oYbJS0TAq9XSfSmcsgo9IdxSm9qzYU8=
github.com/jaypipes/pcidb v1.0.0/go.mod h1:TnYUvqhPBzCKnH34KrIX22kAeEbDCSRJ9cqLRCuNDfk=
+github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
+github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
+github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/wcharczuk/go-chart/v2 v2.1.1 h1:2u7na789qiD5WzccZsFz4MJWOJP72G+2kUuJoSNqWnE=
-github.com/wcharczuk/go-chart/v2 v2.1.1/go.mod h1:CyCAUt2oqvfhCl6Q5ZvAZwItgpQKZOkCJGb+VGv6l14=
-github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/exp v0.0.0-20240318143956-a85f2c67cd81 h1:6R2FC06FonbXQ8pK11/PDFY6N6LWlf9KlzibaCapmqc=
-golang.org/x/exp v0.0.0-20240318143956-a85f2c67cd81/go.mod h1:CQ1k9gNrJ50XIzaKCRR2hssIjF07kZFEiieALBM/ARQ=
-golang.org/x/image v0.11.0 h1:ds2RoQvBvYTiJkwpSFDwCcDFNX7DqjL2WsUgTNk0Ooo=
-golang.org/x/image v0.11.0/go.mod h1:bglhjqbqVuEb9e9+eNR45Jfu7D+T4Qan+NhQk8Ck2P8=
-golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
-golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
+github.com/rs/zerolog v1.32.0 h1:keLypqrlIjaFsbmJOBdB/qvyF8KEtCWHwobLp5l/mQ0=
+github.com/rs/zerolog v1.32.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
+golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
libvirt.org/go/libvirt v1.10001.0 h1:lEVDNE7xfzmZXiDEGIS8NvJSuaz11OjRXw+ufbQEtPY=
libvirt.org/go/libvirt v1.10001.0/go.mod h1:1WiFE8EjZfq+FCVog+rvr1yatKbKZ9FaFMZgEqxEJqQ=
libvirt.org/go/libvirtxml v1.10001.0 h1:r9WBs24r3mxIG3/hAMRRwDMy4ZaPHmhHjw72o/ceXic=
diff --git a/lib/guest/lib.go b/lib/guest/lib.go
index dc52ec5..0f0f728 100644
--- a/lib/guest/lib.go
+++ b/lib/guest/lib.go
@@ -3,9 +3,9 @@ package guest
import (
"errors"
- "log"
"time"
+ "git.staur.ca/stobbsm/clustvirt/lib/log"
"git.staur.ca/stobbsm/clustvirt/util"
"libvirt.org/go/libvirt"
)
@@ -248,7 +248,7 @@ func GetGuest(name string, conn *libvirt.Connect) (*VM, error) {
// Not errors, but still log the warnings when this happens
if g.BackupXML, err = g.dom.BackupGetXMLDesc(0); err != nil {
- log.Printf("WARNING: While loading backup information: %s", err)
+ log.Warn("guest.GetGuest").Str("guest", g.Name).Err(err).Send()
}
go func() {
@@ -262,7 +262,7 @@ func GetGuest(name string, conn *libvirt.Connect) (*VM, error) {
// Close closes an open connection
func (g *VM) Close() error {
- log.Println("Closing VM", g.Name)
+ log.Info("guest.Close").Str("guest", g.Name).Msg("closing vm")
close(g.close)
return <-g.closeErr
}
diff --git a/lib/host/charts.go b/lib/host/charts.go
deleted file mode 100644
index 87b0c86..0000000
--- a/lib/host/charts.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package host
-
-import (
- "fmt"
- "log"
- "strings"
-
- "github.com/wcharczuk/go-chart/v2"
-)
-
-// This file contains utilities to create charts based on the different data
-// When a chart is rendered, it can return either an SVG or PNG. SVG is preferrable.
-
-func (h *Host) ChartMemory() string {
- h.getNodeInfo()
- log.Println("Generating Chart")
- memFree := float64(h.NodeMemory.Free)
- memCached := float64(h.NodeMemory.Cached)
- memBuffer := float64(h.NodeMemory.Buffers)
- memTotal := float64(h.NodeMemory.Total)
-
- c := chart.PieChart{
- Title: fmt.Sprintf("Memory Info %s", h.SystemHostName),
- Width: 256,
- Height: 256,
- Values: []chart.Value{
- {Value: memTotal - memFree, Label: fmt.Sprintf("%.2f%% Free", memFree/memTotal*100)},
- {Value: memTotal - memCached, Label: fmt.Sprintf("%.2f%% Cached", memCached/memTotal*100)},
- {Value: memTotal - memBuffer, Label: fmt.Sprintf("%.2f%% Buffers", memBuffer/memTotal*100)},
- },
- }
- sb := new(strings.Builder)
- log.Println("Rendering chart")
- if err := c.Render(chart.SVG, sb); err != nil {
- return err.Error()
- }
- return sb.String()
-}
diff --git a/lib/host/errors.go b/lib/host/errors.go
new file mode 100644
index 0000000..8237498
--- /dev/null
+++ b/lib/host/errors.go
@@ -0,0 +1,12 @@
+package host
+
+import "errors"
+
+var (
+ // XML errors
+ ErrGetXML = errors.New("error getting XML")
+ ErrParseXML = errors.New("error parsing XML")
+
+ // Hostinfo errors
+ ErrHostInfo = errors.New("error processing HostInfo")
+)
diff --git a/lib/host/lib.go b/lib/host/lib.go
index 32c9bda..d7050d8 100644
--- a/lib/host/lib.go
+++ b/lib/host/lib.go
@@ -7,10 +7,10 @@
package host
import (
- "log"
"sync"
"git.staur.ca/stobbsm/clustvirt/lib/guest"
+ "git.staur.ca/stobbsm/clustvirt/lib/log"
"libvirt.org/go/libvirt"
"libvirt.org/go/libvirtxml"
)
@@ -114,7 +114,7 @@ func (h *Host) GetGuestByName(name string) (*guest.VM, error) {
// Close triggers closing the host connection
func (h *Host) Close() error {
- log.Println("Closing Host", h.HostName)
+ log.Info("Host.Close").Str("hostname", h.HostName).Msg("closing connection")
close(h.close)
return <-h.closeErr
}
@@ -151,29 +151,33 @@ func (h *Host) hostInfo() {
rawxml, err := h.conn.GetCapabilities()
if err != nil {
- log.Printf("error getting host capabilities XML: %s", err)
+ log.Error("Host.hostInfo").Str("hostname", h.HostName).Err(ErrGetXML).Err(err).Send()
}
xmldoc := &libvirtxml.Caps{}
if err = xmldoc.Unmarshal(rawxml); err != nil {
- log.Printf("error parsing host capabilities XML: %s", err)
+ log.Error("Host.hostInfo").Str("hostname", h.HostName).Err(ErrParseXML).Err(err).Send()
}
h.HostInfo = &xmldoc.Host
h.SystemHostName, err = h.conn.GetHostname()
if err != nil {
- log.Println("error getting system host name: %s", err)
- } else {
+ log.Error("Host.hostInfo").Str("hostname", h.HostName).Err(err).Msg("unable to set SystemHostName")
+ }
+ if h.SystemHostName == "" {
h.SystemHostName = h.HostName
}
+ log.Info("Host.hostInfo").Str("hostname", h.HostName).Str("system hostname", h.SystemHostName).Msg("set system hostname")
h.LibVersion, err = h.conn.GetLibVersion()
if err != nil {
- log.Println(err)
+ log.Error("Host.hostInfo").Str("hostname", h.HostName).Err(err).Msg("unable to get libversion")
+ return
}
+ log.Info("Host.hostInfo").Str("hostname", h.HostName).Uint32("libversion", h.LibVersion).Send()
}
func (h *Host) memoryInfo() {
mi, err := h.conn.GetMemoryStats(libvirt.NODE_MEMORY_STATS_ALL_CELLS, 0)
if err != nil {
- log.Println(err)
+ log.Error("Host.memoryInfo").Str("hostname", h.HostName).Err(err).Send()
}
h.NodeMemory = &NodeMemoryInfo{
Total: mi.Total,
@@ -184,20 +188,20 @@ func (h *Host) memoryInfo() {
h.SysInfo, err = h.conn.GetSysinfo(0)
if err != nil {
- log.Println(err)
+ log.Error("Host.memoryInfo").Str("hostname", h.HostName).Err(err).Msg("failed to GetSysInfo")
}
h.Alive, err = h.conn.IsAlive()
if err != nil {
- log.Println(err)
+ log.Error("Host.memoryInfo").Str("hostname", h.HostName).Err(err).Msg("failed check to IsAlive")
}
h.Encrypted, err = h.conn.IsEncrypted()
if err != nil {
- log.Println(err)
+ log.Error("Host.memoryInfo").Str("hostname", h.HostName).Err(err).Msg("failed to check IsEncrypted")
}
h.Secure, err = h.conn.IsSecure()
if err != nil {
- log.Println(err)
+ log.Error("Host.memoryInfo").Str("hostname", h.HostName).Err(err).Msg("failed to check IsSecure")
}
}
@@ -205,7 +209,7 @@ func (h *Host) getStoragePools() {
// Get list of all storage pools on the host
spools, err := h.conn.ListAllStoragePools(0)
if err != nil {
- log.Println(err)
+ log.Error("Host.getStoragePools").Str("hostname", h.HostName).Err(err).Msg("failed to ListAllStoragePools")
}
if len(spools) > 0 {
h.StoragePoolList = make([]*libvirtxml.StoragePool, len(spools))
@@ -216,13 +220,13 @@ func (h *Host) getStoragePools() {
// Get the XML represenation of each storage pool, parse it with libvirtxml
rawxml, err := s.GetXMLDesc(0)
if err != nil {
- log.Println("error getting storage pool xml: %s", err)
+ log.Error("Host.getStoragePools").Str("hostname", h.HostName).Err(ErrGetXML).Err(err).Send()
return
}
xmldoc := &libvirtxml.StoragePool{}
err = xmldoc.Unmarshal(rawxml)
if err != nil {
- log.Println("error parsing storage pool XML: %s", err)
+ log.Error("Host.getStoragePools").Str("hostname", h.HostName).Err(ErrParseXML).Err(err).Send()
return
}
h.StoragePoolList[i] = xmldoc
@@ -230,7 +234,7 @@ func (h *Host) getStoragePools() {
// Get list of all storage volumes in the current storage pool
svols, err := s.ListAllStorageVolumes(0)
if err != nil {
- log.Println(err)
+ log.Error("Host.getStoragePools").Str("hostname", h.HostName).Str("storagepool", h.StoragePoolList[i].Name).Err(err).Msg("failed to ListAllStorageVolumes")
}
if len(svols) > 0 {
// define temporary variable to hold slice of StorageVolume, that can
@@ -243,29 +247,32 @@ func (h *Host) getStoragePools() {
defer sv.Free()
rawxml, err = sv.GetXMLDesc(0)
if err != nil {
- log.Printf("error getting XML from storage volume: %s", err)
+ log.Error("Host.getStoragePools").Str("hostname", h.HostName).Err(ErrGetXML).Err(err).Send()
return
}
xmldoc := &libvirtxml.StorageVolume{}
err = xmldoc.Unmarshal(rawxml)
if err != nil {
- log.Printf("error parsing storage volume XML: %s", err)
+ log.Error("Host.getStoragePools").Str("hostname", h.HostName).Err(ErrParseXML).Err(err).Send()
return
}
-
tvl[j] = xmldoc
+ log.Info("Host.getStoragePools").Str("hostname", h.HostName).Str("added volume", tvl[j].Name).Send()
}()
}
// Append the contents of tvl to h.VolumeList
if h.VolumeList == nil {
+ log.Info("Host.getStoragePools").Str("hostname", h.HostName).Str("storagepool", h.StoragePoolList[i].Name).Msg("initializing VolumeList")
h.VolumeList = []*libvirtxml.StorageVolume{}
}
// Only append if the temporary storage volume isn't nil
+ log.Info("Host.getStoragePools").Str("hostname", h.HostName).Str("storagepool", h.StoragePoolList[i].Name).Int("VolumeList count", len(h.VolumeList)).Msg("before filter")
for _, tsv := range tvl {
if tsv != nil {
h.VolumeList = append(h.VolumeList, tsv)
}
}
+ log.Info("Host.getStoragePools").Str("hostname", h.HostName).Str("storagepool", h.StoragePoolList[i].Name).Int("VolumeList count", len(h.VolumeList)).Msg("after filter")
}
}()
}
@@ -275,7 +282,7 @@ func (h *Host) getStoragePools() {
func (h *Host) getSecretsInfo() {
nsecrets, err := h.conn.ListAllSecrets(0)
if err != nil {
- log.Printf("error loading secrets from host: %s", err)
+ log.Error("Host.getSecretsInfo").Str("hostname", h.HostName).Err(err).Send()
}
if len(nsecrets) > 0 {
h.SecretList = make([]*libvirtxml.Secret, len(nsecrets))
@@ -284,11 +291,11 @@ func (h *Host) getSecretsInfo() {
defer s.Free()
rawxml, err := s.GetXMLDesc(0)
if err != nil {
- log.Printf("error getting secret XML", err)
+ log.Error("Host.getSecretsInfo").Str("hostname", h.HostName).Err(ErrGetXML).Err(err).Send()
}
xmldoc := &libvirtxml.Secret{}
if err = xmldoc.Unmarshal(rawxml); err != nil {
- log.Printf("error parsing secret XML: %s", err)
+ log.Error("Host.getSecretsInfo").Str("hostname", h.HostName).Err(ErrParseXML).Err(err).Send()
}
h.SecretList[i] = xmldoc
}()
@@ -300,7 +307,8 @@ func (h *Host) getDomainInfo() {
// getDomainInfo
doms, err := h.conn.ListAllDomains(0)
if err != nil {
- log.Println(err)
+ log.Error("Host.getDomainInfo").Str("hostname", h.HostName).Err(err).Send()
+ return
}
if len(doms) > 0 {
h.VMList = make([]*libvirtxml.Domain, len(doms))
@@ -309,14 +317,15 @@ func (h *Host) getDomainInfo() {
defer d.Free()
rawxml, err := d.GetXMLDesc(0)
if err != nil {
- log.Printf("error getting domain XML: %s", err)
+ log.Error("Host.getDomainInfo").Str("hostname", h.HostName).Err(ErrGetXML).Err(err).Send()
+ return
}
h.VMList[i] = &libvirtxml.Domain{}
if err = h.VMList[i].Unmarshal(rawxml); err != nil {
- log.Printf("error parsing domain XML: %s", err)
+ log.Error("Host.getDomainInfo").Str("hostname", h.HostName).Err(ErrParseXML).Err(err).Send()
+ return
}
}()
-
}
}
}
@@ -325,7 +334,7 @@ func (h *Host) getIfaceInfo() {
// getIfaceInfo
ifaces, err := h.conn.ListAllInterfaces(0)
if err != nil {
- log.Println(err)
+ log.Error("Host.getIfaceInfo").Str("hostname", h.HostName).Err(err).Send()
}
if len(ifaces) > 0 {
h.NetIfFList = make([]*libvirtxml.Interface, len(ifaces))
@@ -334,12 +343,12 @@ func (h *Host) getIfaceInfo() {
defer ni.Free()
rawxml, err := ni.GetXMLDesc(0)
if err != nil {
- log.Printf("error getting interface XML: %s")
+ log.Error("Host.getIfaceInfo").Str("hostname", h.HostName).Err(ErrGetXML).Err(err).Send()
return
}
h.NetIfFList[i] = &libvirtxml.Interface{}
if err = h.NetIfFList[i].Unmarshal(rawxml); err != nil {
- log.Printf("error parsing interface XML: %s")
+ log.Error("Host.getIfaceInfo").Str("hostname", h.HostName).Err(ErrParseXML).Err(err).Send()
return
}
}()
@@ -349,31 +358,26 @@ func (h *Host) getIfaceInfo() {
func (h *Host) getNetsInfo() {
// getNetsInfo
- nets, err := h.conn.ListNetworks()
+ nets, err := h.conn.ListAllNetworks(0)
if err != nil {
- log.Println(err)
+ log.Error("Host.getNetsInfo").Str("hostname", h.HostName).Err(err).Send()
}
if len(nets) > 0 {
- h.NetworkList = make([]NetworkInfo, len(nets))
- for i, netName := range nets {
- net, err := h.conn.LookupNetworkByName(netName)
- if err != nil {
- log.Println(err)
- }
- if h.NetworkList[i].Name, err = net.GetName(); err != nil {
- log.Println(err)
- }
- if h.NetworkList[i].UUID, err = net.GetUUID(); err != nil {
- log.Println(err)
- }
- if h.NetworkList[i].XML, err = net.GetXMLDesc(0); err != nil {
- log.Println(err)
- }
- if h.NetworkList[i].Active, err = net.IsActive(); err != nil {
- log.Println(err)
- }
-
- net.Free()
+ h.NetworkList = make([]*libvirtxml.Network, len(nets))
+ for i, net := range nets {
+ func() {
+ defer net.Free()
+ rawxml, err := net.GetXMLDesc(0)
+ if err != nil {
+ log.Error("Host.getNetsInfo").Str("hostname", h.HostName).Err(ErrGetXML).Err(err).Send()
+ return
+ }
+ h.NetworkList[i] = &libvirtxml.Network{}
+ if err = h.NetworkList[i].Unmarshal(rawxml); err != nil {
+ log.Error("Host.getNetsInfo").Str("hostname", h.HostName).Err(ErrParseXML).Err(err).Send()
+ return
+ }
+ }()
}
}
}
@@ -381,28 +385,24 @@ func (h *Host) getNetsInfo() {
func (h *Host) getDevicesInfo() {
ndevs, err := h.conn.ListAllNodeDevices(0)
if err != nil {
- log.Println(err)
+ log.Error("Host.getDevicesInfo").Str("hostname", h.HostName).Err(err).Send()
}
if len(ndevs) > 0 {
- h.DeviceList = make([]DeviceInfo, len(ndevs))
+ h.DeviceList = make([]*libvirtxml.NodeDevice, len(ndevs))
for i, dev := range ndevs {
- if h.DeviceList[i].Name, err = dev.GetName(); err != nil {
- log.Println(err)
- }
- if h.DeviceList[i].Capabilities, err = dev.ListCaps(); err != nil {
- log.Println(err)
- }
- if h.DeviceList[i].XML, err = dev.GetXMLDesc(0); err != nil {
- log.Println(err)
- }
- dx := &libvirtxml.NodeDevice{}
- if err != dx.Unmarshal(h.DeviceList[i].XML); err != nil {
- log.Println(err)
- }
- h.DeviceList[i].Driver = dx.Driver.Name
- dx.Capability.PCI.Class
-
- dev.Free()
+ func() {
+ defer dev.Free()
+ rawxml, err := dev.GetXMLDesc(0)
+ if err != nil {
+ log.Error("Host.getDevicesInfo").Str("hostname", h.HostName).Err(ErrGetXML).Err(err).Send()
+ return
+ }
+ h.DeviceList[i] = &libvirtxml.NodeDevice{}
+ if err = h.DeviceList[i].Unmarshal(rawxml); err != nil {
+ log.Error("Host.getDevicesInfo").Str("hostname", h.HostName).Err(ErrParseXML).Err(err).Send()
+ return
+ }
+ }()
}
}
}
diff --git a/lib/host/uri.go b/lib/host/uri.go
index 1abdc65..109344e 100644
--- a/lib/host/uri.go
+++ b/lib/host/uri.go
@@ -1,8 +1,9 @@
package host
import (
- "log"
"strings"
+
+ "git.staur.ca/stobbsm/clustvirt/lib/log"
)
// URI is a string type, accessed via the pre-defined variables, and represent
@@ -72,7 +73,12 @@ func (u *URI) ConnectionString(h string) string {
}
}
}
- log.Printf("Connection URI: %s", sb.String())
+ log.Info("Host.ConnectionString").
+ Str("uri.Driver", u.Driver).
+ Str("uri.Transport", u.Transport).
+ Str("uri.Path", u.Path).
+ Strs("uri.Options", u.Options).
+ Str("builtUri", sb.String()).Send()
return sb.String()
}
diff --git a/lib/log/logs.go b/lib/log/logs.go
new file mode 100644
index 0000000..8b58f28
--- /dev/null
+++ b/lib/log/logs.go
@@ -0,0 +1,30 @@
+// Package log provides a central location for ClustVirt logging, wrapping a Zerolog
+// instance to do the job.
+// This will standardize logging throughout ClustVirt, and make it easier to debug
+// issues.
+// The default is to log to Stderr
+// TODO: enable logging to syslog through configuration
+package log
+
+import (
+ "os"
+
+ "github.com/rs/zerolog"
+)
+
+var defaultLogger zerolog.Logger
+
+func init() {
+ defaultLogger = zerolog.New(zerolog.ConsoleWriter{
+ Out: os.Stderr,
+ TimeFormat: zerolog.TimeFormatUnix,
+ })
+}
+
+func Trace(method string) *zerolog.Event { return defaultLogger.Trace().Str("method", method) }
+func Debug(method string) *zerolog.Event { return defaultLogger.Debug().Str("method", method) }
+func Info(method string) *zerolog.Event { return defaultLogger.Info().Str("method", method) }
+func Warn(method string) *zerolog.Event { return defaultLogger.Warn().Str("method", method) }
+func Error(method string) *zerolog.Event { return defaultLogger.Error().Str("method", method) }
+func Fatal(method string) *zerolog.Event { return defaultLogger.Fatal().Str("method", method) }
+func Panic(method string) *zerolog.Event { return defaultLogger.Panic().Str("method", method) }
diff --git a/main.go b/main.go
index 9fa5b64..3e9eeec 100644
--- a/main.go
+++ b/main.go
@@ -14,6 +14,7 @@ import (
"github.com/a-h/templ"
"github.com/go-chi/chi/v5"
"github.com/go-chi/chi/v5/middleware"
+ localmw "git.staur.ca/stobbsm/clustvirt/router/middleware"
)
const DEBUG bool = true
@@ -37,7 +38,7 @@ func main() {
fs := http.StripPrefix("/static/", http.FileServer(http.Dir("public")))
r := chi.NewRouter()
- r.Use(middleware.Logger)
+ r.Use(localmw.Logger)
if DEBUG {
r.Use(middleware.NoCache)
}
diff --git a/router/api/api.go b/router/api/api.go
new file mode 100644
index 0000000..505e5b6
--- /dev/null
+++ b/router/api/api.go
@@ -0,0 +1,3 @@
+// Package api defines the routes for the REST API
+package api
+
diff --git a/router/htmx/htmx.go b/router/htmx/htmx.go
new file mode 100644
index 0000000..dc0d520
--- /dev/null
+++ b/router/htmx/htmx.go
@@ -0,0 +1,4 @@
+// Package htmx contains the routes for the WebUI HTMX
+package htmx
+
+
diff --git a/router/middleware/logger.go b/router/middleware/logger.go
new file mode 100644
index 0000000..e8a517c
--- /dev/null
+++ b/router/middleware/logger.go
@@ -0,0 +1,29 @@
+package middleware
+
+import (
+ "net/http"
+ "time"
+
+ "git.staur.ca/stobbsm/clustvirt/lib/log"
+)
+
+// Logger is chi-compatible middleware that uses the internal log package to log each request
+func Logger(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ start := time.Now()
+ defer func() {
+ log.Info("router.middleware.Logger").
+ Str("httpMethod", r.Method).
+ Str("host", r.Host).
+ Str("uri", r.URL.RequestURI()).
+ Str("user_agent", r.UserAgent()).
+ Str("referer", r.Referer()).
+ Strs("transfer-encoding", r.TransferEncoding).
+ Int64("length", r.ContentLength).
+ Dur("elapsed_ms", time.Since(start)).
+ Msg("incoming connection")
+ }()
+
+ next.ServeHTTP(w, r)
+ })
+}
diff --git a/router/router.go b/router/router.go
new file mode 100644
index 0000000..943366c
--- /dev/null
+++ b/router/router.go
@@ -0,0 +1,57 @@
+// Package router defines the base HTTP server and its routes
+package router
+
+import (
+ "fmt"
+ "net/http"
+ "os"
+ "strings"
+ "time"
+
+ "git.staur.ca/stobbsm/clustvirt/lib/log"
+ "github.com/go-chi/chi/v5"
+ "github.com/go-chi/chi/v5/middleware"
+)
+
+type Server struct {
+ bindAddr string
+ ssl bool
+ middleware []http.Handler
+}
+
+// New creates a new HTTP Server instance.
+// Requires the IP and port number to bind to
+func New(listen string, port int) *Server {
+ s := &Server{bindAddr: fmt.Sprintf("%s:%d", listen, port)}
+
+ return s
+}
+
+// Start starts the server and initializes the router and common middleware
+func (s *Server) Start() {
+ tstart := time.Now()
+ defer func() {
+ log.Info("router.Server.Start").
+ Dur("upTime", time.Since(tstart)).
+ Msg("http server stopped")
+ }()
+ router := chi.NewRouter()
+
+ indev, _ := os.LookupEnv("CLUSTVIRT_DEV")
+ indev = strings.ToLower(indev)
+ switch indev {
+ case "true":
+ fallthrough
+ case "1":
+ fallthrough
+ case "yes":
+ fallthrough
+ case "on":
+ router.Use(middleware.NoCache)
+ }
+}
+
+func (s *Server) AddMiddleware(m http.Handler) {
+ s.middleware = append(s.middleware,
+ m)
+}
diff --git a/util/pcidb.go b/util/pcidb.go
index c764d1c..0681701 100644
--- a/util/pcidb.go
+++ b/util/pcidb.go
@@ -1,8 +1,7 @@
package util
import (
- "log"
-
+ "git.staur.ca/stobbsm/clustvirt/lib/log"
"github.com/jaypipes/pcidb"
)
@@ -13,7 +12,7 @@ var (
const (
pcidbNOTFOUND string = `NOTFOUND`
- pcidbNODB string = `NODBFOUND`
+ pcidbNODB string = `NODBFOUND`
)
func initPCIDB() {
@@ -23,11 +22,10 @@ func initPCIDB() {
// local sources aren't found
db, err = pcidb.New()
if err != nil {
- log.Printf("warning: couldn't use local pcidb cache: %s", err)
- log.Println("falling back to downloading database")
+ log.Warn("util.initPCIDB").Err(err).Msg("fallback to downloading pcidb")
db, err = pcidb.New(pcidb.WithEnableNetworkFetch())
if err != nil {
- log.Println("error: couldn't get pcidb. no more fallbacks available, will not be able to query the pcidb")
+ log.Error("util.initPCIDB").Err(err).Msg("no more fallbacks available")
}
}
pcidbInitDone = true
@@ -38,7 +36,7 @@ func GetPCIClass(id string) string {
initPCIDB()
}
if pcidbInitDone && db == nil {
- log.Println("unable to access pcidb")
+ log.Warn("util.GetPCIClass").Msg("no pcidb to do lookup")
return pcidbNODB
}
if class, ok := db.Classes[id]; ok {
diff --git a/view/cluster.templ b/view/cluster.templ
index 2ceaa40..f2cd565 100644
--- a/view/cluster.templ
+++ b/view/cluster.templ
@@ -10,10 +10,11 @@ import (
templ ClusterInfo(cs *cluster.ClusterStats, diff cluster.StatDiff, navbar []components.NavItem) {
@layouts.Manager("ClustVirt", "Cluster Manager", navbar) {
Cluster Stats
+ @CPUStats(cs, diff)
}
}
-templ CPUStats() {
+templ CPUStats(cs *cluster.ClusterStats, diff cluster.StatDiff) {
CPU stats
diff --git a/view/cluster_templ.go b/view/cluster_templ.go
index 7caa7de..80bdae9 100644
--- a/view/cluster_templ.go
+++ b/view/cluster_templ.go
@@ -40,141 +40,7 @@ func ClusterInfo(cs *cluster.ClusterStats, diff cluster.StatDiff, navbar []compo
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- var templ_7745c5c3_Var3 = []any{"table-auto", "w-full"}
- templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var3...)
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("")
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- var templ_7745c5c3_Var4 = []any{"caption-top"}
- templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var4...)
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("CPU stats Sockets | Cores | Threads | Allocated |
")
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- var templ_7745c5c3_Var5 string
- templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprint(cs.CPU.Sockets))
- if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/cluster.templ`, Line: 27, Col: 35}
- }
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5))
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(" | ")
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- var templ_7745c5c3_Var6 string
- templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprint(cs.CPU.Cores))
- if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/cluster.templ`, Line: 30, Col: 33}
- }
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6))
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(" | ")
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- var templ_7745c5c3_Var7 string
- templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprint(cs.CPU.Threads))
- if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/cluster.templ`, Line: 33, Col: 35}
- }
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7))
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(" | ")
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- var templ_7745c5c3_Var8 string
- templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprint(cs.CPU.Allocated))
- if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/cluster.templ`, Line: 36, Col: 37}
- }
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8))
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(" |
")
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- var templ_7745c5c3_Var9 string
- templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprint(diff.CPU.Sockets))
- if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/cluster.templ`, Line: 41, Col: 42}
- }
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9))
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(" | ")
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- var templ_7745c5c3_Var10 string
- templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprint(diff.CPU.Cores))
- if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/cluster.templ`, Line: 44, Col: 40}
- }
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10))
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(" | ")
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- var templ_7745c5c3_Var11 string
- templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprint(diff.CPU.Threads))
- if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/cluster.templ`, Line: 47, Col: 42}
- }
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11))
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(" | ")
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- var templ_7745c5c3_Var12 string
- templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprint(diff.CPU.Allocated))
- if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/cluster.templ`, Line: 50, Col: 44}
- }
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12))
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(" |
")
+ templ_7745c5c3_Err = CPUStats(cs, diff).Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
@@ -193,3 +59,161 @@ func ClusterInfo(cs *cluster.ClusterStats, diff cluster.StatDiff, navbar []compo
return templ_7745c5c3_Err
})
}
+
+func CPUStats(cs *cluster.ClusterStats, diff cluster.StatDiff) templ.Component {
+ return templ.ComponentFunc(func(ctx context.Context, templ_7745c5c3_W io.Writer) (templ_7745c5c3_Err error) {
+ templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templ_7745c5c3_W.(*bytes.Buffer)
+ if !templ_7745c5c3_IsBuffer {
+ templ_7745c5c3_Buffer = templ.GetBuffer()
+ defer templ.ReleaseBuffer(templ_7745c5c3_Buffer)
+ }
+ ctx = templ.InitializeContext(ctx)
+ templ_7745c5c3_Var3 := templ.GetChildren(ctx)
+ if templ_7745c5c3_Var3 == nil {
+ templ_7745c5c3_Var3 = templ.NopComponent
+ }
+ ctx = templ.ClearChildren(ctx)
+ var templ_7745c5c3_Var4 = []any{"table-auto", "w-full"}
+ templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var4...)
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var5 = []any{"caption-top"}
+ templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var5...)
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("CPU stats | Sockets | Cores | Threads | Allocated |
Latest | ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var6 string
+ templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprint(cs.CPU.Sockets))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/cluster.templ`, Line: 36, Col: 33}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(" | ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var7 string
+ templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprint(cs.CPU.Cores))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/cluster.templ`, Line: 39, Col: 31}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(" | ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var8 string
+ templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprint(cs.CPU.Threads))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/cluster.templ`, Line: 42, Col: 33}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(" | ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var9 string
+ templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprint(cs.CPU.Allocated))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/cluster.templ`, Line: 45, Col: 35}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(" |
Change | ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var10 string
+ templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprint(diff.CPU.Sockets))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/cluster.templ`, Line: 53, Col: 35}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(" | ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var11 string
+ templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprint(diff.CPU.Cores))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/cluster.templ`, Line: 56, Col: 33}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(" | ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var12 string
+ templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprint(diff.CPU.Threads))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/cluster.templ`, Line: 59, Col: 35}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(" | ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var13 string
+ templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprint(diff.CPU.Allocated))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/cluster.templ`, Line: 62, Col: 37}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(" |
")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if !templ_7745c5c3_IsBuffer {
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteTo(templ_7745c5c3_W)
+ }
+ return templ_7745c5c3_Err
+ })
+}