done migrating host module to libvirtxml

- need to add accessor methods
- eventually will move the VMList to the local guest module

parent 99fcb2b360
commit ffca8c3970
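The accessor methods mentioned above do not exist yet; a minimal sketch of what one might look like on the host side, assuming the field layout in this commit (the method name is a placeholder, not part of the change):

    package host

    import "libvirt.org/go/libvirtxml"

    // VMs is a hypothetical accessor that would hide the raw libvirtxml slice
    // behind the host API before VMList moves to the guest module.
    func (h *Host) VMs() []*libvirtxml.Domain {
        return h.VMList
    }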
@@ -1,10 +1,10 @@
 package cluster
 
 import (
-    "log"
     "time"
 
     "git.staur.ca/stobbsm/clustvirt/lib/host"
+    "git.staur.ca/stobbsm/clustvirt/lib/log"
 )
 
 // ClusterBuilder is used to build a Cluster object, which can then be used
@@ -42,12 +42,17 @@ func (c *ClusterBuilder) DefaultHostURI(uri *host.URI) *ClusterBuilder {
 
 func (c *ClusterBuilder) AddHost(h string) *ClusterBuilder {
     if _, ok := c.cluster.hosts[h]; ok {
-        log.Println("warning: trying to add duplicate host")
+        log.Warn("cluster.AddHost").
+            Str("hostname", h).
+            Msg("won't overwrite existing host")
         return c
     }
     hc, err := host.ConnectHost(c.cluster.defaultURI, h)
     if err != nil {
-        log.Printf("failed to connect to host: %s, %s", h, err)
+        log.Error("cluster.AddHost").
+            Str("hostname", h).
+            Err(err).
+            Msg("failed to connect to host")
         return c
     }
     c.cluster.hosts[h] = hc
@@ -56,12 +61,17 @@ func (c *ClusterBuilder) AddHost(h string) *ClusterBuilder {
 
 func (c *ClusterBuilder) AddHostWithURI(h string, uri *host.URI) *ClusterBuilder {
     if _, ok := c.cluster.hosts[h]; ok {
-        log.Println("warning: trying to add duplicate host")
+        log.Warn("cluster.AddHostWithURI").
+            Str("hostname", h).
+            Msg("won't overwrite existing host")
         return c
     }
     hc, err := host.ConnectHost(uri, h)
     if err != nil {
-        log.Printf("failed to connect to host: %s, %s", h, err)
+        log.Error("cluster.AddHostWithURI").
+            Str("hostname", h).
+            Err(err).
+            Msg("failed to connect to host")
         return c
     }
     c.cluster.hosts[h] = hc
cluster/stats.go | 167
@@ -1,24 +1,26 @@
 package cluster
 
+import "git.staur.ca/stobbsm/clustvirt/lib/storagepool"
+
 // ClusterStats is used to gather stats for the entire cluster
 // Combined with StatsDiff, we can get some basic cluster wide stats tracking
 type ClusterStats struct {
     // CPU Statistics including number of CPUs
     CPU CPUStats
     // Memory provides information about the amount of memory, including free and
     // allocated memory
     Memory MemoryStats
     // Storage provides information about storage pools, Only get's stats for active
     // pools, and will not activate pools that are not already active.
     // Trys to sort out shared file systems from local filesystems using the Type parameter
     // of Host.StoragePoolInfo
     Storage StorageStats
     // Volume provides information on allocated volumes used in the cluster
     Volume VolumeStats
     // VM provides VM specific counters for the cluster
     VM VMStats
     // Host provides Host information for the cluster
     Host HostStats
     // Network provices available networks, and how many are shared between hosts
     Network NetworkStats
     // NetIF provides information about Libvirt allocated networks, usable by the
@@ -52,6 +54,7 @@ type MemoryStats struct {
 // StorageStats provides information about the available storage pools in the cluster,
 // including the amount of space available, allocated, and how many pools are shared
 // between hosts
+// All sizes are in Bytes
 type StorageStats struct {
     Total uint64
     Used uint64
@@ -81,7 +84,7 @@ type VMStats struct {
 type HostStats struct {
     Count uint32
     Available uint32
     Nodes uint32
 }
 
 // NetworkStats provides informatoin about the available Host network connections,
@@ -109,7 +112,7 @@ type DeviceStats struct {
 // SecretStats provides the number of secrets defined throughout the cluster.
 // Shared secrets are only counted once, and are recognized by their UUID
 type SecretStats struct {
     Count uint32
     Shared uint32
 }
 
@@ -117,7 +120,7 @@ type SecretStats struct {
 type StatDiff struct {
     CPU CPUDiff
     Memory MemoryDiff
-    Storage StorageStats
+    Storage StorageDiff
     Volume VolumeDiff
     VM VMDiff
     Host HostDiff
@@ -186,9 +189,9 @@ func (cs *ClusterStats) Update() {
         cs.Host.Count++
         cs.Host.Available++
 
-        cs.CPU.Sockets += h.HostInfo.Sockets
-        cs.CPU.Cores += h.HostInfo.Cores
-        cs.CPU.Threads += h.HostInfo.Threads
+        cs.CPU.Sockets += uint32(h.HostInfo.CPU.Topology.Sockets)
+        cs.CPU.Cores += uint32(h.HostInfo.CPU.Topology.Cores)
+        cs.CPU.Threads += uint32(h.HostInfo.CPU.Topology.Threads)
 
         cs.Memory.Total += h.NodeMemory.Total
         cs.Memory.Free += h.NodeMemory.Free
@@ -204,128 +207,71 @@ func (cs *ClusterStats) Update() {
                 // Already counted this shared pool, move on
                 continue
             }
-            if sp.HAEnabled {
+            if isNetworkPool(sp.Type) {
                 countedSharedPools[sp.Name] = struct{}{}
             }
-            if !sp.Active {
-                cs.Storage.Inactive++
-                continue
-            }
             cs.Storage.Active++
-            cs.Storage.Total += sp.Capacity
-            cs.Storage.Used += sp.Allocation
-            cs.Storage.Free += sp.Capacity - sp.Allocation
-            // Volumes in the pool
-            cs.Volume.Total += uint32(len(sp.Volumes))
-            for range sp.Volumes {
-                cs.Volume.Active++
-            }
+            cs.Storage.Total += sp.Capacity.Value
+            cs.Storage.Used += sp.Allocation.Value
+            cs.Storage.Free += sp.Capacity.Value - sp.Allocation.Value
         }
 
         // VM Count
         cs.VM.Count += uint32(len(h.VMList))
         for _, vm := range h.VMList {
-            cs.CPU.Allocated += uint32(vm.VCPUs)
-            cs.Memory.Allocated += uint64(vm.Memory)
-            if vm.Active {
-                cs.VM.Started++
-                continue
-            }
-            cs.VM.Stopped++
+            cs.CPU.Allocated += uint32(len(vm.VCPUs.VCPU))
+            cs.Memory.Allocated += uint64(vm.Memory.Value)
         }
 
         // Network count
         cs.Network.Count += uint32(len(h.NetworkList))
-        for _, ni := range h.NetworkList {
-            if ni.Active {
-                cs.Network.Active++
-                continue
-            }
-            cs.Network.Inactive++
-        }
+        cs.Network.Inactive++
     }
 }
 
 // Diff returns a map of all the field and how they changed
 func (cs *ClusterStats) Diff() StatDiff {
     return StatDiff{
-        CPU: struct {
-            Sockets int
-            Cores int
-            Threads int
-            Allocated int
-        }{
+        CPU: CPUDiff{
             Sockets: int(cs.CPU.Sockets) - int(cs.old.CPU.Sockets),
             Cores: int(cs.CPU.Cores) - int(cs.old.CPU.Cores),
             Threads: int(cs.CPU.Threads) - int(cs.old.CPU.Threads),
             Allocated: int(cs.CPU.Allocated) - int(cs.old.CPU.Allocated),
         },
-        Memory: struct {
-            Total int
-            Free int
-            Buffers int
-            Cached int
-            Allocated int
-        }{
-            Total: int(cs.old.Memory.Total - cs.Memory.Total),
-            Free: int(cs.old.Memory.Free - cs.Memory.Free),
-            Buffers: int(cs.old.Memory.Buffers - cs.Memory.Buffers),
-            Cached: int(cs.old.Memory.Cached - cs.Memory.Cached),
-            Allocated: int(cs.old.Memory.Allocated - cs.Memory.Allocated),
+        Memory: MemoryDiff{
+            Total: int64(cs.old.Memory.Total) - int64(cs.Memory.Total),
+            Free: int64(cs.old.Memory.Free) - int64(cs.Memory.Free),
+            Buffers: int64(cs.old.Memory.Buffers) - int64(cs.Memory.Buffers),
+            Cached: int64(cs.old.Memory.Cached) - int64(cs.Memory.Cached),
+            Allocated: int64(cs.old.Memory.Allocated) - int64(cs.Memory.Allocated),
         },
-        Storage: struct {
-            Total int
-            Used int
-            Free int
-            Active int
-            Inactive int
-            Pools int
-            Volumes struct {
-                Total int
-                Active int
-                Inactive int
-            }
-        }{
-            Total: int(cs.old.Storage.Total - cs.Storage.Total),
-            Used: int(cs.old.Storage.Used - cs.Storage.Used),
-            Free: int(cs.old.Storage.Free - cs.Storage.Free),
-            Active: int(cs.old.Storage.Active - cs.Storage.Active),
-            Inactive: int(cs.old.Storage.Inactive - cs.Storage.Inactive),
-            Pools: int(cs.old.Storage.Pools - cs.Storage.Pools),
-            Volumes: struct {
-                Total int
-                Active int
-                Inactive int
-            }{
-                Total: int(cs.old.Volume.Total - cs.Volume.Total),
-                Active: int(cs.old.Volume.Active - cs.Volume.Active),
-                Inactive: int(cs.old.Volume.Inactive - cs.Volume.Inactive),
-            },
+        Storage: StorageDiff{
+            Total: int64(cs.old.Storage.Total) - int64(cs.Storage.Total),
+            Used: int64(cs.old.Storage.Used) - int64(cs.Storage.Used),
+            Free: int64(cs.old.Storage.Free) - int64(cs.Storage.Free),
+            Active: int64(cs.old.Storage.Active) - int64(cs.Storage.Active),
+            Inactive: int64(cs.old.Storage.Inactive) - int64(cs.Storage.Inactive),
+            Pools: int(cs.old.Storage.Pools) - int(cs.Storage.Pools),
         },
-        VM: struct {
-            Count int
-            Started int
-            Stopped int
-        }{
-            Count: int(cs.old.VM.Count - cs.VM.Count),
-            Started: int(cs.old.VM.Started - cs.VM.Started),
-            Stopped: int(cs.old.VM.Stopped - cs.VM.Stopped),
+        Volume: VolumeDiff{
+            Total: int(cs.old.Volume.Total) - int(cs.Volume.Total),
+            Active: int(cs.old.Volume.Active) - int(cs.Volume.Active),
+            Inactive: int(cs.old.Volume.Inactive) - int(cs.Volume.Inactive),
         },
-        Host: struct {
-            Count int
-            Available int
-        }{
-            Count: int(cs.old.Host.Count - cs.Host.Count),
-            Available: int(cs.old.Host.Available - cs.Host.Available),
+        VM: VMDiff{
+            Count: int(cs.old.VM.Count) - int(cs.VM.Count),
+            Started: int(cs.old.VM.Started) - int(cs.VM.Started),
+            Stopped: int(cs.old.VM.Stopped) - int(cs.VM.Stopped),
         },
-        Network: struct {
-            Count int
-            Active int
-            Inactive int
-        }{
-            Count: int(cs.old.Network.Count - cs.Network.Count),
-            Active: int(cs.old.Network.Active - cs.Network.Active),
-            Inactive: int(cs.old.Network.Inactive - cs.Network.Inactive),
+        Host: HostDiff{
+            Count: int(cs.old.Host.Count) - int(cs.Host.Count),
+            Available: int(cs.old.Host.Available) - int(cs.Host.Available),
+        },
+        Network: NetworkDiff{
+            Count: int(cs.old.Network.Count) - int(cs.Network.Count),
+            Active: int(cs.old.Network.Active) - int(cs.Network.Active),
+            Inactive: int(cs.old.Network.Inactive) - int(cs.Network.Inactive),
         },
     }
 }
@@ -366,3 +312,12 @@ func (cs *ClusterStats) reset() {
     cs.Network.Active = 0
     cs.Network.Inactive = 0
 }
+
+func isNetworkPool(pooltype string) bool {
+    for _, t := range storagepool.NetTypes {
+        if t == pooltype {
+            return true
+        }
+    }
+    return false
+}
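For context, a minimal sketch of how Update and Diff might be driven on a timer; the polling loop, interval, and import paths are assumptions, only Update, Diff, and the StatDiff fields come from this file:

    package main

    import (
        "time"

        "git.staur.ca/stobbsm/clustvirt/cluster" // path assumed from repo layout
        "git.staur.ca/stobbsm/clustvirt/lib/log"
    )

    // pollStats is hypothetical wiring, not part of this commit.
    func pollStats(cs *cluster.ClusterStats) {
        t := time.NewTicker(30 * time.Second)
        defer t.Stop()
        for range t.C {
            cs.Update()
            d := cs.Diff()
            log.Info("example.pollStats").Int("cpuAllocatedDelta", d.CPU.Allocated).Msg("stats refreshed")
        }
    }

    func main() { /* build a Cluster via ClusterBuilder, then go pollStats(...) */ }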
go.mod | 9
@@ -6,15 +6,14 @@ require (
 	github.com/a-h/templ v0.2.598
 	github.com/go-chi/chi/v5 v5.0.12
 	github.com/jaypipes/pcidb v1.0.0
-	github.com/wcharczuk/go-chart/v2 v2.1.1
-	golang.org/x/exp v0.0.0-20240318143956-a85f2c67cd81
+	github.com/rs/zerolog v1.32.0
 	libvirt.org/go/libvirt v1.10001.0
 	libvirt.org/go/libvirtxml v1.10001.0
 )
 
 require (
-	github.com/blend/go-sdk v1.20220411.3 // indirect
-	github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 // indirect
+	github.com/mattn/go-colorable v0.1.13 // indirect
+	github.com/mattn/go-isatty v0.0.20 // indirect
 	github.com/mitchellh/go-homedir v1.0.0 // indirect
-	golang.org/x/image v0.11.0 // indirect
+	golang.org/x/sys v0.15.0 // indirect
 )
go.sum | 58
@@ -1,54 +1,30 @@
 github.com/a-h/templ v0.2.598 h1:6jMIHv6wQZvdPxTuv87erW4RqN/FPU0wk7ZHN5wVuuo=
 github.com/a-h/templ v0.2.598/go.mod h1:SA7mtYwVEajbIXFRh3vKdYm/4FYyLQAtPH1+KxzGPA8=
-github.com/blend/go-sdk v1.20220411.3 h1:GFV4/FQX5UzXLPwWV03gP811pj7B8J2sbuq+GJQofXc=
-github.com/blend/go-sdk v1.20220411.3/go.mod h1:7lnH8fTi6U4i1fArEXRyOIY2E1X4MALg09qsQqY1+ak=
+github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
 github.com/go-chi/chi/v5 v5.0.12 h1:9euLV5sTrTNTRUU9POmDUvfxyj6LAABLUcEWO+JJb4s=
 github.com/go-chi/chi/v5 v5.0.12/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 h1:DACJavvAHhabrF08vX0COfcOBJRhZ8lUbR+ZWIs0Y5g=
-github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
 github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
 github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/jaypipes/pcidb v1.0.0 h1:vtZIfkiCUE42oYbJS0TAq9XSfSmcsgo9IdxSm9qzYU8=
 github.com/jaypipes/pcidb v1.0.0/go.mod h1:TnYUvqhPBzCKnH34KrIX22kAeEbDCSRJ9cqLRCuNDfk=
+github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
+github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
+github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
 github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0=
 github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
+github.com/rs/zerolog v1.32.0 h1:keLypqrlIjaFsbmJOBdB/qvyF8KEtCWHwobLp5l/mQ0=
+github.com/rs/zerolog v1.32.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
-github.com/wcharczuk/go-chart/v2 v2.1.1 h1:2u7na789qiD5WzccZsFz4MJWOJP72G+2kUuJoSNqWnE=
-github.com/wcharczuk/go-chart/v2 v2.1.1/go.mod h1:CyCAUt2oqvfhCl6Q5ZvAZwItgpQKZOkCJGb+VGv6l14=
-github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/exp v0.0.0-20240318143956-a85f2c67cd81 h1:6R2FC06FonbXQ8pK11/PDFY6N6LWlf9KlzibaCapmqc=
-golang.org/x/exp v0.0.0-20240318143956-a85f2c67cd81/go.mod h1:CQ1k9gNrJ50XIzaKCRR2hssIjF07kZFEiieALBM/ARQ=
-golang.org/x/image v0.11.0 h1:ds2RoQvBvYTiJkwpSFDwCcDFNX7DqjL2WsUgTNk0Ooo=
-golang.org/x/image v0.11.0/go.mod h1:bglhjqbqVuEb9e9+eNR45Jfu7D+T4Qan+NhQk8Ck2P8=
-golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
-golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 libvirt.org/go/libvirt v1.10001.0 h1:lEVDNE7xfzmZXiDEGIS8NvJSuaz11OjRXw+ufbQEtPY=
 libvirt.org/go/libvirt v1.10001.0/go.mod h1:1WiFE8EjZfq+FCVog+rvr1yatKbKZ9FaFMZgEqxEJqQ=
 libvirt.org/go/libvirtxml v1.10001.0 h1:r9WBs24r3mxIG3/hAMRRwDMy4ZaPHmhHjw72o/ceXic=
@@ -3,9 +3,9 @@ package guest
 
 import (
     "errors"
-    "log"
     "time"
 
+    "git.staur.ca/stobbsm/clustvirt/lib/log"
     "git.staur.ca/stobbsm/clustvirt/util"
     "libvirt.org/go/libvirt"
 )
@@ -248,7 +248,7 @@ func GetGuest(name string, conn *libvirt.Connect) (*VM, error) {
 
     // Not errors, but still log the warnings when this happens
     if g.BackupXML, err = g.dom.BackupGetXMLDesc(0); err != nil {
-        log.Printf("WARNING: While loading backup information: %s", err)
+        log.Warn("guest.GetGuest").Str("guest", g.Name).Err(err).Send()
     }
 
     go func() {
@@ -262,7 +262,7 @@ func GetGuest(name string, conn *libvirt.Connect) (*VM, error) {
 
 // Close closes an open connection
 func (g *VM) Close() error {
-    log.Println("Closing VM", g.Name)
+    log.Info("guest.Close").Str("guest", g.Name).Msg("closing vm")
     close(g.close)
     return <-g.closeErr
 }
@@ -1,38 +0,0 @@
-package host
-
-import (
-    "fmt"
-    "log"
-    "strings"
-
-    "github.com/wcharczuk/go-chart/v2"
-)
-
-// This file contains utilities to create charts based on the different data
-// When a chart is rendered, it can return either an SVG or PNG. SVG is preferrable.
-
-func (h *Host) ChartMemory() string {
-    h.getNodeInfo()
-    log.Println("Generating Chart")
-    memFree := float64(h.NodeMemory.Free)
-    memCached := float64(h.NodeMemory.Cached)
-    memBuffer := float64(h.NodeMemory.Buffers)
-    memTotal := float64(h.NodeMemory.Total)
-
-    c := chart.PieChart{
-        Title: fmt.Sprintf("Memory Info %s", h.SystemHostName),
-        Width: 256,
-        Height: 256,
-        Values: []chart.Value{
-            {Value: memTotal - memFree, Label: fmt.Sprintf("%.2f%% Free", memFree/memTotal*100)},
-            {Value: memTotal - memCached, Label: fmt.Sprintf("%.2f%% Cached", memCached/memTotal*100)},
-            {Value: memTotal - memBuffer, Label: fmt.Sprintf("%.2f%% Buffers", memBuffer/memTotal*100)},
-        },
-    }
-    sb := new(strings.Builder)
-    log.Println("Rendering chart")
-    if err := c.Render(chart.SVG, sb); err != nil {
-        return err.Error()
-    }
-    return sb.String()
-}
lib/host/errors.go | 12 (new file)
@@ -0,0 +1,12 @@
+package host
+
+import "errors"
+
+var (
+    // XML errors
+    ErrGetXML = errors.New("error getting XML")
+    ErrParseXML = errors.New("error parsing XML")
+
+    // Hostinfo errors
+    ErrHostInfo = errors.New("error processing HostInfo")
+)
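These sentinels are only attached to log events via Err(ErrGetXML) in lib.go below; a short sketch of how callers could also wrap and test for them, assuming a hypothetical helper (the wrapping is not something this commit does):

    package host

    import (
        "fmt"

        "libvirt.org/go/libvirt"
    )

    // poolXML is a hypothetical helper shown only to illustrate errors.Is with
    // ErrGetXML; callers could then check errors.Is(err, host.ErrGetXML).
    func poolXML(p *libvirt.StoragePool) (string, error) {
        raw, err := p.GetXMLDesc(0)
        if err != nil {
            return "", fmt.Errorf("%w: %v", ErrGetXML, err)
        }
        return raw, nil
    }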
lib/host/lib.go | 140
@@ -7,10 +7,10 @@
 package host
 
 import (
-    "log"
     "sync"
 
     "git.staur.ca/stobbsm/clustvirt/lib/guest"
+    "git.staur.ca/stobbsm/clustvirt/lib/log"
     "libvirt.org/go/libvirt"
     "libvirt.org/go/libvirtxml"
 )
@@ -114,7 +114,7 @@ func (h *Host) GetGuestByName(name string) (*guest.VM, error) {
 
 // Close triggers closing the host connection
 func (h *Host) Close() error {
-    log.Println("Closing Host", h.HostName)
+    log.Info("Host.Close").Str("hostname", h.HostName).Msg("closing connection")
     close(h.close)
     return <-h.closeErr
 }
@@ -151,29 +151,33 @@ func (h *Host) hostInfo() {
 
     rawxml, err := h.conn.GetCapabilities()
     if err != nil {
-        log.Printf("error getting host capabilities XML: %s", err)
+        log.Error("Host.hostInfo").Str("hostname", h.HostName).Err(ErrGetXML).Err(err).Send()
    }
     xmldoc := &libvirtxml.Caps{}
     if err = xmldoc.Unmarshal(rawxml); err != nil {
-        log.Printf("error parsing host capabilities XML: %s", err)
+        log.Error("Host.hostInfo").Str("hostname", h.HostName).Err(ErrParseXML).Err(err).Send()
     }
     h.HostInfo = &xmldoc.Host
     h.SystemHostName, err = h.conn.GetHostname()
     if err != nil {
-        log.Println("error getting system host name: %s", err)
-    } else {
+        log.Error("Host.hostInfo").Str("hostname", h.HostName).Err(err).Msg("unable to set SystemHostName")
+    }
+    if h.SystemHostName == "" {
         h.SystemHostName = h.HostName
     }
+    log.Info("Host.hostInfo").Str("hostname", h.HostName).Str("system hostname", h.SystemHostName).Msg("set system hostname")
     h.LibVersion, err = h.conn.GetLibVersion()
     if err != nil {
-        log.Println(err)
+        log.Error("Host.hostInfo").Str("hostname", h.HostName).Err(err).Msg("unable to get libversion")
+        return
     }
+    log.Info("Host.hostInfo").Str("hostname", h.HostName).Uint32("libversion", h.LibVersion).Send()
 }
 
 func (h *Host) memoryInfo() {
     mi, err := h.conn.GetMemoryStats(libvirt.NODE_MEMORY_STATS_ALL_CELLS, 0)
     if err != nil {
-        log.Println(err)
+        log.Error("Host.memoryInfo").Str("hostname", h.HostName).Err(err).Send()
     }
     h.NodeMemory = &NodeMemoryInfo{
         Total: mi.Total,
@@ -184,20 +188,20 @@ func (h *Host) memoryInfo() {
 
     h.SysInfo, err = h.conn.GetSysinfo(0)
     if err != nil {
-        log.Println(err)
+        log.Error("Host.memoryInfo").Str("hostname", h.HostName).Err(err).Msg("failed to GetSysInfo")
     }
 
     h.Alive, err = h.conn.IsAlive()
     if err != nil {
-        log.Println(err)
+        log.Error("Host.memoryInfo").Str("hostname", h.HostName).Err(err).Msg("failed check to IsAlive")
     }
     h.Encrypted, err = h.conn.IsEncrypted()
     if err != nil {
-        log.Println(err)
+        log.Error("Host.memoryInfo").Str("hostname", h.HostName).Err(err).Msg("failed to check IsEncrypted")
     }
     h.Secure, err = h.conn.IsSecure()
     if err != nil {
-        log.Println(err)
+        log.Error("Host.memoryInfo").Str("hostname", h.HostName).Err(err).Msg("failed to check IsSecure")
     }
 }
 
@@ -205,7 +209,7 @@ func (h *Host) getStoragePools() {
     // Get list of all storage pools on the host
     spools, err := h.conn.ListAllStoragePools(0)
     if err != nil {
-        log.Println(err)
+        log.Error("Host.getStoragePools").Str("hostname", h.HostName).Err(err).Msg("failed to ListAllStoragePools")
     }
     if len(spools) > 0 {
         h.StoragePoolList = make([]*libvirtxml.StoragePool, len(spools))
@@ -216,13 +220,13 @@ func (h *Host) getStoragePools() {
             // Get the XML represenation of each storage pool, parse it with libvirtxml
             rawxml, err := s.GetXMLDesc(0)
             if err != nil {
-                log.Println("error getting storage pool xml: %s", err)
+                log.Error("Host.getStoragePools").Str("hostname", h.HostName).Err(ErrGetXML).Err(err).Send()
                 return
             }
             xmldoc := &libvirtxml.StoragePool{}
             err = xmldoc.Unmarshal(rawxml)
             if err != nil {
-                log.Println("error parsing storage pool XML: %s", err)
+                log.Error("Host.getStoragePools").Str("hostname", h.HostName).Err(ErrParseXML).Err(err).Send()
                 return
             }
             h.StoragePoolList[i] = xmldoc
@@ -230,7 +234,7 @@ func (h *Host) getStoragePools() {
             // Get list of all storage volumes in the current storage pool
             svols, err := s.ListAllStorageVolumes(0)
             if err != nil {
-                log.Println(err)
+                log.Error("Host.getStoragePools").Str("hostname", h.HostName).Str("storagepool", h.StoragePoolList[i].Name).Err(err).Msg("failed to ListAllStorageVolumes")
             }
             if len(svols) > 0 {
                 // define temporary variable to hold slice of StorageVolume, that can
@@ -243,29 +247,32 @@ func (h *Host) getStoragePools() {
                     defer sv.Free()
                     rawxml, err = sv.GetXMLDesc(0)
                     if err != nil {
-                        log.Printf("error getting XML from storage volume: %s", err)
+                        log.Error("Host.getStoragePools").Str("hostname", h.HostName).Err(ErrGetXML).Err(err).Send()
                         return
                     }
                     xmldoc := &libvirtxml.StorageVolume{}
                     err = xmldoc.Unmarshal(rawxml)
                     if err != nil {
-                        log.Printf("error parsing storage volume XML: %s", err)
+                        log.Error("Host.getStoragePools").Str("hostname", h.HostName).Err(ErrParseXML).Err(err).Send()
                         return
                     }
 
                     tvl[j] = xmldoc
+                    log.Info("Host.getStoragePools").Str("hostname", h.HostName).Str("added volume", tvl[j].Name).Send()
                 }()
             }
             // Append the contents of tvl to h.VolumeList
             if h.VolumeList == nil {
+                log.Info("Host.getStoragePools").Str("hostname", h.HostName).Str("storagepool", h.StoragePoolList[i].Name).Msg("initializing VolumeList")
                 h.VolumeList = []*libvirtxml.StorageVolume{}
             }
             // Only append if the temporary storage volume isn't nil
+            log.Info("Host.getStoragePools").Str("hostname", h.HostName).Str("storagepool", h.StoragePoolList[i].Name).Int("VolumeList count", len(h.VolumeList)).Msg("before filter")
             for _, tsv := range tvl {
                 if tsv != nil {
                     h.VolumeList = append(h.VolumeList, tsv)
                 }
             }
+            log.Info("Host.getStoragePools").Str("hostname", h.HostName).Str("storagepool", h.StoragePoolList[i].Name).Int("VolumeList count", len(h.VolumeList)).Msg("after filter")
         }
         }()
     }
@@ -275,7 +282,7 @@ func (h *Host) getStoragePools() {
 func (h *Host) getSecretsInfo() {
     nsecrets, err := h.conn.ListAllSecrets(0)
     if err != nil {
-        log.Printf("error loading secrets from host: %s", err)
+        log.Error("Host.getSecretsInfo").Str("hostname", h.HostName).Err(err).Send()
     }
     if len(nsecrets) > 0 {
         h.SecretList = make([]*libvirtxml.Secret, len(nsecrets))
@@ -284,11 +291,11 @@ func (h *Host) getSecretsInfo() {
             defer s.Free()
             rawxml, err := s.GetXMLDesc(0)
             if err != nil {
-                log.Printf("error getting secret XML", err)
+                log.Error("Host.getSecretsInfo").Str("hostname", h.HostName).Err(ErrGetXML).Err(err).Send()
             }
             xmldoc := &libvirtxml.Secret{}
             if err = xmldoc.Unmarshal(rawxml); err != nil {
-                log.Printf("error parsing secret XML: %s", err)
+                log.Error("Host.getSecretsInfo").Str("hostname", h.HostName).Err(ErrParseXML).Err(err).Send()
             }
             h.SecretList[i] = xmldoc
         }()
@@ -300,7 +307,8 @@ func (h *Host) getDomainInfo() {
     // getDomainInfo
     doms, err := h.conn.ListAllDomains(0)
     if err != nil {
-        log.Println(err)
+        log.Error("Host.getDomainInfo").Str("hostname", h.HostName).Err(err).Send()
+        return
     }
     if len(doms) > 0 {
         h.VMList = make([]*libvirtxml.Domain, len(doms))
@@ -309,14 +317,15 @@ func (h *Host) getDomainInfo() {
             defer d.Free()
             rawxml, err := d.GetXMLDesc(0)
             if err != nil {
-                log.Printf("error getting domain XML: %s", err)
+                log.Error("Host.getDomainInfo").Str("hostname", h.HostName).Err(ErrGetXML).Err(err).Send()
+                return
             }
             h.VMList[i] = &libvirtxml.Domain{}
             if err = h.VMList[i].Unmarshal(rawxml); err != nil {
-                log.Printf("error parsing domain XML: %s", err)
+                log.Error("Host.getDomainInfo").Str("hostname", h.HostName).Err(ErrParseXML).Err(err).Send()
+                return
             }
         }()
 
         }
     }
 }
@@ -325,7 +334,7 @@ func (h *Host) getIfaceInfo() {
     // getIfaceInfo
     ifaces, err := h.conn.ListAllInterfaces(0)
     if err != nil {
-        log.Println(err)
+        log.Error("Host.getIfaceInfo").Str("hostname", h.HostName).Err(err).Send()
     }
     if len(ifaces) > 0 {
         h.NetIfFList = make([]*libvirtxml.Interface, len(ifaces))
@@ -334,12 +343,12 @@ func (h *Host) getIfaceInfo() {
             defer ni.Free()
             rawxml, err := ni.GetXMLDesc(0)
             if err != nil {
-                log.Printf("error getting interface XML: %s")
+                log.Error("Host.getIfaceInfo").Str("hostname", h.HostName).Err(ErrGetXML).Err(err).Send()
                 return
             }
             h.NetIfFList[i] = &libvirtxml.Interface{}
             if err = h.NetIfFList[i].Unmarshal(rawxml); err != nil {
-                log.Printf("error parsing interface XML: %s")
+                log.Error("Host.getIfaceInfo").Str("hostname", h.HostName).Err(ErrParseXML).Err(err).Send()
                 return
             }
         }()
@@ -349,31 +358,26 @@ func (h *Host) getIfaceInfo() {
 
 func (h *Host) getNetsInfo() {
     // getNetsInfo
-    nets, err := h.conn.ListNetworks()
+    nets, err := h.conn.ListAllNetworks(0)
     if err != nil {
-        log.Println(err)
+        log.Error("Host.getNetsInfo").Str("hostname", h.HostName).Err(err).Send()
     }
     if len(nets) > 0 {
-        h.NetworkList = make([]NetworkInfo, len(nets))
-        for i, netName := range nets {
-            net, err := h.conn.LookupNetworkByName(netName)
-            if err != nil {
-                log.Println(err)
-            }
-            if h.NetworkList[i].Name, err = net.GetName(); err != nil {
-                log.Println(err)
-            }
-            if h.NetworkList[i].UUID, err = net.GetUUID(); err != nil {
-                log.Println(err)
-            }
-            if h.NetworkList[i].XML, err = net.GetXMLDesc(0); err != nil {
-                log.Println(err)
-            }
-            if h.NetworkList[i].Active, err = net.IsActive(); err != nil {
-                log.Println(err)
-            }
-
-            net.Free()
+        h.NetworkList = make([]*libvirtxml.Network, len(nets))
+        for i, net := range nets {
+            func() {
+                defer net.Free()
+                rawxml, err := net.GetXMLDesc(0)
+                if err != nil {
+                    log.Error("Host.getNetsInfo").Str("hostname", h.HostName).Err(ErrGetXML).Err(err).Send()
+                    return
+                }
+                h.NetworkList[i] = &libvirtxml.Network{}
+                if err = h.NetworkList[i].Unmarshal(rawxml); err != nil {
+                    log.Error("Host.getNetsInfo").Str("hostname", h.HostName).Err(ErrParseXML).Err(err).Send()
+                    return
+                }
+            }()
         }
     }
 }
@@ -381,28 +385,24 @@ func (h *Host) getNetsInfo() {
 func (h *Host) getDevicesInfo() {
     ndevs, err := h.conn.ListAllNodeDevices(0)
     if err != nil {
-        log.Println(err)
+        log.Error("Host.getDevicesInfo").Str("hostname", h.HostName).Err(err).Send()
     }
     if len(ndevs) > 0 {
-        h.DeviceList = make([]DeviceInfo, len(ndevs))
+        h.DeviceList = make([]*libvirtxml.NodeDevice, len(ndevs))
         for i, dev := range ndevs {
-            if h.DeviceList[i].Name, err = dev.GetName(); err != nil {
-                log.Println(err)
-            }
-            if h.DeviceList[i].Capabilities, err = dev.ListCaps(); err != nil {
-                log.Println(err)
-            }
-            if h.DeviceList[i].XML, err = dev.GetXMLDesc(0); err != nil {
-                log.Println(err)
-            }
-            dx := &libvirtxml.NodeDevice{}
-            if err != dx.Unmarshal(h.DeviceList[i].XML); err != nil {
-                log.Println(err)
-            }
-            h.DeviceList[i].Driver = dx.Driver.Name
-            dx.Capability.PCI.Class
-
-            dev.Free()
+            func() {
+                defer dev.Free()
+                rawxml, err := dev.GetXMLDesc(0)
+                if err != nil {
+                    log.Error("Host.getDevicesInfo").Str("hostname", h.HostName).Err(ErrGetXML).Err(err).Send()
+                    return
+                }
+                h.DeviceList[i] = &libvirtxml.NodeDevice{}
+                if err = h.DeviceList[i].Unmarshal(rawxml); err != nil {
+                    log.Error("Host.getDevicesInfo").Str("hostname", h.HostName).Err(ErrParseXML).Err(err).Send()
+                    return
+                }
+            }()
         }
     }
 }
@@ -1,8 +1,9 @@
 package host
 
 import (
-    "log"
     "strings"
 
+    "git.staur.ca/stobbsm/clustvirt/lib/log"
 )
 
 // URI is a string type, accessed via the pre-defined variables, and represent
@@ -72,7 +73,12 @@ func (u *URI) ConnectionString(h string) string {
             }
         }
     }
-    log.Printf("Connection URI: %s", sb.String())
+    log.Info("Host.ConnectionString").
+        Str("uri.Driver", u.Driver).
+        Str("uri.Transport", u.Transport).
+        Str("uri.Path", u.Path).
+        Strs("uri.Options", u.Options).
+        Str("builtUri", sb.String()).Send()
     return sb.String()
 }
lib/log/logs.go | 30 (new file)
@@ -0,0 +1,30 @@
+// Package logs provides a central location for ClustVirt logging, wrapping a Zerolog
+// instance to do the job.
+// This will standarize logging throughout ClustVirt, and make it easier to debug
+// issues.
+// The default is to log to Stderr
+// TODO: enable logging to syslog through configuration
+package log
+
+import (
+    "os"
+
+    "github.com/rs/zerolog"
+)
+
+var defaultLogger zerolog.Logger
+
+func init() {
+    defaultLogger = zerolog.New(zerolog.ConsoleWriter{
+        Out: os.Stderr,
+        TimeFormat: zerolog.TimeFormatUnix,
+    })
+}
+
+func Trace(method string) *zerolog.Event { return defaultLogger.Trace().Str("method", method) }
+func Debug(method string) *zerolog.Event { return defaultLogger.Debug().Str("method", method) }
+func Info(method string) *zerolog.Event { return defaultLogger.Info().Str("method", method) }
+func Warn(method string) *zerolog.Event { return defaultLogger.Warn().Str("method", method) }
+func Error(method string) *zerolog.Event { return defaultLogger.Error().Str("method", method) }
+func Fatal(method string) *zerolog.Event { return defaultLogger.Fatal().Str("method", method) }
+func Panic(method string) *zerolog.Event { return defaultLogger.Panic().Str("method", method) }
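Call sites throughout this commit follow the same shape; a minimal self-contained sketch (the hostname value here is made up):

    package main

    import "git.staur.ca/stobbsm/clustvirt/lib/log"

    func main() {
        // Tag the calling method, chain structured fields, finish with Msg or Send,
        // exactly as the host, guest, and cluster packages do in this commit.
        log.Info("example.main").Str("hostname", "node1").Msg("starting up")
        log.Warn("example.main").Str("hostname", "node1").Msg("demo warning")
    }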
main.go | 3
@@ -14,6 +14,7 @@ import (
     "github.com/a-h/templ"
     "github.com/go-chi/chi/v5"
     "github.com/go-chi/chi/v5/middleware"
+    localmw "git.staur.ca/stobbsm/clustvirt/router/middleware"
 )
 
 const DEBUG bool = true
@@ -37,7 +38,7 @@ func main() {
     fs := http.StripPrefix("/static/", http.FileServer(http.Dir("public")))
 
     r := chi.NewRouter()
-    r.Use(middleware.Logger)
+    r.Use(localmw.Logger)
     if DEBUG {
         r.Use(middleware.NoCache)
     }
router/api/api.go | 3 (new file)
@@ -0,0 +1,3 @@
+// Package api defines the routes for the REST API
+package api
+
router/htmx/htmx.go | 4 (new file)
@@ -0,0 +1,4 @@
+// Package htmx contains the routes for the WebUI HTMX
+package htmx
+
+
router/middleware/logger.go | 29 (new file)
@@ -0,0 +1,29 @@
+package middleware
+
+import (
+    "net/http"
+    "time"
+
+    "git.staur.ca/stobbsm/clustvirt/lib/log"
+)
+
+// Logger uses the in package log module to handle route logging
+func Logger(next http.Handler) http.Handler {
+    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+        start := time.Now()
+        defer func() {
+            log.Info("router.middleware.Logger").
+                Str("httpMethod", r.Method).
+                Str("host", r.Host).
+                Str("uri", r.URL.RequestURI()).
+                Str("user_agent", r.UserAgent()).
+                Str("referer", r.Referer()).
+                Strs("transfer-encoding", r.TransferEncoding).
+                Int64("length", r.ContentLength).
+                Dur("elasped_ms", time.Since(start)).
+                Msg("incoming connection")
+        }()
+
+        next.ServeHTTP(w, r)
+    })
+}
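main.go below swaps chi's stock logger for this middleware; a minimal sketch of that wiring (the route and listen address are illustrative, not from the commit):

    package main

    import (
        "net/http"

        localmw "git.staur.ca/stobbsm/clustvirt/router/middleware"
        "github.com/go-chi/chi/v5"
    )

    func main() {
        r := chi.NewRouter()
        r.Use(localmw.Logger) // every request is logged through lib/log
        r.Get("/healthz", func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusOK) })
        http.ListenAndServe(":8080", r)
    }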
router/router.go | 57 (new file)
@@ -0,0 +1,57 @@
+// Package router defines the base routes http server
+package router
+
+import (
+    "fmt"
+    "net/http"
+    "os"
+    "strings"
+    "time"
+
+    "git.staur.ca/stobbsm/clustvirt/lib/log"
+    "github.com/go-chi/chi/v5"
+    "github.com/go-chi/chi/v5/middleware"
+)
+
+type Server struct {
+    bindAddr string
+    ssl bool
+    middleware []http.Handler
+}
+
+// New creates a new HTTP Server instance.
+// Requires the IP and port number to bind to
+func New(listen string, port int) *Server {
+    s := &Server{bindAddr: fmt.Sprintf("%s:%d", listen, port)}
+
+    return s
+}
+
+// Start starts the server and initializes the router and common middleware
+func (s *Server) Start() {
+    tstart := time.Now()
+    defer func() {
+        log.Info("router.Server.Start").
+            Dur("upTime", time.Since(tstart)).
+            Msg("http server stopped")
+    }()
+    router := chi.NewRouter()
+
+    indev, _ := os.LookupEnv("CLUSTVIRT_DEV")
+    indev = strings.ToLower(indev)
+    switch indev {
+    case "true":
+        fallthrough
+    case "1":
+        fallthrough
+    case "yes":
+        fallthrough
+    case "on":
+        router.Use(middleware.NoCache)
+    }
+}
+
+func (s *Server) AddMiddleware(m http.Handler) {
+    s.middleware = append(s.middleware,
+        m)
+}
@@ -1,8 +1,7 @@
 package util
 
 import (
-    "log"
+    "git.staur.ca/stobbsm/clustvirt/lib/log"
 
     "github.com/jaypipes/pcidb"
 )
 
@@ -13,7 +12,7 @@ var (
 
 const (
     pcidbNOTFOUND string = `NOTFOUND`
     pcidbNODB string = `NODBFOUND`
 )
 
 func initPCIDB() {
@@ -23,11 +22,10 @@ func initPCIDB() {
     // local sources aren't found
     db, err = pcidb.New()
     if err != nil {
-        log.Printf("warning: couldn't use local pcidb cache: %s", err)
-        log.Println("falling back to downloading database")
+        log.Warn("util.initPCIDB").Err(err).Msg("fallback to downloading pcidb")
         db, err = pcidb.New(pcidb.WithEnableNetworkFetch())
         if err != nil {
-            log.Println("error: couldn't get pcidb. no more fallbacks available, will not be able to query the pcidb")
+            log.Error("util.initPCIDB").Err(err).Msg("no more fallbacks available")
         }
     }
     pcidbInitDone = true
@@ -38,7 +36,7 @@ func GetPCIClass(id string) string {
         initPCIDB()
     }
     if pcidbInitDone && db == nil {
-        log.Println("unable to access pcidb")
+        log.Warn("util.GetPCIClass").Msg("no pcidb to do lookup")
        return pcidbNODB
     }
     if class, ok := db.Classes[id]; ok {
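A minimal sketch of calling the lookup helper this file exposes (the class ID is just an illustrative value; on failure the NOTFOUND/NODBFOUND sentinels come back as strings):

    package main

    import (
        "fmt"

        "git.staur.ca/stobbsm/clustvirt/util"
    )

    func main() {
        // "03" is the PCI class code for display controllers.
        fmt.Println(util.GetPCIClass("03"))
    }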
@@ -10,10 +10,11 @@ import (
 templ ClusterInfo(cs *cluster.ClusterStats, diff cluster.StatDiff, navbar []components.NavItem) {
     @layouts.Manager("ClustVirt", "Cluster Manager", navbar) {
         <h3>Cluster Stats</h3>
+        @CPUStats(cs, diff)
     }
 }
 
-templ CPUStats() {
+templ CPUStats(cs *cluster.ClusterStats, diff cluster.StatDiff) {
     <table class={ "table-auto", "w-full" }>
         <caption class={ "caption-top" }>
             CPU stats
@ -40,141 +40,7 @@ func ClusterInfo(cs *cluster.ClusterStats, diff cluster.StatDiff, navbar []compo
		if templ_7745c5c3_Err != nil {
			return templ_7745c5c3_Err
		}
-		var templ_7745c5c3_Var3 = []any{"table-auto", "w-full"}
+		templ_7745c5c3_Err = CPUStats(cs, diff).Render(ctx, templ_7745c5c3_Buffer)
-		templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var3...)
-		if templ_7745c5c3_Err != nil {
-			return templ_7745c5c3_Err
-		}
-		_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("<table class=\"")
-		if templ_7745c5c3_Err != nil {
-			return templ_7745c5c3_Err
-		}
-		_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ.CSSClasses(templ_7745c5c3_Var3).String()))
-		if templ_7745c5c3_Err != nil {
-			return templ_7745c5c3_Err
-		}
-		_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("\">")
-		if templ_7745c5c3_Err != nil {
-			return templ_7745c5c3_Err
-		}
-		var templ_7745c5c3_Var4 = []any{"caption-top"}
-		templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var4...)
-		if templ_7745c5c3_Err != nil {
-			return templ_7745c5c3_Err
-		}
-		_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("<caption class=\"")
-		if templ_7745c5c3_Err != nil {
-			return templ_7745c5c3_Err
-		}
-		_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ.CSSClasses(templ_7745c5c3_Var4).String()))
-		if templ_7745c5c3_Err != nil {
-			return templ_7745c5c3_Err
-		}
-		_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("\">CPU stats</caption> <thead><tr><th>Sockets</th><th>Cores</th><th>Threads</th><th>Allocated</th></tr></thead> <tbody><tr><td>")
-		if templ_7745c5c3_Err != nil {
-			return templ_7745c5c3_Err
-		}
-		var templ_7745c5c3_Var5 string
-		templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprint(cs.CPU.Sockets))
-		if templ_7745c5c3_Err != nil {
-			return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/cluster.templ`, Line: 27, Col: 35}
-		}
-		_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5))
-		if templ_7745c5c3_Err != nil {
-			return templ_7745c5c3_Err
-		}
-		_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("</td><td>")
-		if templ_7745c5c3_Err != nil {
-			return templ_7745c5c3_Err
-		}
-		var templ_7745c5c3_Var6 string
-		templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprint(cs.CPU.Cores))
-		if templ_7745c5c3_Err != nil {
-			return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/cluster.templ`, Line: 30, Col: 33}
-		}
-		_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6))
-		if templ_7745c5c3_Err != nil {
-			return templ_7745c5c3_Err
-		}
-		_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("</td><td>")
-		if templ_7745c5c3_Err != nil {
-			return templ_7745c5c3_Err
-		}
-		var templ_7745c5c3_Var7 string
-		templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprint(cs.CPU.Threads))
-		if templ_7745c5c3_Err != nil {
-			return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/cluster.templ`, Line: 33, Col: 35}
-		}
-		_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7))
-		if templ_7745c5c3_Err != nil {
-			return templ_7745c5c3_Err
-		}
-		_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("</td><td>")
-		if templ_7745c5c3_Err != nil {
-			return templ_7745c5c3_Err
-		}
-		var templ_7745c5c3_Var8 string
-		templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprint(cs.CPU.Allocated))
-		if templ_7745c5c3_Err != nil {
-			return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/cluster.templ`, Line: 36, Col: 37}
-		}
-		_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8))
-		if templ_7745c5c3_Err != nil {
-			return templ_7745c5c3_Err
-		}
-		_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("</td></tr><tr><td>")
-		if templ_7745c5c3_Err != nil {
-			return templ_7745c5c3_Err
-		}
-		var templ_7745c5c3_Var9 string
-		templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprint(diff.CPU.Sockets))
-		if templ_7745c5c3_Err != nil {
-			return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/cluster.templ`, Line: 41, Col: 42}
-		}
-		_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9))
-		if templ_7745c5c3_Err != nil {
-			return templ_7745c5c3_Err
-		}
-		_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("</td><td>")
-		if templ_7745c5c3_Err != nil {
-			return templ_7745c5c3_Err
-		}
-		var templ_7745c5c3_Var10 string
-		templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprint(diff.CPU.Cores))
-		if templ_7745c5c3_Err != nil {
-			return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/cluster.templ`, Line: 44, Col: 40}
-		}
-		_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10))
-		if templ_7745c5c3_Err != nil {
-			return templ_7745c5c3_Err
-		}
-		_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("</td><td>")
-		if templ_7745c5c3_Err != nil {
-			return templ_7745c5c3_Err
-		}
-		var templ_7745c5c3_Var11 string
-		templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprint(diff.CPU.Threads))
-		if templ_7745c5c3_Err != nil {
-			return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/cluster.templ`, Line: 47, Col: 42}
-		}
-		_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11))
-		if templ_7745c5c3_Err != nil {
-			return templ_7745c5c3_Err
-		}
-		_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("</td><td>")
-		if templ_7745c5c3_Err != nil {
-			return templ_7745c5c3_Err
-		}
-		var templ_7745c5c3_Var12 string
-		templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprint(diff.CPU.Allocated))
-		if templ_7745c5c3_Err != nil {
-			return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/cluster.templ`, Line: 50, Col: 44}
-		}
-		_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12))
-		if templ_7745c5c3_Err != nil {
-			return templ_7745c5c3_Err
-		}
-		_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("</td></tr></tbody></table>")
		if templ_7745c5c3_Err != nil {
			return templ_7745c5c3_Err
		}
@ -193,3 +59,161 @@ func ClusterInfo(cs *cluster.ClusterStats, diff cluster.StatDiff, navbar []compo
		return templ_7745c5c3_Err
	})
}
+
+func CPUStats(cs *cluster.ClusterStats, diff cluster.StatDiff) templ.Component {
+	return templ.ComponentFunc(func(ctx context.Context, templ_7745c5c3_W io.Writer) (templ_7745c5c3_Err error) {
+		templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templ_7745c5c3_W.(*bytes.Buffer)
+		if !templ_7745c5c3_IsBuffer {
+			templ_7745c5c3_Buffer = templ.GetBuffer()
+			defer templ.ReleaseBuffer(templ_7745c5c3_Buffer)
+		}
+		ctx = templ.InitializeContext(ctx)
+		templ_7745c5c3_Var3 := templ.GetChildren(ctx)
+		if templ_7745c5c3_Var3 == nil {
+			templ_7745c5c3_Var3 = templ.NopComponent
+		}
+		ctx = templ.ClearChildren(ctx)
+		var templ_7745c5c3_Var4 = []any{"table-auto", "w-full"}
+		templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var4...)
+		if templ_7745c5c3_Err != nil {
+			return templ_7745c5c3_Err
+		}
+		_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("<table class=\"")
+		if templ_7745c5c3_Err != nil {
+			return templ_7745c5c3_Err
+		}
+		_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ.CSSClasses(templ_7745c5c3_Var4).String()))
+		if templ_7745c5c3_Err != nil {
+			return templ_7745c5c3_Err
+		}
+		_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("\">")
+		if templ_7745c5c3_Err != nil {
+			return templ_7745c5c3_Err
+		}
+		var templ_7745c5c3_Var5 = []any{"caption-top"}
+		templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var5...)
+		if templ_7745c5c3_Err != nil {
+			return templ_7745c5c3_Err
+		}
+		_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("<caption class=\"")
+		if templ_7745c5c3_Err != nil {
+			return templ_7745c5c3_Err
+		}
+		_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ.CSSClasses(templ_7745c5c3_Var5).String()))
+		if templ_7745c5c3_Err != nil {
+			return templ_7745c5c3_Err
+		}
+		_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("\">CPU stats</caption> <thead><tr><th></th><th>Sockets</th><th>Cores</th><th>Threads</th><th>Allocated</th></tr></thead> <tbody><tr><td>Latest</td><td>")
+		if templ_7745c5c3_Err != nil {
+			return templ_7745c5c3_Err
+		}
+		var templ_7745c5c3_Var6 string
+		templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprint(cs.CPU.Sockets))
+		if templ_7745c5c3_Err != nil {
+			return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/cluster.templ`, Line: 36, Col: 33}
+		}
+		_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6))
+		if templ_7745c5c3_Err != nil {
+			return templ_7745c5c3_Err
+		}
+		_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("</td><td>")
+		if templ_7745c5c3_Err != nil {
+			return templ_7745c5c3_Err
+		}
+		var templ_7745c5c3_Var7 string
+		templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprint(cs.CPU.Cores))
+		if templ_7745c5c3_Err != nil {
+			return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/cluster.templ`, Line: 39, Col: 31}
+		}
+		_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7))
+		if templ_7745c5c3_Err != nil {
+			return templ_7745c5c3_Err
+		}
+		_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("</td><td>")
+		if templ_7745c5c3_Err != nil {
+			return templ_7745c5c3_Err
+		}
+		var templ_7745c5c3_Var8 string
+		templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprint(cs.CPU.Threads))
+		if templ_7745c5c3_Err != nil {
+			return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/cluster.templ`, Line: 42, Col: 33}
+		}
+		_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8))
+		if templ_7745c5c3_Err != nil {
+			return templ_7745c5c3_Err
+		}
+		_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("</td><td>")
+		if templ_7745c5c3_Err != nil {
+			return templ_7745c5c3_Err
+		}
+		var templ_7745c5c3_Var9 string
+		templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprint(cs.CPU.Allocated))
+		if templ_7745c5c3_Err != nil {
+			return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/cluster.templ`, Line: 45, Col: 35}
+		}
+		_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9))
+		if templ_7745c5c3_Err != nil {
+			return templ_7745c5c3_Err
+		}
+		_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("</td></tr><tr><td>Change</td><td>")
+		if templ_7745c5c3_Err != nil {
+			return templ_7745c5c3_Err
+		}
+		var templ_7745c5c3_Var10 string
+		templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprint(diff.CPU.Sockets))
+		if templ_7745c5c3_Err != nil {
+			return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/cluster.templ`, Line: 53, Col: 35}
+		}
+		_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10))
+		if templ_7745c5c3_Err != nil {
+			return templ_7745c5c3_Err
+		}
+		_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("</td><td>")
+		if templ_7745c5c3_Err != nil {
+			return templ_7745c5c3_Err
+		}
+		var templ_7745c5c3_Var11 string
+		templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprint(diff.CPU.Cores))
+		if templ_7745c5c3_Err != nil {
+			return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/cluster.templ`, Line: 56, Col: 33}
+		}
+		_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11))
+		if templ_7745c5c3_Err != nil {
+			return templ_7745c5c3_Err
+		}
+		_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("</td><td>")
+		if templ_7745c5c3_Err != nil {
+			return templ_7745c5c3_Err
+		}
+		var templ_7745c5c3_Var12 string
+		templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprint(diff.CPU.Threads))
+		if templ_7745c5c3_Err != nil {
+			return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/cluster.templ`, Line: 59, Col: 35}
+		}
+		_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12))
+		if templ_7745c5c3_Err != nil {
+			return templ_7745c5c3_Err
+		}
+		_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("</td><td>")
+		if templ_7745c5c3_Err != nil {
+			return templ_7745c5c3_Err
+		}
+		var templ_7745c5c3_Var13 string
+		templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprint(diff.CPU.Allocated))
+		if templ_7745c5c3_Err != nil {
+			return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/cluster.templ`, Line: 62, Col: 37}
+		}
+		_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13))
+		if templ_7745c5c3_Err != nil {
+			return templ_7745c5c3_Err
+		}
+		_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("</td></tr></tbody></table>")
+		if templ_7745c5c3_Err != nil {
+			return templ_7745c5c3_Err
+		}
+		if !templ_7745c5c3_IsBuffer {
+			_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteTo(templ_7745c5c3_W)
+		}
+		return templ_7745c5c3_Err
+	})
+}
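For reference, a minimal sketch (not part of this commit) of how the generated CPUStats component might be served on its own. Only the CPUStats(cs, diff) signature and the templ.Component Render(ctx, w) method come from the diff above; the package name, handler name, and the view/cluster import paths are assumptions based on the repository layout.

// Package server is an illustrative name; it is not part of the repository.
package server

import (
	"net/http"

	// Import paths assumed from the module layout seen elsewhere in the repo.
	"git.staur.ca/stobbsm/clustvirt/cluster"
	"git.staur.ca/stobbsm/clustvirt/view"
)

// cpuStatsHandler renders just the CPU stats table for the given snapshot
// and diff. A templ.Component writes itself to any io.Writer, so the
// ResponseWriter can be passed straight to Render.
func cpuStatsHandler(cs *cluster.ClusterStats, diff cluster.StatDiff) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if err := view.CPUStats(cs, diff).Render(r.Context(), w); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
	})
}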