package cluster

// ClusterStats is used to gather stats for the entire cluster
type ClusterStats struct {
	CPU struct {
		Sockets   uint32
		Cores     uint32
		Threads   uint32
		Allocated uint32
	}
	Memory struct {
		Total     uint64
		Free      uint64
		Buffers   uint64
		Cached    uint64
		Allocated uint64
	}
	Storage struct {
		Total    uint64
		Used     uint64
		Free     uint64
		Active   uint32
		Inactive uint32
		Pools    uint32

		Volumes struct {
			Total    uint32
			Active   uint32
			Inactive uint32
		}
	}
	VM struct {
		Count   uint32
		Started uint32
		Stopped uint32
	}
	Host struct {
		Count     uint32
		Available uint32
	}
	Network struct {
		Count    uint32
		Active   uint32
		Inactive uint32
	}

	old *ClusterStats
	c   *Cluster
}

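// The helper below is a hypothetical sketch, not part of the original API:
// it shows how a consumer might derive a utilization figure from the gathered
// ClusterStats fields. The name memoryUtilization is an assumption used only
// for illustration.
func memoryUtilization(cs *ClusterStats) float64 {
	if cs.Memory.Total == 0 {
		// Avoid dividing by zero before the first Update has run.
		return 0
	}
	return float64(cs.Memory.Allocated) / float64(cs.Memory.Total) * 100
}
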
// StatDiff reports, field by field, how the statistics changed between two
// updates.
type StatDiff struct {
	CPU struct {
		Sockets   int
		Cores     int
		Threads   int
		Allocated int
	}
	Memory struct {
		Total     int
		Free      int
		Buffers   int
		Cached    int
		Allocated int
	}
	Storage struct {
		Total    int
		Used     int
		Free     int
		Active   int
		Inactive int
		Pools    int

		Volumes struct {
			Total    int
			Active   int
			Inactive int
		}
	}
	VM struct {
		Count   int
		Started int
		Stopped int
	}
	Host struct {
		Count     int
		Available int
	}
	Network struct {
		Count    int
		Active   int
		Inactive int
	}
}

// Init is given a cluster, which it then uses to load the initial statistics.
// It does not close connections, but uses the host connections available to
// the cluster to add statistics together.
func Init(c *Cluster) *ClusterStats {
	cs := &ClusterStats{c: c}
	cs.Update()
	return cs
}

// Update triggers the stats collector to refresh its statistics
func (cs *ClusterStats) Update() {
	// Keep the previous values around so Diff can compare against them.
	cs.old = cs.copy()

	cs.reset()

	// Shared (HA) pools are visible from several hosts; track the ones that
	// have already been counted so their capacity is only added once.
	countedSharedPools := map[string]struct{}{}

	// Start looping through each host in the cluster, adding to the total
	for _, h := range cs.c.hosts {
		cs.Host.Count++
		cs.Host.Available++

		cs.CPU.Sockets += h.HostInfo.Sockets
		cs.CPU.Cores += h.HostInfo.Cores
		cs.CPU.Threads += h.HostInfo.Threads

		cs.Memory.Total += h.NodeMemory.Total
		cs.Memory.Free += h.NodeMemory.Free
		cs.Memory.Buffers += h.NodeMemory.Buffers
		cs.Memory.Cached += h.NodeMemory.Cached

		// Storage Pool counting
		cs.Storage.Pools += uint32(len(h.StoragePoolList))

		// Loop through available storage pools
		for _, sp := range h.StoragePoolList {
			if _, ok := countedSharedPools[sp.Name]; ok {
				// Already counted this shared pool, move on
				continue
			}
			if sp.HAEnabled {
				countedSharedPools[sp.Name] = struct{}{}
			}
			if !sp.Active {
				cs.Storage.Inactive++
				continue
			}
			cs.Storage.Active++
			cs.Storage.Total += sp.Capacity
			cs.Storage.Used += sp.Allocation
			cs.Storage.Free += sp.Capacity - sp.Allocation

			// Volumes in the pool; every volume in an active pool is
			// counted as active.
			cs.Storage.Volumes.Total += uint32(len(sp.Volumes))
			cs.Storage.Volumes.Active += uint32(len(sp.Volumes))
		}

		// VM Count
		cs.VM.Count += uint32(len(h.VMList))
		for _, vm := range h.VMList {
			cs.CPU.Allocated += uint32(vm.VCPUs)
			cs.Memory.Allocated += uint64(vm.Memory)
			if vm.Active {
				cs.VM.Started++
				continue
			}
			cs.VM.Stopped++
		}

		// Network count
		cs.Network.Count += uint32(len(h.NetworkList))
		for _, ni := range h.NetworkList {
			if ni.Active {
				cs.Network.Active++
				continue
			}
			cs.Network.Inactive++
		}
	}
}

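// pollUpdates is a minimal, hypothetical sketch of how Update is intended to
// be driven repeatedly; the ticks channel is an assumption standing in for
// whatever scheduling the caller uses (a time.Ticker, an event loop, etc.).
func pollUpdates(cs *ClusterStats, ticks <-chan struct{}) {
	for range ticks {
		// Each refresh keeps the previous totals in cs.old so Diff can
		// report what changed since the last tick.
		cs.Update()
	}
}
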
// Diff returns a StatDiff holding, for every field, the previous value minus
// the refreshed value.
func (cs *ClusterStats) Diff() StatDiff {
	var d StatDiff

	// Convert each operand before subtracting so that a counter that grew
	// since the previous update does not wrap around the unsigned types.
	d.CPU.Sockets = int(cs.old.CPU.Sockets) - int(cs.CPU.Sockets)
	d.CPU.Cores = int(cs.old.CPU.Cores) - int(cs.CPU.Cores)
	d.CPU.Threads = int(cs.old.CPU.Threads) - int(cs.CPU.Threads)
	d.CPU.Allocated = int(cs.old.CPU.Allocated) - int(cs.CPU.Allocated)

	d.Memory.Total = int(cs.old.Memory.Total) - int(cs.Memory.Total)
	d.Memory.Free = int(cs.old.Memory.Free) - int(cs.Memory.Free)
	d.Memory.Buffers = int(cs.old.Memory.Buffers) - int(cs.Memory.Buffers)
	d.Memory.Cached = int(cs.old.Memory.Cached) - int(cs.Memory.Cached)
	d.Memory.Allocated = int(cs.old.Memory.Allocated) - int(cs.Memory.Allocated)

	d.Storage.Total = int(cs.old.Storage.Total) - int(cs.Storage.Total)
	d.Storage.Used = int(cs.old.Storage.Used) - int(cs.Storage.Used)
	d.Storage.Free = int(cs.old.Storage.Free) - int(cs.Storage.Free)
	d.Storage.Active = int(cs.old.Storage.Active) - int(cs.Storage.Active)
	d.Storage.Inactive = int(cs.old.Storage.Inactive) - int(cs.Storage.Inactive)
	d.Storage.Pools = int(cs.old.Storage.Pools) - int(cs.Storage.Pools)
	d.Storage.Volumes.Total = int(cs.old.Storage.Volumes.Total) - int(cs.Storage.Volumes.Total)
	d.Storage.Volumes.Active = int(cs.old.Storage.Volumes.Active) - int(cs.Storage.Volumes.Active)
	d.Storage.Volumes.Inactive = int(cs.old.Storage.Volumes.Inactive) - int(cs.Storage.Volumes.Inactive)

	d.VM.Count = int(cs.old.VM.Count) - int(cs.VM.Count)
	d.VM.Started = int(cs.old.VM.Started) - int(cs.VM.Started)
	d.VM.Stopped = int(cs.old.VM.Stopped) - int(cs.VM.Stopped)

	d.Host.Count = int(cs.old.Host.Count) - int(cs.Host.Count)
	d.Host.Available = int(cs.old.Host.Available) - int(cs.Host.Available)

	d.Network.Count = int(cs.old.Network.Count) - int(cs.Network.Count)
	d.Network.Active = int(cs.old.Network.Active) - int(cs.Network.Active)
	d.Network.Inactive = int(cs.old.Network.Inactive) - int(cs.Network.Inactive)

	return d
}

// copy the ClusterStats into a new ClusterStats value so the previous
// statistics can be compared against the refreshed ones.
func (cs *ClusterStats) copy() *ClusterStats {
	ncs := *cs
	return &ncs
}

// reset all values to zero value
func (cs *ClusterStats) reset() {
	cs.CPU.Sockets = 0
	cs.CPU.Cores = 0
	cs.CPU.Threads = 0
	cs.CPU.Allocated = 0

	cs.Memory.Total = 0
	cs.Memory.Free = 0
	cs.Memory.Buffers = 0
	cs.Memory.Cached = 0
	cs.Memory.Allocated = 0

	cs.Storage.Total = 0
	cs.Storage.Used = 0
	cs.Storage.Free = 0
	cs.Storage.Active = 0
	cs.Storage.Inactive = 0
	cs.Storage.Pools = 0
	cs.Storage.Volumes.Total = 0
	cs.Storage.Volumes.Active = 0
	cs.Storage.Volumes.Inactive = 0

	cs.VM.Count = 0
	cs.VM.Started = 0
	cs.VM.Stopped = 0

	cs.Host.Count = 0
	cs.Host.Available = 0

	cs.Network.Count = 0
	cs.Network.Active = 0
	cs.Network.Inactive = 0
}
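
// collectAndDiff is a hypothetical end-to-end sketch of the lifecycle above:
// Init wires up the cluster and loads the first set of statistics, Update
// refreshes them, and Diff reports what changed. It assumes the caller
// already holds a connected *Cluster; the function name is illustrative only.
func collectAndDiff(c *Cluster) StatDiff {
	cs := Init(c)
	cs.Update()
	return cs.Diff()
}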