+
+
+
+
+{{ end }}
diff --git a/view/host.gohtml b/view/host.gohtml
new file mode 100644
index 0000000..888d47b
--- /dev/null
+++ b/view/host.gohtml
@@ -0,0 +1,4 @@
+{{ define "content" }}
+<h1>{{.HostName}}</h1>
+<p>This will contain all the graphs, storage pool lists, VM lists, etc. for the selected host.</p>
+{{ end }}
diff --git a/view/pages.go b/view/pages.go
new file mode 100644
index 0000000..f18ee03
--- /dev/null
+++ b/view/pages.go
@@ -0,0 +1,41 @@
+package view
+
+import (
+ "log"
+ "os"
+)
+
+// Regular pages are defined as views here, like the homepage
+
+// Major components of each page.
+const (
+ index = `view/_index.gohtml`
+ header = `view/_header.gohtml`
+ footer = `view/_footer.gohtml`
+)
+
+// These are the static pages of the site that don't change; each is loaded as a template for rendering.
+const (
+ home = `view/static/home.gohtml`
+)
+
+// Fail fast at startup if any required template file is missing.
+func init() {
+	for _, f := range []string{index, header, footer, home} {
+		if _, err := os.Stat(f); err != nil {
+			log.Fatalf("missing template %s: %v", f, err)
+		}
+	}
+}
+
+// ViewHome is the pre-built view for the homepage.
+var ViewHome *View
+
+func init() {
+	log.Println("Initializing homepage")
+	var err error
+	ViewHome, err = NewFromFile(home)
+	if err != nil {
+		log.Fatal(err)
+	}
+}
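
A note on the template wiring: `Render` in view.go below executes a template named `"_index"`, while `ParseFiles` registers files under their base names (`"_index.gohtml"`). That only works if `_index.gohtml` itself contains a `{{ define "_index" }}` block stitching the header, content, and footer together. That file isn't shown in full in this diff, so this is only a minimal sketch of what it presumably looks like:

```gohtml
{{ define "_index" }}
{{ template "header" . }}
{{ template "content" . }}
{{ template "footer" . }}
{{ end }}
```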
diff --git a/view/static/home.gohtml b/view/static/home.gohtml
new file mode 100644
index 0000000..e1dd37d
--- /dev/null
+++ b/view/static/home.gohtml
@@ -0,0 +1,87 @@
+{{ define "content" }}
+<h1>What is this?</h1>
+<p>
+    Clustvirt (work-in-progress name) aims to be the agnostic cluster controller for libvirtd.
+    The server component displays the WebUI and runs the REST API used to control one to many
+    libvirtd hosts: managing virtual machines and LXC containers (through libvirtd), gathering
+    information about each host, and monitoring each host.
+</p>
+<p>The aims of this project are:</p>
+<ul>
+    <li>Base OS agnostic. If it can run libvirtd, this should be able to control it on some level</li>
+    <li>Open source, always</li>
+    <li>Control the virtual machine life cycle on one or more libvirtd hosts</li>
+    <li>Add clustering capabilities to libvirtd hosts, including:
+        <ul>
+            <li>Migration of VMs</li>
+            <li>Synchronizing secrets</li>
+            <li>Synchronizing VLANs, bridges, and host-only networking</li>
+            <li>Sharing HA storage availability</li>
+            <li>Locking shared resources like disks</li>
+            <li>Starting VMs marked for HA on another host when one goes down</li>
+        </ul>
+    </li>
+    <li>Manage a library of cloud-init resources and templates to build new VMs quickly</li>
+    <li>Local storage management, including local directory, LVM, ZFS (if installed)</li>
+    <li>Advanced storage management, such as Ceph, GlusterFS, DRBD, iSCSI, NFS</li>
+    <li>Storage synchronization of local disks between hosts (ZFS snapshots, LVM snapshots, rsync)</li>
+    <li>Backup scheduling, creation, restoration</li>
+</ul>
+<p>What this project does not do, but may someday (future goals):</p>
+<ul>
+    <li>Install the OS which libvirtd is running on</li>
+    <li>Install/provision libvirtd on a host that does not have it installed</li>
+    <li>Tools to move from another vendor to clustvirt/libvirtd</li>
+    <li>VM templates for common aspects of VM creation and management, like appliances</li>
+    <li>External tool access that can be used to manage things that are not managed here (the cephadm dashboard, for instance)</li>
+</ul>
+<p>What this project will NEVER do, even if asked really nicely:</p>
+<ul>
+    <li>Anything that does not have an open-source standard behind it</li>
+    <li>Directly control a guest operating system</li>
+</ul>
+<h2>Why does this even exist?</h2>
+<ul>
+    <li>Broadcom buying VMWare, and VMWare dropping its free tier for homelabbers, pissed me off</li>
+    <li>Vendor lock-in pisses me off</li>
+    <li>Even good open-source hyperconverged systems (Proxmox, as an example) exhibit a form of vendor lock-in</li>
+    <li>Libvirt is terrific and has the functionality for everything those other providers offer, but there really is not a
+        great option for those dipping their toes into open source</li>
+    <li>It's fun to build things that solve a need</li>
+    <li>I really want to do it</li>
+</ul>
+<p>
+    I recently created a post on reddit announcing that I was building this,
+    and while the majority of responses were supportive, some even offering features that may enhance what I originally
+    set out to do, many responded with "Why do we need another one??"
+</p>
+<p>
+    Besides the list above about why this exists, I want to clarify a few things those individuals did not seem to
+    get: this is not a rebuild of Proxmox, CloudStack, VMWare, Harvester, or any of the other "hyperconverged
+    infrastructure operating system" offerings out there. It will not take over your base operating system; it will
+    just act as a cluster manager and interface to the existing libvirtd instances on those machines. Nor will it
+    prescribe a set of requirements that makes it hard to move your own infrastructure around.
+</p>
+<p>
+    At the heart of this project is my hatred of the enshittification of open source, where it has become
+    just another way to make money and control the ecosystem. RedHat tried to do it by locking down their source code,
+    Proxmox does it by making sure anything you do on Proxmox is tied to Proxmox (no offense to Proxmox), and even
+    Hashicorp, which I loved so dearly, changed from a pure open-source licensing model to one that protects the
+    business over the community.
+</p>
+<p>I will not let that happen here.</p>
+<p>
+    This project will follow the Unix philosophy of building on existing standards, combining tools, and
+    having one tool do one job well. This does not mean there will be one application for each aspect of the job, but
+    that this application stack will manage libvirtd well, and have individual and configurable paths to manage each
+    sub-aspect of the libvirt stack. This stack will not create a Ceph cluster for you; it leaves you to do that. It
+    will not even talk to a Ceph cluster. It will, however, let you add that cluster via configuration options to
+    define it as a storage pool that libvirt can use.
+</p>
+<p>
+    If you want something with a single interface for creating every sub-aspect libvirt can use (managing all
+    firewall rules, creating a Ceph cluster, etc.), use something like Proxmox, which includes that functionality
+    built in. This isn't the stack for you.
+</p>
+
+{{ end }}
diff --git a/view/view.go b/view/view.go
new file mode 100644
index 0000000..7b3023c
--- /dev/null
+++ b/view/view.go
@@ -0,0 +1,60 @@
+// Package view handles WebUI generation for clustvirt. The methods and utilities in this package control what is
+// viewed, which templates are loaded, and how those templates are built. Caching is not considered beyond what is
+// done automatically by Go (if anything).
+package view
+
+import (
+ "html/template"
+ "io"
+ "log"
+ "os"
+)
+
+// View is responsible for assembling a group of templates, providing
+// methods to add data and compose pages in a common way.
+type View struct {
+ content string
+ template *template.Template
+}
+
+var basetemplate *template.Template
+
+// New returns a new View. The content argument is the template source for
+// the page body and must contain a {{ define "content" }} block.
+func New(content string) *View {
+	if basetemplate == nil {
+		log.Println("Initializing base template")
+		basetemplate = template.Must(template.New("").ParseFiles(index, header, footer))
+	}
+	log.Println("Cloning base template")
+	v := &View{content: content, template: template.Must(basetemplate.Clone())}
+	if err := v.parse(content); err != nil {
+		log.Println("parsing content template:", err)
+	}
+
+	return v
+}
+
+// NewFromFile loads the content template from a file.
+func NewFromFile(file string) (*View, error) {
+	b, err := os.ReadFile(file)
+	if err != nil {
+		return nil, err
+	}
+	return New(string(b)), nil
+}
+
+func (v *View) parse(tmpl string) error {
+ log.Println("Parsing template contents")
+ if _, err := v.template.Parse(tmpl); err != nil {
+ return err
+ }
+ log.Println("Template parsed")
+ return nil
+}
+
+// Render executes the assembled template with data, writing the result to w.
+func (v *View) Render(w io.Writer, data any) error {
+	log.Println("Executing template")
+	return v.template.ExecuteTemplate(w, "_index", data)
+}
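
To round this out, here is a sketch of how a handler might serve the homepage with this package. The module path `clustvirt.local/view` and the port are hypothetical, not part of this diff:

```go
package main

import (
	"log"
	"net/http"

	"clustvirt.local/view" // hypothetical import path; substitute the project's real module path
)

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// ViewHome was built by the init in pages.go; Render writes the
		// executed "_index" template (header, content, footer) to the response.
		if err := view.ViewHome.Render(w, nil); err != nil {
			log.Println("rendering home:", err)
		}
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```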