mkvm/volumes/volumes.go

package volumes

import (
	"compress/bzip2"
	"compress/gzip"
	"context"
	"crypto/sha256"
	"crypto/sha512"
	"fmt"
	"hash"
	"io"
	"net/http"
	"net/url"
	"os"
	"strings"
	"time"

	"github.com/sirupsen/logrus"
	"github.com/xi2/xz"
	"libvirt.org/go/libvirt"

	"mkvm/libvirtx"
	"mkvm/volumes/pools"
)
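
// Create allocates a volume named name in pool and fills it from imageURL,
// which may be an http(s) URL or a local file path. The URL fragment can carry
// optional hash and compression hints, described in the comments below.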
func Create(conn *libvirt.Connect, pool pools.StoragePool, sizeGB int, imageURL string, name string) error {
	volume, err := pool.CreateVolume(name, uint64(sizeGB))
	if err != nil {
		return fmt.Errorf("error creating volume: %v", err)
	}
	defer libvirtx.Free(volume)

	var reader io.Reader
	var contentLength int64
	start := time.Now()

	parsedImageURL, err := url.Parse(imageURL)
	if err != nil {
		return fmt.Errorf("error parsing URL %s: %v", imageURL, err)
	}
	// parse everything after the # in the URL as a query, so chain arguments with &
	fragmentQuery, err := url.ParseQuery(parsedImageURL.Fragment)
	if err != nil && parsedImageURL.Fragment != "" {
		return err
	}

	var actualHash hash.Hash
	var expectedHash string
	// this is fully untested, but the idea is something like:
	// https://example.whatever/img.qcow2#hash=sha256:e8c7c3c983718ebc78d8738f562d55bfa77c4cf6f08241d246861d5ea9eb9cd2
	// sha256 and sha512 supported
	fragmenthash := strings.SplitN(fragmentQuery.Get("hash"), ":", 2)
	if len(fragmenthash) == 2 {
		switch fragmenthash[0] {
		case "sha256":
			actualHash = sha256.New()
		case "sha512":
			actualHash = sha512.New()
		default:
			return fmt.Errorf("unknown hash format: %s", fragmenthash[0])
		}
		expectedHash = fragmenthash[1]
	} else if strings.HasPrefix(imageURL, "https://") {
		logrus.Warnf("downloading image from %s without verifying its hash", imageURL)
	}
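
	// even without an expected hash, compute sha256 so the digest of whatever
	// was fetched can be logged at the end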
	if actualHash == nil {
		actualHash = sha256.New()
	}
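
	// open the image source: http(s) URLs are downloaded, bare paths are read
	// from the local filesystem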
	if parsedImageURL.Scheme == "https" || parsedImageURL.Scheme == "http" {
		ctx, cancel := context.WithTimeout(context.Background(), time.Hour)
		defer cancel()
		req, err := http.NewRequestWithContext(ctx, http.MethodGet, imageURL, nil)
		if err != nil {
			return fmt.Errorf("error building request to %s: %v", imageURL, err)
		}
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			return err
		}
		defer resp.Body.Close()
		if resp.StatusCode != http.StatusOK {
			logrus.Warn("error fetching image: ", resp.Status)
			return fmt.Errorf("error fetching base image: %s", resp.Status)
		}
		reader = resp.Body
		contentLength = resp.ContentLength
	} else if parsedImageURL.Scheme == "" {
		logrus.Debugf("reading local file %s for root volume", parsedImageURL.Path)
		localfile, err := os.Open(parsedImageURL.Path)
		if err != nil {
			return err
		}
		defer localfile.Close()
		stat, err := localfile.Stat()
		if err != nil {
			return err
		}
		reader = localfile
		contentLength = stat.Size()
	} else {
		return fmt.Errorf("unsupported image URL scheme %s", parsedImageURL.Scheme)
	}
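
	// tee every read through the hash so the digest is computed while streaming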
	if actualHash != nil {
		reader = io.TeeReader(reader, actualHash)
	}

	// check for known compression suffixes and decompress
	// can also be enabled by adding to the URL fragment:
	// https://example.whatever/img.qcow2#compression=xz
	fragmentCompression := fragmentQuery.Get("compression")
	if strings.HasSuffix(parsedImageURL.Path, ".xz") || fragmentCompression == "xz" {
		logrus.Debug("image is xz compressed, decompressing")
		reader, err = xz.NewReader(reader, 0)
		if err != nil {
			return err
		}
	} else if strings.HasSuffix(parsedImageURL.Path, ".gz") || fragmentCompression == "gzip" {
		logrus.Debug("image is gz compressed, decompressing")
		reader, err = gzip.NewReader(reader)
		if err != nil {
			return err
		}
	} else if strings.HasSuffix(parsedImageURL.Path, ".bz2") || fragmentCompression == "bzip2" {
		logrus.Debug("image is bzip2 compressed, decompressing")
		reader = bzip2.NewReader(reader)
	}
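
	// stream the (possibly decompressed) image into the libvirt volume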
	stream, err := conn.NewStream(0)
	if err != nil {
		return err
	}
	defer libvirtx.Free(stream)
	err = volume.Upload(stream, 0, 0, 0)
	if err != nil {
		return err
	}
	logrus.Debug("uploading to volume")

	totalRead := 0
	nextLog := time.Now()
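
	// SendAll calls the callback for up to i bytes at a time; returning a
	// zero-length slice signals end-of-stream to libvirt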
	err = stream.SendAll(func(s *libvirt.Stream, i int) ([]byte, error) {
		out := []byte{}
		for len(out) < i {
			buf := make([]byte, i-len(out))
			// fill the buffer of i bytes from the reader
			read, err := reader.Read(buf)
			if read > 0 {
				out = append(out, buf[:read]...)
				totalRead += read
			}
			if (contentLength > 0 && time.Until(nextLog) <= 0) || (int64(totalRead) == contentLength && read > 0) {
				logrus.Debug("transfer progress: ", totalRead, "/", contentLength, " (", int((float64(totalRead)/float64(contentLength))*100), "%) ", time.Since(start).Round(time.Millisecond))
				nextLog = time.Now().Add(time.Second * 5)
			}
			if err == io.EOF {
				if read > 0 {
					// partial read before hitting EOF, return the buffer
					return out, nil
				}
				// Go's io interfaces raise io.EOF to indicate the end of the stream,
				// but libvirt indicates EOF with a zero-length response
				return make([]byte, 0), nil
			}
			if err != nil {
				logrus.Error("error while reading buffer: ", err)
				return out, err
			}
		}
		return out, nil
	})
	if err != nil {
		return err
	}
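
	// verify the digest against the expected value from the URL fragment, if one was given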
	hashstr := fmt.Sprintf("%x", actualHash.Sum(nil))
	if expectedHash != "" {
		if hashstr != expectedHash {
			return fmt.Errorf("got bad hash when downloading %s: got %s, expected %s", imageURL, hashstr, expectedHash)
		}
		logrus.Debug("hash of downloaded image matched the expected value")
	} else {
		logrus.Infof("downloaded %s (hash: %s)", imageURL, hashstr)
	}

	// qcow2 quirk: because we download a qcow2 file instead of the raw file, the server-provided metadata about the
	// virtual volume is left intact, which sets the virtual disk size to whatever the server wants it to be, ignoring
	// our requested size. To work around this, we resize the virtual disk before first boot.
	if pool.Type() == pools.StoragePoolTypeDir {
		if err := pool.ResizeVolume(name, uint64(sizeGB)); err != nil {
			logrus.WithField("volume", name).Error("error resizing qcow2 volume after download")
			return err
		}
	}
	return nil
}
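
// Example image URLs accepted by Create (hypothetical examples, combining the
// fragment hints described above):
//
//	https://example.whatever/img.qcow2#hash=sha256:e8c7c3c983718ebc78d8738f562d55bfa77c4cf6f08241d246861d5ea9eb9cd2
//	https://example.whatever/img.qcow2.xz#compression=xz&hash=sha512:...
//	/var/lib/libvirt/images/base.qcow2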