Commit 1f4820b2 authored by Maarten de Waard's avatar Maarten de Waard 🤘🏻
Browse files

Merge branch '1-first-version' into 'master'

Resolve "First version"

Closes #1

See merge request !1
parents 4a0a5c47 7869e15f
apiVersion: "v1"
name: "kubernetes-ghost"
version: "0.1.0"
description: "A storage provisioner using Greenhost's cloud platform."
- ""
- name: "Arie Peterson"
email: ""
## Set proper Cosmos permissions
Note: this needs to be done by Greenhost employees at the moment.
1. Create a Project for the cluster in Cosmos2.
2. Assign the corresponding `project_id` to all VPSs that will be part of the
   cluster.
3. Make sure that all VPSs have `api_enabled` set to `1`.
4. If `api_enabled` was already true but you had to assign the project ID, the
VPSs will have api keys that are missing the right role assignments. To fix
this, delete those api keys (you can do that from Cosmos2, by su-ing to the
relevant user, then go to "User settings", tab "API keys"); a new one will be
generated automatically when the VPS is stopped and started from Cosmos2.
5. Reboot the VPSs after changing the `api_enabled` property, and also if you
did something at step 4.
## Install driver and provisioner
Review the `values.yaml` if you want to change any settings; usually the
defaults should suffice. Then, from the top-level directory of this project
(containing `Chart.yaml`), run
`helm install --name=my-personal-ghost .`
This will install the driver on all worker nodes (using a DaemonSet), and a
provisioner that will run as a pod on one worker node (using a Deployment).
# Kubernetes ghost
See also the project website for more user-facing documentation.
Kubernetes ghost (short for "Greenhost storage") is a Kubernetes component that
allows Kubernetes clusters running on Greenhost's cloud platform to use the
platform's virtual disk images for persistent storage.
It consists of two parts that have to work together: a storage provisioner,
which typically runs as a pod on only one node, and a driver that has to be
installed on every node of the cluster.
## Provisioner
The storage provisioner manages the creation and deletion of persistent
volumes (PVs), based on requests by applications in the form of so-called
persistent volume claims (PVCs). The program registers itself with the
Kubernetes API so that it will be called whenever a new PVC is inserted into the
system, or an old one is deleted.
Creating a PV actually means two things. Firstly a manifest needs to be created,
containing information on the volume; this manifest is passed to Kubernetes,
which stores it in its etcd store. Secondly, the actual backing storage object,
an rbd image in our case, needs to be created. This is done through our own
Cosmos2 API.
## Driver
The storage driver is a small program that needs to be installed on every node
making up the Kubernetes cluster. It is called by Kubernetes whenever a
persistent volume needs to be attached to or detached from a node.
In our case this means calling the Cosmos2 API to attach (or detach) the virtual
disk to the right VPS, but also creating a filesystem on it if necessary, and
mounting it into the right container.
# Image that installs the kubernetes-ghost flexvolume driver on each node.
FROM alpine:3.9
# NOTE(review): the file names after `src/` and the script name in the
# ENTRYPOINT appear to have been stripped from this copy of the file; as
# written the COPY destinations ("/") and the "sh /" entrypoint are
# incomplete — restore the original script names before building.
COPY src/ /
COPY src/ /
ENTRYPOINT ["sh", "/"]
# Copy the kubernetes-ghost flexvolume driver to the right directory under
# `/drivers`. It is expected that the `/drivers` directory is a bind-mount of
# `/usr/libexec/kubernetes/kubelet-plugins/volume`.
# Abort on the first failing command.
set -o errexit
# The `~` in `greenhost~cloud` separates vendor ("greenhost") from driver
# name ("cloud"), as the kubelet's flexvolume plugin layout requires.
mkdir -p /drivers/greenhost~cloud
# NOTE(review): the source path of this cp ("/") looks truncated — the
# driver script's file name appears to have been lost from this copy.
cp / /drivers/greenhost~cloud/cloud
echo "Copied driver, sleeping now."
# After the driver is installed, our job is done. We keep sleeping to maintain
# a healthy status.
# NOTE(review): the `do` and `done` of this loop are not visible in this
# copy of the file; as written this is not valid shell.
while :
sleep 86400
# Write output to stdout.
# Flexvolume drivers report their result as a JSON document on stdout; all
# callers here pass fixed JSON strings, so using "$*" as the printf format
# string carries no user-controlled `%` sequences in practice.
# NOTE(review): the closing `}` of this function is not visible in this copy.
output() {
printf "$*" >&1
# Write timestamped text to the log file.
# NOTE(review): assumes DRIVER_LOG is set elsewhere (its assignment is not
# visible here); the closing `}` is also not visible in this copy.
debug() {
echo "$(date) $*" >> "${DRIVER_LOG}"
# Read some data from the kernel command line.
# Puts the instance ID in the `vpsID` variable, the token for the Cosmos2
# API in the `apiToken`, and the URL to the Cosmos2 deployment that's
# running this instance in `cosmosUrl`.
# NOTE(review): the loop body that splits each word into `$key`/value, the
# `case` arms, and the closing `esac`/`done`/`}` are not visible in this
# copy of the file.
getKernelParams() {
for s in $(</proc/cmdline)
case $key in
# Get some necessary parameters from the program command line and kernel
# command line.
# $1: the JSON options blob Kubernetes passes to the flexvolume driver.
# Sets `diskID`; `vpsID` and `apiToken` presumably come from
# getKernelParams — the call to it is not visible in this copy.
getParams() {
local options=$1
debug "vps instance_id: $vpsID"
# We no longer get the api token from Kubernetes, but read it from the VPS
# kernel command line /proc/cmdline. Keeping this for future reference.
# apiToken=$(jq -r '.""' <<<"$options" | base64 -d)
# NOTE(review): writing the API token to the log file leaks a credential;
# consider removing this debug line.
debug "api token: $apiToken"
# The disk ID (according to Cosmos) is passed to us by Kubernetes, as
# instructed by the ghost provisioner.
diskID=$(jq -r '."cosmos-id"' <<<"$options")
debug "disk id: $diskID"
# NOTE(review): the closing `}` of this function is not visible in this copy.
# Perform a disk action through the Cosmos2 API.
# $1: action ("attach" or "detach"); $2: API bearer token;
# $3: Cosmos disk-image ID; $4 (optional): VPS/droplet ID to attach to.
# On success, stores the `disk_slot` from the response in the global
# `diskSlot` for the caller to use.
ghostAction() {
debug "ghostAction $*"
local action=$1
local apiToken=$2
local diskImage=$3
local vps=$4
local server="$cosmosUrl"
# Only attach and detach are supported; `;&` falls through so both
# labels share the same (empty, i.e. allowed) branch.
case $action in
attach) ;&
detach) ;;
# NOTE(review): the default `*)` label and the closing `esac` are not
# visible in this copy; these two lines report any other action as
# unsupported and stop.
output "{\"status\": \"Unsupported action\"}"
exit 0
local url="${server}/api/v2/disks/$diskImage/actions"
debug "curling cosmos: $url"
# Without a VPS id only the action type is sent (detach); with one the
# target droplet is named as well.
# NOTE(review): the `then`/`else`/`fi` of this conditional are not
# visible in this copy of the file.
if [ -z "$vps" ]
local data="{\"type\": \"$action\"}"
local data="{\"type\": \"$action\", \"droplet\": $vps}"
response=$(curl -sS -X POST -H "Authorization: Bearer $apiToken" "$url" -d "$data")
# Record the complete body from the HTTP response in the log.
debug "output: "
debug $(jq '.' <<<"$response")
# Store the `disk_slot` field of the output in a variable for later use.
diskSlot=$(jq '."disk_slot"' <<<"$response")
# Attach the disk to this VPS, create a filesystem on it if needed, and
# mount it at the requested path.
# $1: mount path; $2: JSON options blob from Kubernetes.
domount() {
debug "domount $@"
local mountPath=$1
local options=$2
getParams "$options"
# We first tell Cosmos2 to detach the disk from whatever VPS it is attached
# to now. We have to do that, because
# 1. Kubernetes doesn't give enough data at unmount time to know
# which disk to detach; also
# 2. even if we could detach directly after unmount, there might be an error
# during detach, which we need to be able to recover from.
ghostAction "detach" "$apiToken" "$diskID"
# Now tell Cosmos2 to attach the disk to the right VPS.
ghostAction "attach" "$apiToken" "$diskID" "$vpsID"
# `diskSlot` was set by ghostAction from the attach response.
debug "disk_slot: $diskSlot"
# Convert numeric disk slot index to device letter, so
# 0 becomes a, 1 becomes b, etc.
# (inner printf gives the octal char code of 'a'+slot; the outer printf
# turns the resulting `\NNN` escape into the actual letter)
diskLetter=$(printf \\$(printf '%03o' $((97 + $diskSlot))))
# NOTE(review): the assignment of `device` (presumably built from
# $diskLetter, e.g. /dev/vd$diskLetter) is not visible in this copy.
debug "device: $device"
# Use the `blkid` program to scan the device for an existing filesystem.
existingFileSystem=$(blkid "$device")
debug "blkid: $existingFileSystem"
# Create an xfs filesystem only when the device has none yet.
# NOTE(review): the `then`/`else`/`fi` of this conditional are not
# visible in this copy of the file.
if [ -z "$existingFileSystem" ]
debug "No existing filesystem; creating one."
mkfs.xfs "$device" >/dev/null 2>&1
debug "Existing filesystem."
# Create mount point if necessary.
mkdir -p "$mountPath" >/dev/null 2>&1
# Mount the virtual block device at the given mount point.
mount "$device" "$mountPath" >/dev/null 2>&1
output "{\"status\":\"Success\"}"
exit 0
# Unmount the filesystem at the given mount path.
# $1: mount path. The disk is deliberately NOT detached here (see below).
unmount() {
debug "unmount $@"
local mountPath=$1
# Unmount the file system.
umount "$mountPath" >/dev/null 2>&1
# We would like to detach the disk here, but Kubernetes doesn't give us
# enough information to do so. Instead, we do a just-in-time detach when
# the disk is attached later on.
output "{\"status\":\"Success\"}"
exit 0
# NOTE(review): the closing `}` of this function is not visible in this copy.
# This is the command Kubernetes wants us to perform.
# NOTE(review): the assignment of `op` (presumably `op=$1; shift`) is not
# visible in this copy of the file.
if [ "$op" = "init" ]; then
debug "init $@"
# Report that this driver implements no separate attach/detach API;
# attaching happens inside the mount operation instead.
output "{\"status\":\"Success\",\"capabilities\":{\"attach\":false}}"
exit 0
# NOTE(review): the closing `fi`, the case labels (presumably `mount)` and
# `unmount)`), the default `*)` label, and the closing `esac` are not
# visible in this copy of the file.
case "$op" in
domount $*
unmount $*
debug "not supported: $op $*"
output "{\"status\":\"Not supported\"}"
exit 0
# Multi-stage build: compile the provisioner as a static binary, then copy
# just that binary (plus dumb-init) into a minimal `scratch` image.
FROM golang:1.12-alpine as buildenv
# git is needed so `go get` / `dep` can fetch dependencies.
RUN apk add --no-cache git
# NOTE(review): the package argument of this `go get` (presumably the `dep`
# tool) appears to have been stripped from this copy of the file.
RUN go get
# An empty directory to become /tmp in the scratch image.
RUN mkdir -p /dist/empty
# NOTE(review): the source URL of this ADD (presumably a dumb-init release
# binary) appears to have been stripped from this copy of the file.
ADD /dist/dumb-init
RUN chmod 755 /dist/dumb-init
WORKDIR /go/src/ghost
COPY Gopkg.toml ./
COPY cmd ./cmd
COPY ghost ./ghost
# Fetch the dependency versions pinned in Gopkg.toml.
RUN dep ensure
# CGO disabled + static extldflags so the binary runs in a scratch image.
RUN CGO_ENABLED=0 go build -a -ldflags '-extldflags "-static"' -o provisioner ./cmd/provisioner
RUN cp /go/src/ghost/provisioner /dist/provisioner
# Final image: the static binary, dumb-init as PID 1, and an empty /tmp.
FROM scratch
COPY --from=buildenv /dist/provisioner /provisioner/provisioner
COPY --from=buildenv /dist/dumb-init /usr/local/bin/dumb-init
COPY --from=buildenv /dist/empty /tmp
# dumb-init forwards signals to, and reaps children of, the provisioner.
ENTRYPOINT ["/usr/local/bin/dumb-init", "--"]
CMD ["/provisioner/provisioner"]
name = ""
branch = "master"
name = ""
version = "v2.1.0"
name = ""
version = "kubernetes-1.13.0"
name = ""
version = "kubernetes-1.13.0"
name = ""
version = "10.0.0"
package main
import (
metav1 ""
// Identifiers shared between the provisioner and the flexvolume driver.
// NOTE(review): the closing `)` of this const block and the closing `}` of
// the struct below are not visible in this copy of the file.
const (
// driver is the flexvolume driver name (vendor "greenhost", driver
// "cloud"); it corresponds to the `greenhost~cloud` plugin directory
// installed on the nodes.
driver = "greenhost/cloud"
// idAnnotation is the PV annotation recording the Cosmos disk-image ID.
idAnnotation = "greenhost/cosmos-id"
// provisionerName is the name this provisioner registers under.
provisionerName = "greenhost/cloud-provisioner"
// GreenhostProvisioner provisions PersistentVolumes backed by Greenhost
// (Cosmos2) disk images.
type GreenhostProvisioner struct {
// cosmosClient performs all Cosmos2 API calls.
cosmosClient *ghost.CosmosClient
// projectID is the Cosmos project of this cluster; new disk images are
// tagged with it.
projectID string
// Check that we satisfy the Provisioner interface:
var _ controller.Provisioner = &GreenhostProvisioner{}
// NewGreenhostProvisioner creates a new provisioner.
// It looks up the project ID for this Kubernetes cluster, so new disk images
// can be properly tagged with that.
// NOTE(review): several closing braces of this function are not visible in
// this copy of the file.
func NewGreenhostProvisioner() controller.Provisioner {
cc, err := ghost.NewCosmosClient()
if err != nil {
// Fatalf terminates the process: the provisioner is useless without a
// working Cosmos client.
glog.Fatalf("failed to create CosmosClient: %v", err)
projectID_, err := cc.GetInstanceData()
project := ""
if err != nil {
// Fatalf exits, so the `else` below is technically redundant.
glog.Fatalf("failed getting project ID: %s", err)
} else {
// Dereference is safe on this path: GetInstanceData returned no error.
project = *projectID_
return &GreenhostProvisioner{
cosmosClient: cc,
projectID: project,
// Provision creates a storage asset and returns a pv object representing it.
// It asks Cosmos2 to create a disk image sized to the claim's storage
// request, then wraps it in a flexvolume-backed PersistentVolume manifest.
// NOTE(review): several closing braces/parentheses of this function are not
// visible in this copy of the file.
func (p *GreenhostProvisioner) Provision(options controller.VolumeOptions) (*v1.PersistentVolume, error) {
pvName := options.PVC.ObjectMeta.Name
// Prefix the PVC name so the disk is recognizable in Cosmos.
name := "kubernetes-ghost:" + pvName
// Region to create the disk in, taken from the environment.
region := os.Getenv("GHOST_CLOUD_REGION")
requestedSize := options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
// Round the requested size up to nearest integer number of gigabytes.
// (Scale 9 means units of 10^9 bytes, i.e. GB.)
size := requestedSize.ScaledValue(9)
// Define the object telling Cosmos the specifications of the disk to
// create.
specs := ghost.Specs{
Name: name,
Region: region,
SizeGigaBytes: size,
ProjectID: p.projectID,
// The Cosmos API returns a description of the created disk image.
diskImage, err := p.cosmosClient.CreateDiskImage(&specs)
if err != nil {
glog.Warningf("Error in creating new disk image %v", err)
return nil, err
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: options.PVName,
// Remember the Cosmos disk-image ID so Delete can find the disk later.
Annotations: map[string]string{
idAnnotation: diskImage.ID,
Spec: v1.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: options.PersistentVolumeReclaimPolicy,
AccessModes: options.PVC.Spec.AccessModes,
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): requestedSize,
// The volume is attached and mounted by the flexvolume driver above.
PersistentVolumeSource: v1.PersistentVolumeSource{
FlexVolume: &v1.FlexPersistentVolumeSource{
Driver: driver,
Options: map[string]string{
"cosmos-name": name,
// The disk image ID as given by Cosmos.
"cosmos-id": diskImage.ID,
return pv, nil
// Delete removes the storage asset backing the given PersistentVolume.
// NOTE(review): only logging is visible here — the Cosmos2 API call that
// would actually delete the disk image is either not implemented yet or not
// visible in this copy of the file; the closing `}` is also not visible.
func (p *GreenhostProvisioner) Delete(volume *v1.PersistentVolume) error {
pvName := volume.Name
// The Cosmos disk-image ID was stored as a PV annotation by Provision.
ghostID := volume.Annotations[idAnnotation]
glog.Infof("Deleting pv with name %s and cosmos-id %s", pvName, ghostID)
return nil
// main wires the Greenhost provisioner into the provision controller.
// NOTE(review): several closing braces, the argument list of
// NewProvisionController, and the call that runs `pc` are not visible in
// this copy of the file.
func main() {
// We do not currently accept any flags, but this prevents warnings
// in log messages.
flag.Set("logtostderr", "true")
glog.Info("Starting kubernetes-ghost provisioner.")
// Do not restrict permissions on created files and directories.
// NOTE(review): the corresponding code (presumably a umask call) is not
// visible in this copy of the file.
// Create an InClusterConfig.
config, err := rest.InClusterConfig()
if err != nil {
glog.Fatalf("Error creating InClusterConfig: %v", err)
// Create a Kubernetes client.
clientSet, err := kubernetes.NewForConfig(config)
if err != nil {
glog.Fatalf("Error creating Kubernetes client: %v", err)
// Get the Kubernetes server version.
serverVersion, err := clientSet.Discovery().ServerVersion()
if err != nil {
glog.Fatalf("Error getting Kubernetes server version: %v", err)
// Create the provisioner.
GreenhostProvisioner := NewGreenhostProvisioner()
// Create the provision controller.
pc := controller.NewProvisionController(
// Run the provision controller.
package ghost
import (
// Read this machine's instance ID and API token as passed on the
// kernel command line:
// `instance_id=123`.
// Returns (instanceID, apiToken, error); both values are required and an
// error is returned when either is missing.
// NOTE(review): several closing braces of this function are not visible in
// this copy of the file.
func getKernelParams() (string, string, error) {
data, err := ioutil.ReadFile("/proc/cmdline")
if err != nil {
return "", "", err
// Split command line into space-separated words.
// (TrimRight drops any trailing NUL bytes.)
words := strings.Split(strings.TrimRight(string(data), "\x00"), " ")
var instanceID, apiToken string
for _, word := range words {
// NOTE(review): this logs every kernel parameter, including api_key —
// a credential leak into the logs; consider removing.
glog.Infof("cmdline word: %s", word)
// Split word into parts before and after "=".
components := strings.SplitN(word, "=", 2)
// Check whether this is the argument we're looking for.
if len(components) > 1 {
switch components[0] {
case "instance_id":
instanceID = components[1]
case "api_key":
apiToken = components[1]
if instanceID == "" {
return "", "", errors.New("instance_id not specified on kernel command line")
if apiToken == "" {
return "", "", errors.New("api_key not specified on kernel command line")
return instanceID, apiToken, nil
package ghost
import (
// CosmosClient holds the identity and credentials needed to talk to the
// Cosmos2 API.
// NOTE(review): the closing braces of this type and of NewCosmosClient are
// not visible in this copy of the file.
type CosmosClient struct {
// InstanceID is this VPS's ID, as read from the kernel command line.
InstanceID string
// apiToken is the bearer token for the Cosmos2 API (kept unexported).
apiToken string
// NewCosmosClient builds a client from the instance ID and API token that
// the platform passed on the kernel command line.
func NewCosmosClient() (*CosmosClient, error) {
instanceID_, apiToken_, err := getKernelParams()
if err != nil {
return nil, err
return &CosmosClient{
InstanceID: instanceID_,
apiToken: apiToken_,
}, nil
// ApiCall sends an authenticated HTTP request to the Cosmos2 API.
// method is the HTTP verb, endPoint is appended to GHOST_COSMOS_API_URL,
// and data is an optional request body (nil for none).
// The caller is responsible for closing the returned response body.
// NOTE(review): the error from http.NewRequest is discarded, the client has
// no timeout, and glog.Fatalf on a transport error kills the process before
// `return resp, err` can matter — worth revisiting. Several closing braces
// are not visible in this copy of the file.
func (cc *CosmosClient) ApiCall(method string, endPoint string, data []byte) (*http.Response, error) {
url := os.Getenv("GHOST_COSMOS_API_URL") + endPoint
glog.Infof("calling url: %v", url)
// Prepare the body, if present.
var body io.Reader
if data == nil {
body = nil
} else {
body = bytes.NewReader(data)
glog.Infof("with data: %v", string(data))
// Create the HTTP request.
req, _ := http.NewRequest(method, url, body)
// Add authorization token.
bearer := "Bearer " + cc.apiToken
req.Header.Add("Authorization", bearer)
// Create an HTTP client.
client := &http.Client{}
// Send the HTTP request.
resp, err := client.Do(req)
if err != nil {
glog.Fatalf("Error in http request: %v", err)
return resp, err
package ghost
import (
// Specifications of a disk to be created by Cosmos2.
type Specs struct {
Name string `json:"name"`
Region string `json:"region"`
SizeGigaBytes int64 `json:"size_gigabytes"`
ProjectID string `json:"project_id"`
// This captures the response to a Cosmos2 API call for creating a disk.
type diskResponse struct {
DiskImage DiskImage `json:"diskimage"`
// Some properties of the disk as created by Cosmos2.
type DiskImage struct {
ID string `json:"id"`
// The size comes back as a string in this response.
SizeGigaBytes string `json:"size"`
// Call the Cosmos2 API to create a disk with the given specifications.
// Returns the DiskImage described in the API response.
// NOTE(review): the code that decodes `resp.Body` into `diskResponse` (and
// closes the body) is not visible in this copy — as shown, `diskImage`
// would always be the zero value. The local variable also shadows the
// `diskResponse` type name. Closing braces are not visible either.
func (cc *CosmosClient) CreateDiskImage(specs *Specs) (*DiskImage, error) {
// Marshalling a plain struct cannot fail here, so the error is ignored.
body, _ := json.Marshal(specs)
resp, err := cc.ApiCall("POST", "/disks", body)
if err != nil {
return nil, err
var diskResponse diskResponse
diskImage := diskResponse.DiskImage
glog.Infof("Created DiskImage object: %+v", diskImage)
return &diskImage, nil