WIP, move from GraphQL to dlayer

feature/node-rewrite
Sven Slootweg 2 years ago
parent 6b5bd204d3
commit 060e7d06e5

@ -1 +1,5 @@
{ "presets": ["@babel/preset-env"] }
{ "presets": [
[ "@babel/preset-env", {
"targets": { "node": "current" }
}]
] }

@ -7,7 +7,7 @@ module.exports = {
client: "pg",
connection: {
connectionString: postgresqlSocketUrl({
socketPath: "/tmp",
socketPath: "/run/postgresql",
database: config.database
})
},

@ -1,51 +1,91 @@
'use strict';
exports.up = function(knex, Promise) {
return Promise.all([
knex.schema.createTable("images", (table) => {
table.increments("id");
table.integer("userId").notNullable(); // user that added it
table.uuid("fileId").notNullable();
table.text("name");
table.text("description");
table.enum("sourceType", ["local", "http", "upload"]).notNullable();
table.text("source"); // URL, path, etc.
table.enum("imageType", ["disk", "tarball"]).notNullable(); // eg. tarballs for OpenVZ
table.boolean("public").notNullable(); // whether the image should be visible to everybody, or just its owner
table.boolean("isInstallMedium").notNullable(); // whether the image is just for installation (if not, it will be directly clonable)
}),
knex.schema.createTable("instances", (table) => {
table.increments("id");
table.integer("userId").notNullable();
table.integer("imageId");
table.integer("lastInstallationMediaId");
table.text("comment");
table.text("customIdentifier");
table.enum("virtualizationType", ["kvm"]).notNullable();
table.integer("memory").notNullable(); // in MB
table.integer("swap"); // in MB
table.integer("diskSpace").notNullable(); // in MB
table.integer("traffic"); // in MB
table.boolean("suspended").notNullable();
table.text("suspensionReason");
table.boolean("terminated").notNullable();
table.text("terminationReason");
table.boolean("running");
}),
knex.schema.createTable("users", (table) => {
table.increments("id");
table.text("username").notNullable();
table.text("hash").notNullable();
table.text("emailAddress").notNullable();
table.boolean("active").notNullable();
})
]);
return knex.schema
.createTable("users", (table) => {
table.bigIncrements("id").primary();
table.text("username").notNullable();
table.text("hash").notNullable();
table.text("email_address").notNullable();
table.boolean("is_active").notNullable();
table.timestamp("created_at").notNullable().defaultTo(knex.fn.now());
})
.createTable("storage_pools", (table) => {
table.bigIncrements("id").primary();
table.text("type").notNullable(); // lvm, folder
table.text("name"); // for lvm VG name
table.text("path"); // for folder base path
table.boolean("is_local").notNullable(); // to prevent trying to attach a storage volume that lives on the wrong host
table.timestamp("created_at").notNullable().defaultTo(knex.fn.now());
})
.createTable("instance_types", (table) => {
table.bigIncrements("id").primary();
table.text("name").notNullable();
table.text("internal_comment");
table.integer("default_memory").notNullable(); // in MB
table.integer("default_disk_space").notNullable(); // in MB
table.integer("default_traffic"); // in MB
table.timestamp("created_at").notNullable().defaultTo(knex.fn.now());
})
.createTable("allowed_storage_pools", (table) => {
table.bigInteger("instance_type_id").notNullable().references("instance_types.id");
table.bigInteger("storage_pool_id").notNullable().references("storage_pools.id");
table.unique([ "instance_type_id", "storage_pool_id" ]);
})
.createTable("images", (table) => {
table.bigIncrements("id").primary();
table.bigInteger("user_id").notNullable().references("users.id"); // user that added it
// table.uuid("file_id").notNullable();
table.text("name");
table.text("description");
table.text("source_type").notNullable(); // local, http, upload
table.text("source"); // URL, path, etc.
table.boolean("is_public").notNullable(); // whether the image should be visible to everybody, or just its owner
table.boolean("is_install_medium").notNullable(); // whether the image is just for installation (if not, it will be directly clonable)
table.timestamp("created_at").notNullable().defaultTo(knex.fn.now());
})
.createTable("instances", (table) => {
table.bigIncrements("id").primary();
table.bigInteger("user_id").notNullable().references("users.id");
table.bigInteger("mounted_image_id").references("images.id");
table.bigInteger("last_installation_media_id").references("images.id");
table.jsonb("boot_order").notNullable(); // array that includes references to storage volumes!
// table.uuid("instance_uuid").notNullable();
table.text("name");
table.text("comment");
table.integer("memory").notNullable(); // in MB
table.integer("disk_space").notNullable(); // in MB
table.integer("traffic"); // in MB
table.boolean("is_suspended").notNullable();
table.text("suspension_reason");
table.boolean("is_terminated").notNullable();
table.text("termination_reason");
table.boolean("is_running");
table.timestamp("created_at").notNullable().defaultTo(knex.fn.now());
})
.createTable("storage_volumes", (table) => {
table.bigIncrements("id").primary();
table.bigInteger("user_id").references("users.id");
table.bigInteger("storage_pool_id").references("storage_pools.id");
// table.uuid("volume_uuid").notNullable();
table.text("format").notNullable(); // qcow2
table.timestamp("created_at").notNullable().defaultTo(knex.fn.now());
})
.createTable("storage_attachments", (table) => {
table.bigInteger("storage_volume_id").notNullable().references("storage_volumes.id").unique();
table.bigInteger("instance_id").notNullable().references("storage_volumes.id");
table.boolean("is_locked").notNullable(); // whether the user should be prevented from detaching this storage volume
});
};
exports.down = function(knex, Promise) {
return Promise.all([
knex.schema.dropTable("images"),
knex.schema.dropTable("instances"),
knex.schema.dropTable("users")
]);
return knex.schema
.dropTable("storage_attachments")
.dropTable("storage_volumes")
.dropTable("instances")
.dropTable("images")
.dropTable("allowed_storage_pools")
.dropTable("instance_types")
.dropTable("storage_pools")
.dropTable("users");
};

@ -1,13 +0,0 @@
'use strict';
exports.up = function(knex, Promise) {
return knex.schema.table("instances", (table) => {
table.uuid("instanceUuid").notNullable();
});
};
exports.down = function(knex, Promise) {
return knex.schema.table("instances", (table) => {
table.dropColumn("instanceUuid");
})
};

@ -1,14 +0,0 @@
'use strict';
exports.up = function(knex, Promise) {
return knex.table("storage_volumes", (table) => {
table.increments("id");
table.integer("instanceId");
table.uuid("volumeUuid").notNullable();
table.enum("format", ["qcow2"]).notNullable();
})
};
exports.down = function(knex, Promise) {
};

@ -5,10 +5,29 @@ MARKER:
- Switch hashing to argon2id
- Switch child_process to execa
Next:
- ISO/image download and storage
- Progress streams + cancellation of operation
- lossy-push-value-stream for progress updates
- Audit logging
- VM creation/initialization + GraphQL API for VM management
IDEAS:
- contextual sidebar on add/edit form pages that shows/highlights all the relevant data for deciding what to fill into the form
- eg. all storage devices and pools when creating a new volume
- or highlighting the currently-editing volume in an edit screen
- Image feeds for disk images / installation media
- Allow arbitrary amount of image feeds to be configured
- Make auto-update optional, and allow a 'whitelist every source host explicitly' setting, for improved supply chain security
- Eventually figure out a sensible signature mechanism for this, that also transitively verifies that the content at the source URLs has not changed
- Make sure to clearly document the security caveats
- The ID of each image from a feed is colon-prefixed with the feed ID, to prevent namespace conflicts
- Eventually allow highly-restricted Markdown for the image descriptions?
----------------------
QEMU setup:
- Make sure to set discard=on for any -blockdev arguments to QEMU - this makes it so that deletes on the guest FS actually free up blocks in the host volume when using a thin pool or sparse file, preventing endless growth of volumes over time.
----------------------
@ -138,3 +157,50 @@ Utilities
fuser
Show which processes use the named files, sockets, or filesystems.
----------------
Disk images
Installation media
Preinstalled images
Host nodes
Hardware
Storage pools
Network pools
VMs
SCHEMA
======
storage pools
-------------
id
type: lvm | folder
vg_name?
path?
storage volumes
---------------
id (name is derived from this)
pool_id
instances
---------
id (name is derived from this, prefixed with cvm-)
attached_image_id
storage_attachments
----------------
instance_id
volume_id
is_primary
MARKER: Implement storage pool setup + network pools + VM spawn

@ -48,6 +48,7 @@
"capitalize": "^2.0.0",
"chalk": "^4.1.0",
"classnames": "^2.2.6",
"concat-arrays": "^2.0.0",
"create-event-emitter": "^1.0.0",
"dataloader": "^1.4.0",
"debounce": "^1.0.0",
@ -59,7 +60,7 @@
"error-chain": "^0.1.2",
"escape-string-regexp": "^2.0.0",
"eval": "^0.1.4",
"execall": "^1.0.0",
"execall": "^2.0.0",
"express": "^4.14.0",
"express-promise-router": "^1.1.0",
"express-ws": "^3.0.0",
@ -68,19 +69,20 @@
"generate-lookup-table": "^1.0.0",
"graphql": "^14.2.1",
"is-iterable": "^1.1.1",
"is-plain-obj": "^2.1.0",
"knex": "^0.13.0",
"map-obj": "^3.0.0",
"is-plain-obj": "^3.0.0",
"knex": "^0.21.18",
"map-obj": "^4.2.1",
"match-value": "^1.1.0",
"memoizee": "^0.4.14",
"nanoid": "^2.1.11",
"object.fromentries": "^2.0.2",
"pegjs": "^0.10.0",
"pg": "^6.1.0",
"pg": "^8.5.1",
"postgresql-socket-url": "^1.0.0",
"react-dom": "^16.8.6",
"snake-case": "^2.1.0",
"split": "^1.0.0",
"split-filter-n": "^1.1.2",
"sse-channel": "^3.1.1",
"syncpipe": "^1.0.0",
"through2": "^2.0.1",

@ -39,8 +39,8 @@ module.exports = function () {
}).then(({tree, list}) => {
return selectors.map((selector) => {
if (selector === All) {
// return tree;
return list;
return tree;
// return list;
} else {
let { path, name } = selector;
@ -54,6 +54,7 @@ module.exports = function () {
}
});
// TODO: Shouldn't this pick from the list instead?
return findInTree({ tree, predicate });
}
});

@ -0,0 +1,27 @@
"use strict";
const Promise = require("bluebird");
const memoizee = require("memoizee");
const lvm = require("../../../packages/exec-lvm");
const All = require("../../../packages/graphql-interface/symbols/all");
module.exports = function () {
let getLogicalVolumesOnce = memoizee(lvm.getLogicalVolumes);
return function (paths) {
return Promise.try(() => {
return getLogicalVolumesOnce();
}).then((result) => {
return result.volumes;
}).then((volumes) => {
return paths.map((path) => {
if (path === All) {
return volumes;
} else {
return volumes.find((device) => device.path === path);
}
});
});
};
};

@ -12,6 +12,8 @@ module.exports = function () {
return function (paths) {
return Promise.try(() => {
return getPhysicalVolumesOnce();
}).then((result) => {
return result.volumes;
}).then((volumes) => {
return paths.map((path) => {
if (path === All) {

@ -0,0 +1,27 @@
"use strict";
const Promise = require("bluebird");
const memoizee = require("memoizee");
const lvm = require("../../../packages/exec-lvm");
const All = require("../../../packages/graphql-interface/symbols/all");
module.exports = function () {
let getVolumeGroupsOnce = memoizee(lvm.getVolumeGroups);
return function (names) {
return Promise.try(() => {
return getVolumeGroupsOnce();
}).then((result) => {
return result.groups;
}).then((groups) => {
return names.map((name) => {
if (name === All) {
return groups;
} else {
return groups.find((group) => group.name === name);
}
});
});
};
};

@ -1,101 +1,104 @@
"use strict";
const Promise = require("bluebird");
const graphql = require("graphql");
const fs = require("fs");
const path = require("path");
const createGraphQLInterface = require("../packages/graphql-interface/index");
const dlayer = require("../packages/dlayer");
const All = require("../packages/graphql-interface/symbols/all");
const loadTypes = require("../packages/graphql-interface/type-loader");
const createLoaders = require("./loaders");
/* FIXME: This seems to be added into a global registry somehow? How to specify this explicitly on a query without relying on globals? */
new graphql.GraphQLScalarType({
name: "ByteSize",
description: "A value that represents a value on a byte scale",
serialize: (value) => {
return JSON.stringify(value);
},
parseValue: (value) => {
return JSON.parse(value);
},
parseLiteral: (value) => {
return JSON.parse(value);
},
});
new graphql.GraphQLScalarType({
name: "TimeSize",
description: "A value that represents a value on a time scale",
serialize: (value) => {
return JSON.stringify(value);
},
parseValue: (value) => {
return JSON.parse(value);
},
parseLiteral: (value) => {
return JSON.parse(value);
},
});
const loaders = require("./loaders");
const types = require("./types");
let schema = graphql.buildSchema(fs.readFileSync(path.resolve(__dirname, "../schemas/main.gql"), "utf8"));
let types = loadTypes({
Drive: require("./types/drive"),
BlockDevice: require("./types/block-device"),
Mount: require("./types/mount"),
LVMPhysicalVolume: require("./types/lvm-physical-volume"),
LVMVolumeGroup: require("./types/lvm-volume-group"),
});
function typeFromSource(source, ids, factoryFunction) {
return Promise.try(() => {
if (ids != null) {
return source.loadMany(ids);
} else {
return source.load(All); // FIXME: Symbol
}
}).then((items) => {
return items.map((item) => factoryFunction(item));
});
}
module.exports = function () {
return createGraphQLInterface(schema, { loaderFactory: createLoaders }, {
hardware: {
drives: function ({ paths }, { data }) {
return Promise.try(() => {
if (paths != null) {
return data.smartctlScan.loadMany(paths);
} else {
return data.smartctlScan.load(All);
}
}).then((devices) => {
return devices.map((device) => {
return types.Drive({ path: device.path });
});
});
}
return dlayer({
makeContext: function () {
return {
sources: loaders()
};
},
resources: {
blockDevices: function ({ names }, { data }) {
return Promise.try(() => {
if (names != null) {
return data.lsblk.loadMany(names);
} else {
return data.lsblk.load(All);
}
}).then((devices) => {
return devices.map((device) => {
return types.BlockDevice({ name: device.name });
});
});
schema: {
hardware: {
drives: ({ paths }, { sources }) => {
return typeFromSource(sources.smartctlScan, paths, (device) => types.Drive({ path: device.path }));
}
},
lvm: {
physicalVolumes: function ({ paths }, { data }) {
return Promise.try(() => {
if (paths != null) {
return data.lvmPhysicalVolumes.loadMany(paths);
} else {
return data.lvmPhysicalVolumes.load(All);
}
}).then((volumes) => {
return volumes.map((volume) => {
return types.LVMPhysicalVolume({ path: volume.path });
});
});
resources: {
blockDevices: ({ names }, { sources }) => {
return typeFromSource(sources.lsblk, names, (device) => types.BlockDevice({ name: device.name }));
},
lvm: {
physicalVolumes: ({ paths }, { sources }) => {
return typeFromSource(sources.lvmPhysicalVolumes, paths, (volume) => types.LVMPhysicalVolume({ path: volume.path }));
},
volumeGroups: ({ names }, { sources }) => {
return typeFromSource(sources.lvmVolumeGroups, names, (group) => types.LVMVolumeGroup({ name: group.name }));
}
},
images: {
installationMedia: [],
vmImages: []
}
}
},
}
});
};
// let schemaTodo = {
// system: {
// hardware: { ... },
// lvm: {
// ...
// }
// },
// resources: {
// storagePools: {
// name: "...",
// type: "lvm|folder",
// isLocal: true,
// folderPath: "...",
// lvmVolumeGroup: LVMVolumeGroup,
// $$update: {
// $arguments: {
// name: "..."
// }
// },
// $$delete: {},
// $collection: {
// $$create: {
// $arguments: {
// name: "poolName",
// volumeGroup: "name",
// // or:
// folderPath: "path"
// }
// // standard fields for a storage pool entry go here, including LVMVolumeGroup resolution!
// }
// },
// },
// networkPools: { ... },
// storageVolumes: {
// id: 0,
// userID: 0,
// pool: StoragePool,
// poolID: 0, // the index/ID within the pool
// size: 0, // bytes-iec
// },
// instances: {
// id: 0,
// userID: 0,
// state: "running|stopped|creating",
// storageVolumes: StorageVolume, // in boot order
// }
// }
// };

@ -10,6 +10,7 @@ let dataSourceFactories = {
smartctlScan: require("./data-sources/smartctl/scan"),
smartctlAttributes: require("./data-sources/smartctl/attributes"),
lvmPhysicalVolumes: require("./data-sources/lvm/physical-volumes"),
lvmVolumeGroups: require("./data-sources/lvm/volume-groups"),
nvmeListNamespaces: require("./data-sources/nvme/list-namespaces"),
};

@ -0,0 +1,19 @@
General architecture of the internal API
========================================
The API implementation consists of a number of different things:
- System interaction libraries, often wrappers around system management programs
- Data sources, a standardized representation of the data exposed by those libraries
- Types, different kinds of 'system objects' (storage drive, block device, LVM volume group, etc.) that can be read from and interacted with
- A root API schema, which represents the 'tree' of available queryable objects
The full API schema is made up of the root schema + the schemas of each type. Types can reference each other, including recursively, so loops in the schema exist. The API uses dlayer as its querying mechanism.
A 'data source' is a factory function that produces a data retrieval function. This data retrieval function accepts an array of object IDs (or a special `All` marker), and is expected to produce a respectively-ordered array of results. The data retrieval function is responsible for doing the entire translation from this standardized retrieval API, to whatever API the underlying system interaction library requires. The `dataloader` library is used to wrap this data retrieval function for more efficient operation.
Each type is defined as an object with a set of keys, whose value is a function that (optionally) accepts an object of named arguments and a context object as its parameters, and which is expected to (asynchronously) return a value for that key. All the previously-defined-and-wrapped data sources are made available under `sources` on the context object. The context object also includes a number of utility functions for evaluating other properties, for edge-cases where the logic of one property is dependent on the value of another. These functions are free to implement whatever logic they require, and in fact this uses the same API as the rest of dlayer - 'types' are not actually special on a dlayer level, they are essentially just lazily-returned schema objects.
In this project, specifically, most properties *are not* defined manually - instead, `dlayer-source` is used, which provides a bridge between dlayer and the data sources mechanism. Rather than manually calling a source for every property that needs to access it, a `$sources` meta-property is defined that specifies every data source that's needed for this object, and which properties it can satisfy. The value for those property keys may either be a function (to manually extract data from the data-source-provided value) or a string (to simply copy the key from the data-source-provided value).
Data source: ID[] -> DataObject[]
Schema: QueryObject -> (data source) -> SchemaObject[]?

@ -0,0 +1,38 @@
"use strict";
const Promise = require("bluebird");
const api = require("./");
const loaders = require("./loaders");
return Promise.try(() => {
return api.query({
hardware: {
drives: {
model: true,
size: true,
interface: true,
smartHealth: true,
blockDevice: {
name: true,
path: true,
type: true,
children: {
name: true,
path: true,
type: true,
mounts: {
mountpoint: true,
filesystem: true,
totalSpace: true
}
}
}
// allBlockDevices
}
}
});
}).then((result) => {
console.dir(result, { depth: null });
}).catch((error) => {
console.dir(error, { depth: null });
});

@ -3,93 +3,62 @@
const Promise = require("bluebird");
const fs = Promise.promisifyAll(require("fs"));
const matchValue = require("match-value");
const asyncpipe = require("../../packages/asyncpipe");
const { createDataObject, LocalProperties, ID, Dynamic } = require("../../packages/graphql-interface/data-object");
const All = require("../../packages/graphql-interface/symbols/all");
const dlayerSource = require("../../packages/dlayer-source");
const All = require("../../packages/graphql-interface/symbols/all"); // FIXME: Move to dlayer-source?
const treecutter = require("../../packages/treecutter");
const types = require("./");
module.exports = function (types) {
return function BlockDevice({ name, path, _treecutterDepth, _treecutterSequenceNumber }) {
// if (name != null) {
// path = `/dev/${name}`;
// } else if (path != null) {
// name = deviceNameFromPath(path);
// }
module.exports = function BlockDevice({ name, path }) {
return dlayerSource.withSources({
// TODO: Eventually make this produce a (filtered) tree instead?
mounts: function ({ type }, { $getProperty, $getPropertyPath, sources }) {
return Promise.try(() => {
return sources.findmnt.load(All);
}).then((mountTree) => {
return asyncpipe(mountTree, [
(_) => treecutter.flatten(_),
(_) => _.map((mount) => types.Mount({ mountpoint: mount.mountpoint })),
(_) => Promise.filter(_, async (mount) => {
let sourcePath = await $getPropertyPath(mount, "sourceDevice.path");
let sourceName = await $getPropertyPath(mount, "sourceDevice.name");
// return Promise.try(() => {
// return fs.realpathAsync(path);
// }).then((realPath) => {
/* FIXME: parent */
return createDataObject({
[LocalProperties]: {
_treecutterDepth,
_treecutterSequenceNumber
},
[Dynamic]: {
mounts: function ({ type }, { resolveProperty, resolvePropertyPath, resolveDataSource }) {
return Promise.try(() => {
return resolveDataSource("findmnt", All);
}).then((allMounts) => {
return treecutter.flatten(allMounts);
}).map((mount) => {
return types.Mount({ mountpoint: mount.mountpoint });
}).filter((mount) => {
return Promise.try(() => {
return resolvePropertyPath([ "sourceDevice", "path" ], mount);
}).then((sourceDevicePath) => {
// FIXME: Get own path dynamically
return (sourceDevicePath === path);
});
}).then((mounts) => {
if (type != null) {
return Promise.filter(mounts, (mount) => {
return Promise.try(() => {
return resolveProperty("type", mount);
}).then((mountType) => {
return (mountType === type);
});
});
} else {
return mounts;
}
return (
(sourcePath != null && sourcePath === path)
|| (sourceName != null && sourceName === name)
);
})
]);
}).then((relevantMounts) => {
if (type == null) {
return relevantMounts;
} else {
return Promise.filter(relevantMounts, async (mount) => {
return (await $getProperty(mount, "type") === type);
});
}
},
// findmnt: {
// [ID]: All,
// mounts: function (allMounts, { type }, context) {
// let { resolveProperty } = context;
// console.log("CONTEXT", context);
// // FIXME: Why is this called so often?
// }
// },
});
},
$sources: {
lsblk: {
[ID]: { name, path },
[dlayerSource.ID]: { name, path },
name: "name",
path: (device) => {
return fs.realpathAsync(device.path);
},
type: (device) => {
return matchValue(device.type, {
partition: "PARTITION",
disk: "DISK",
loopDevice: "LOOP_DEVICE"
});
},
path: (device) => fs.realpathAsync(device.path),
type: (device) => matchValue(device.type, {
partition: "PARTITION",
disk: "DISK",
loopDevice: "LOOP_DEVICE"
}),
size: "size",
mountpoint: "mountpoint", // FIXME: Isn't this obsoleted by `mounts`?
deviceNumber: "deviceNumber",
removable: "removable",
readOnly: "readOnly",
children: (device) => {
return device.children.map((child) => {
return BlockDevice({ name: child.name });
});
}
children: (device) => device.children.map((child) => {
return BlockDevice({ name: child.name });
})
}
});
// });
};
}
});
};

@ -1,106 +1,53 @@
"use strict";
const Promise = require("bluebird");
const {createDataObject, LocalProperties, ID, Dynamic} = require("../../packages/graphql-interface/data-object");
const upperSnakeCase = require("../../packages/upper-snake-case");
const dlayerSource = require("../../packages/dlayer-source");
const treecutter = require("../../packages/treecutter");
const deviceNameFromPath = require("../../util/device-name-from-path");
const upperSnakeCase = require("../../packages/upper-snake-case");
/* TO IMPLEMENT:
- resolveProperty
- resolveProperties
- resolveDataSource
- Dynamic
*/
module.exports = function (types) {
return function Drive({ path }) {
return createDataObject({
[LocalProperties]: {
path: path,
/* FIXME: allBlockDevices, for representing every single block device that's hosted on this physical drive, linearly. Need to figure out how that works with representation of mdraid arrays, LVM volumes, etc. */
},
[Dynamic]: {
// FIXME: namespaces
blockDevice: (_, { resolveProperty }) => {
return Promise.try(() => {
return resolveProperty("interface");
}).then((interface_) => {
if (interface_ === "nvme") {
/* NVMe drives do not have a single block device, they have zero or more namespaces */
return null;
} else {
return types.BlockDevice({ path: path });
}
});
},
allBlockDevices: ({ type }, { resolveProperty, resolveDataSource }) => {
// FIXME: Figure out how to semantically represent that data cannot be stored directly onto an NVMe device (only onto a namespace), but *can* be directly stored on a *non-NVMe* device... usually, anyway.
const types = require("./");
module.exports = function Drive ({ path }) {
return dlayerSource.withSources({
path: path,
blockDevice: async function(_, { $getProperty }) {
if (await $getProperty(this, "interface") === "nvme") {
return null;
} else {
return types.BlockDevice({ path: path });
}
},
allBlockDevices: async function(_, { $getProperty, sources }) {
return Promise.try(async () => {
if (await $getProperty(this, "interface") === "nvme") {
return Promise.try(() => {
return resolveProperty("interface");
}).then((interface_) => {
if (interface_ === "nvme") {
// Dynamic data source lookup: nvme list-ns -> Drive
return Promise.try(() => {
return resolveDataSource("nvmeListNamespaces", path);
}).map((namespaceId) => {
return `${path}n${namespaceId}`;
});
} else {
return [ path ];
}
}).map((rootPath) => {
return resolveDataSource("lsblk", { path: rootPath });
}).then((blockDeviceTrees) => {
let blockDevices = treecutter.flatten(blockDeviceTrees)
.map((device) => types.BlockDevice(device));
// MARKER: Find a way to reassemble this tree on the client side, for display
// MARKER: Why are most of the mounts (erroneously) empty?
if (type != null) {
return Promise.filter(blockDevices, (device) => {
return Promise.try(() => {
return resolveProperty("type", device.item);
}).then((deviceType) => {
return (deviceType === type);
});
});
} else {
return blockDevices;
}
return sources.nvmeListNamespaces.load(path);
}).map((namespaceID) => {
return `${path}n${namespaceID}`;
});
} else {
return [ path ];
}
},
lsblk: {
[ID]: { path },
// TODO: Implement [DependsOn], for cases where a source data mapper depends on data from more than one source, so it can reference properties defined elsewhere?
// FIXME: Figure out a nice way to make a source lookup conditional upon something else (like only do a `lsblk` if not an NVMe drive, and for NVMe drives return a hardcoded thing)
// allBlockDevices: function (rootDevice, { type }, context) {
// let devices = treecutter.flatten([rootDevice])
// .map((device) => types.BlockDevice({ name: device.name }));
// if (type != null) {
// return Promise.filter(devices, (device) => {
// return Promise.try(() => {
// return device.type({}, context);
// }).then((deviceType) => {
// return (deviceType === type);
// });
// });
// } else {
// return devices;
// }
// }
},
}).then((rootPaths) => {
let queries = rootPaths.map((path) => ({ path: path }));
return sources.lsblk.loadMany(queries);
}).map((blockDeviceTree) => {
return treecutter.map(blockDeviceTree, (device) => types.BlockDevice(device));
}).then((resultArray) => {
// Treecutter always returns an array, regardless of whether the input was an array or not, so we need to flatten it since we will only ever have a single root entry per rootPath query here
return resultArray.flat();
});
},
$sources: {
// lsblk: {
// [dlayerSource.ID]: { path },
// },
smartctlScan: {
[ID]: path,
[dlayerSource.ID]: path,
interface: "interface"
},
smartctlInfo: {
[ID]: path,
[dlayerSource.ID]: path,
model: "model",
modelFamily: "modelFamily",
smartAvailable: "smartAvailable",
@ -117,13 +64,14 @@ module.exports = function (types) {
sataVersion: "sataVersion"
},
smartctlAttributes: {
[ID]: path,
[dlayerSource.ID]: path,
smartAttributes: (attributes) => {
return attributes.map((attribute) => {
return Object.assign({}, attribute, {
return {
... attribute,
type: upperSnakeCase(attribute.type),
updatedWhen: upperSnakeCase(attribute.updatedWhen)
});
};
});
},
smartHealth: (attributes) => {
@ -144,6 +92,6 @@ module.exports = function (types) {
}
}
}
});
};
}
});
};

@ -0,0 +1,9 @@
"use strict";
Object.assign(module.exports, {
Drive: require("./drive"),
BlockDevice: require("./block-device"),
Mount: require("./mount"),
LVMPhysicalVolume: require("./lvm/physical-volume"),
LVMVolumeGroup: require("./lvm/volume-group"),
});

@ -1,32 +0,0 @@
"use strict";
const {createDataObject, LocalProperties, ID} = require("../../packages/graphql-interface/data-object");
module.exports = function (types) {
return function LVMPhysicalVolume({ path }) {
return createDataObject({
[LocalProperties]: {
path: path,
blockDevice: () => {
return types.BlockDevice({ path: path });
}
},
lvmPhysicalVolumes: {
[ID]: path,
volumeGroup: (volume) => {
if (volume.volumeGroup != null) {
return types.LVMVolumeGroup({ name: volume.volumeGroup });
}
},
format: "format",
size: "totalSpace",
freeSpace: "freeSpace",
duplicate: "isDuplicate",
allocatable: "isAllocatable",
used: "isUsed",
exported: "isExported",
missing: "isMissing"
}
});
};
};

@ -1,13 +0,0 @@
"use strict";
const {createDataObject, LocalProperties} = require("../../packages/graphql-interface/data-object");
module.exports = function (_types) {
return function createVolumeGroup({ name }) {
return createDataObject({
[LocalProperties]: {
name: name
}
});
};
};

@ -0,0 +1,23 @@
"use strict";
const dlayerSource = require("../../../packages/dlayer-source");
const types = require("../");
module.exports = function LVMPhysicalVolume ({ path }) {
return dlayerSource.withSources({
$sources: {
lvmPhysicalVolumes: {
[dlayerSource.ID]: path,
path: "path",
format: "format",
totalSpace: "totalSpace",
freeSpace: "freeSpace",
status: "status",
isExported: "isExported",
isMissing: "isMissing",
volumeGroup: (volume) => types.LVMVolumeGroup({ name: volume.volumeGroup })
}
}
});
};

@ -0,0 +1,48 @@
"use strict";
const Promise = require("bluebird");
const dlayerSource = require("../../../packages/dlayer-source");
const types = require("../");
const All = require("../../../packages/graphql-interface/symbols/all");
module.exports = function LVMVolumeGroup ({ name }) {
return dlayerSource.withSources({
physicalVolumes: function (_args, { sources }) {
return Promise.try(() => {
return sources.lvmPhysicalVolumes.load(All);
}).filter((volume) => {
return (volume.volumeGroup === name);
}).map((volume) => {
return types.LVMPhysicalVolume({ path: volume.path });
});
},
logicalVolumes: function (_args, { sources }) {
return Promise.try(() => {
return sources.lvmLogicalVolumes.load(All);
}).filter((volume) => {
return (volume.volumeGroup === name);
}).map((volume) => {
return types.LVMLogicalVolume({ path: volume.path });
});
},
$sources: {
lvmVolumeGroups: {
[dlayerSource.ID]: name,
name: "name",
totalSpace: "totalSpace",
freeSpace: "freeSpace",
physicalVolumeCount: "physicalVolumeCount",
logicalVolumeCount: "logicalVolumeCount",
snapshotCount: "snapshotCount",
isReadOnly: "isReadOnly",
isResizeable: "isResizeable",
isExported: "isExported",
isIncomplete: "isIncomplete",
allocationPolicy: "allocationPolicy",
mode: "mode"
// FIXME: physicalVolumes, logicalVolumes
}
}
});
};

@ -2,66 +2,42 @@
const Promise = require("bluebird");
const fs = Promise.promisifyAll(require("fs"));
const dlayerSource = require("../../packages/dlayer-source");
const types = require("./");
const {createDataObject, LocalProperties, ID, Dynamic} = require("../../packages/graphql-interface/data-object");
module.exports = function Mount({ mountpoint }) {
return dlayerSource.withSources({
mountpoint: mountpoint,
sourceDevice: async (_, { sources }) => {
let mount = await sources.findmnt.load(mountpoint);
module.exports = function (types) {
return function Mount({ mountpoint }) {
return createDataObject({
[LocalProperties]: {
mountpoint: mountpoint
},
[Dynamic]: {
sourceDevice: (_, { resolveDataSource }) => {
// FIXME: This code is rather bulky, maybe there should be a first-class way to express "try to create a data object that may fail"
return Promise.try(() => {
return resolveDataSource("findmnt", mountpoint);
}).then((mount) => {
if (mount.sourceDevice != null) {
return Promise.try(() => {
return fs.realpathAsync(mount.sourceDevice);
}).then((sourcePath) => {
return Promise.try(() => {
return resolveDataSource("lsblk", { path: sourcePath });
}).then((lsblkResult) => {
if (lsblkResult != null) {
return types.BlockDevice({ path: sourcePath });
} else {
// This occurs when the `sourceDevice` is a valid device, but it is not a *block* device, eg. like with `/dev/fuse`
return null;
}
});
});
} else {
return null;
}
});
if (mount != null) {
if (mount.sourceDevice != null) {
let sourcePath = await fs.realpathAsync(mount.sourceDevice);
if (await sources.lsblk.load({ path: sourcePath }) != null) {
return types.BlockDevice({ path: sourcePath });
} else {
// This occurs when the `sourceDevice` is a valid device, but it is not a *block* device, eg. `/dev/fuse`
return null;
}
} else {
// This occurs when the mount is not backed by a device, eg. an sshfs FUSE mount
return null;
}
},
} else {
// TODO: Can this ever happen for any legitimate reason?
throw new Error(`Mountpoint '${mountpoint}' not found in findmnt output`);
}
},
$sources: {
findmnt: {
[ID]: mountpoint,
[dlayerSource.ID]: mountpoint,
id: "id",
// FIXME: Aren't we inferring the below somewhere else in the code, using the square brackets?
type: (mount) => {
if (mount.rootPath === "/") {
return "ROOT_MOUNT";
} else {
return "SUBMOUNT";
}
},
// sourceDevice: (mount) => {
// return Promise.try(() => {
// if (mount.sourceDevice != null) {
// return Promise.try(() => {
// return fs.realpathAsync(mount.sourceDevice);
// }).then((sourcePath) => {
// return types.BlockDevice({ path: sourcePath });
// });
// } else {
// return null;
// }
// });
// },
type: (mount) => (mount.rootPath === "/")
? "ROOT_MOUNT"
: "SUBMOUNT",
filesystem: "filesystem",
options: "options",
label: "label",
@ -76,12 +52,10 @@ module.exports = function (types) {
taskID: "taskID",
optionalFields: "optionalFields",
propagationFlags: "propagationFlags",
children: (mount) => {
return mount.children.map((child) => {
return Mount({ mountpoint: child.mountpoint });
});
}
children: (mount) => mount.children.map((child) => {
return Mount({ mountpoint: child.mountpoint });
})
}
});
};
}
});
};

@ -21,7 +21,7 @@ module.exports = function () {
let db = knex(require("../knexfile"));
let imageStore = require("./util/image-store")(projectPath("./images"));
let taskTracker = require("../lib/tasks/tracker")();
let apiQuery = require("./api")();
let api = require("./api")();
let state = {db, imageStore, taskTracker};
@ -36,19 +36,20 @@ module.exports = function () {
? template.queryArguments(locals)
: {};
return apiQuery(template.query, queryArguments);
return api.query(template.query, queryArguments);
}
}).then((result) => {
if (result == null) {
return {};
} else {
if (result.errors != null && result.errors.length > 0) {
throw result.errors[0];
} else {
return {
data: result.data
};
}
return { data: result };
// if (result.errors != null && result.errors.length > 0) {
// throw result.errors[0];
// } else {
// return {
// data: result.data
// };
// }
}
});
}
@ -77,6 +78,12 @@ module.exports = function () {
app.use("/disk-images", require("./routes/disk-images")(state));
app.use("/instances", require("./routes/instances")(state));
app.use("/hardware/storage-devices", require("./routes/storage-devices")(state));
app.use("/resource-pools", require("./routes/resource-pools")(state));
app.get("/hardware", (req, res) => {
// FIXME: default to /hardware/system-information instead, when that is implemented
res.redirect("/hardware/storage-devices");
});
app.use((err, req, res, next) => {
/* GraphQL will wrap any data-resolving errors in its own error type, and that'll break our `showChain` logic below. Note that some GraphQL errors may not *have* an originalError (eg. schema violations), so we account for that as well. */
@ -108,7 +115,7 @@ module.exports = function () {
error: err
});
debugger;
// debugger;
});
return app;

@ -0,0 +1,9 @@
"use strict";
const Promise = require("bluebird");
module.exports = function asyncpipe(value, pipeline) {
return Promise.reduce(pipeline, (lastValue, func) => {
return func(lastValue);
}, value);
};

@ -0,0 +1,73 @@
"use strict";
const Promise = require("bluebird");
const syncpipe = require("syncpipe");
const util = require("util");
const ID = Symbol("dlayer-source object ID");
// TODO: Make more readable
// Declaratively binds schema object properties to dataloader-style data
// sources. `withSources` expands a `$sources` declaration into ordinary
// async getter functions, so that a dlayer schema object can lazily fetch
// its property values from sources registered on the query context.
module.exports = {
	withSources: function withSources(schemaObject) {
		let { $sources, ... rest } = schemaObject;
		// Build a single { property: getter } object out of every property
		// declared under every source in $sources.
		let generatedProperties = syncpipe($sources ?? {}, [
			(_) => Object.entries(_),
			(_) => _.flatMap(([ source, properties ]) => {
				return Object.entries(properties).map(([ property, selector ]) => {
					// This is to support property name shorthand used in place of a selector function
					let effectiveSelector = (typeof selector === "string")
						? (result) => {
							// FIXME: Consider whether to add this check or not; currently, it would break stuff in CVM
							// if (selector in result) {
								return result[selector];
							// } else {
							// 	throw new Error(`Result object does not have a '${selector}' property`);
							// }
						}
						: selector;
					// The generated getter looks up the named source on the query
					// context, loads the record for this object's declared ID, and
					// applies the selector to the loaded record.
					let getter = function (_args, context) {
						return Promise.try(() => {
							if (properties[ID] != null) {
								let dataSource = context.sources[source];
								if (dataSource != null) {
									// console.log(`Calling source '${source}' with ID ${util.inspect(properties[ID])}`);
									return dataSource.load(properties[ID]);
								} else {
									throw new Error(`Attempted to read from source '${source}', but no such source is registered`);
								}
							} else {
								// FIXME: Better error message
								throw new Error(`Must specify a dlayer-source ID`);
							}
						}).then((result) => {
							// console.log(`Result [${source}|${util.inspect(properties[ID])}] ${util.inspect(result)}`);
							// TODO: How to deal with null results? Allow them or not? Make it an option?
							if (result != null) {
								return effectiveSelector(result);
							} else {
								throw new Error(`Null-ish result returned for ID '${properties[ID]}' from source '${source}'; this is not allowed, and there is probably a bug in your code. Please file a ticket if you have a good usecase for null-ish results!`);
							}
						});
					};
					return [ property, getter ];
				});
			}),
			(_) => Object.fromEntries(_)
		]);
		// NOTE: We always specify the generated properties first, so that properties can be overridden by explicit known values to bypass the source lookup, if needed by the implementation
		return {
			... generatedProperties,
			... rest
		};
	},
	// Symbol under which a source declaration carries the ID that gets passed
	// to the source's `load` method.
	ID: ID
};

@ -0,0 +1,199 @@
"use strict";
const Promise = require("bluebird");
const mapObject = require("map-obj");
// TODO: Bounded/unbounded recursion
// TODO: context
// TODO: $required query predicate
// TODO: Lazy query objects, which get initialized by calling a function that gets the parent object as an argument? This would not be serializable over the network!
// FIXME: $getProperty, $getPropertyPath, maybe $resolveObject/$query?
// FIXME: Allow setting an evaluation depth limit for queries, to limit eg. recursion
// FIXME: recurseDepth, recurseLabel/recurseGoto
/* Recursion design:
When setting `$recurse: true` on a child property, the parent schema gets duplicated with the child schema merged into it, and the resulting combined schema is used for the recursive fetching. Because the child schema can explicitly set properties to false, this allows for both "fetch in parent but not in recursed children" cases (true in parent, false in child) and "fetch in recursed children but not in parent" cases (unspecified or false in parent, true in child).
The schema merging will eventually become deep-merging, when multi-level recursion is implemented (ie. the possibility to recurse indirectly).
*/
const specialKeyRegex = /^\$[^\$]/;
// Renders a query path as a human-readable arrow-separated string.
// Whenever a query segment differs from the corresponding schema segment
// (ie. the query used an alias), the original schema key is shown in
// brackets after the alias.
function stringifyPath(queryPath, schemaPath) {
	let segments = queryPath.map((querySegment, i) => {
		let schemaSegment = schemaPath[i];

		return (querySegment === schemaSegment)
			? querySegment
			: `${querySegment} [${schemaSegment}]`;
	});

	return segments.join(" -> ");
}
// Invokes `value` with the given `this` context and argument list when it is
// a function; otherwise passes it through untouched. Always yields a Promise,
// so callers can treat static values and (async) handlers uniformly.
function maybeCall(value, args, thisContext) {
	return Promise.try(() => {
		return (typeof value === "function")
			? value.call(thisContext, ...args)
			: value;
	});
}
// Whether `value` is a non-null, non-array object. Note that `typeof`
// reports both `null` and arrays as "object", so both are excluded here.
function isObject(value) {
	// FIXME: Replace this with a more sensible check, like is-plain-object
	if (value == null) {
		return false;
	}

	return (typeof value === "object") && !Array.isArray(value);
}
// TODO: Move to separate package, decide whether to keep the nested array detection or not - that should probably just be part of the handler?
// Applies `handler` to `value` directly, or - when `value` is an array - to
// each of its elements in parallel. Nested arrays are rejected outright.
function mapMaybeArray(value, handler) {
	// NOTE: This is async!
	if (!Array.isArray(value)) {
		return handler(value);
	}

	return Promise.map(value, (item) => {
		if (Array.isArray(item)) {
			throw new Error(`Encountered a nested array, which is not allowed; maybe you forgot to flatten it?`);
		}

		return handler(item);
	});
}
/* Possible values of a schema property:
true, null, object with only special keys (but not $recurse) -- fetch value and return it as-is
false -- do not fetch value at all
object with $recurse -- recursively fetch, optionally with extra keys to fetch (or ignore) for recursed children only, inheriting the rest of the schema from the parent
object with regular non-special keys -- fetch object and continue fetching into child properties according to the schema
a "special key" is any key that is prefixed with $ - they are used to provide additional parameters to dlayer, and cannot be used for business logic keys
*/
// Like mapObject, but resolves any Promise-valued properties before
// yielding the final result object.
function asyncMapObject(object, handler) {
	let mapped = mapObject(object, handler);

	return Promise.props(mapped);
}
/* Classifies a single subquery value (see the schema property notes above):
   - isRecursive: the subquery asked to recurse using the parent schema
   - hasChildKeys: the subquery selects nested, non-special keys
   - isLeaf: the value should be fetched and returned as-is
   - args: any $arguments to pass along to the schema handler */
function analyzeSubquery(subquery) {
	let args = subquery?.$arguments ?? {};
	let isRecursive = (subquery?.$recurse === true);
	let hasChildKeys = isObject(subquery)
		&& Object.keys(subquery).some((key) => !specialKeyRegex.test(key));
	let isLeaf = (subquery === true) || (subquery === null) || (!hasChildKeys && !isRecursive);

	return { isRecursive, hasChildKeys, isLeaf, args };
}
// Resolves one query key to its schema handler, honouring $key aliases and
// falling back to the schema's $anyKey catch-all handler when present.
function analyzeQueryKey(schemaObject, queryObject, queryKey) {
	let subquery = queryObject[queryKey];
	let schemaKey = subquery?.$key ?? queryKey; // $key is for handling aliases

	return {
		... analyzeSubquery(subquery),
		schemaKey: schemaKey,
		handler: schemaObject[schemaKey] ?? schemaObject.$anyKey
	};
}
// Core query evaluator: walks `queryObject` in parallel with `schemaObject`,
// invoking the schema's handler for every requested key, and resolves to a
// plain result object mirroring the query's shape. `queryPath` and
// `schemaPath` track the position within the query purely for error
// reporting; they diverge when the query uses aliases (see stringifyPath).
function evaluate(schemaObject, queryObject, context, queryPath, schemaPath) {
	// map query object -> result object
	return asyncMapObject(queryObject, (queryKey, subquery) => {
		// An explicit `false` means "do not fetch this key at all"
		let shouldFetch = (subquery !== false);
		if (!shouldFetch || specialKeyRegex.test(queryKey)) {
			// When constructing the result object, we only care about the 'real' keys, not about special meta-keys like $key; those get processed in the actual resolution logic itself.
			return mapObject.mapObjectSkip;
		} else {
			let { schemaKey, handler, args, isRecursive, isLeaf } = analyzeQueryKey(schemaObject, queryObject, queryKey);
			if (handler != null) {
				let promise = Promise.try(() => {
					// This calls the data provider in the schema
					return maybeCall(handler, [ args, context ], schemaObject);
				}).then((result) => {
					let nextQueryPath = queryPath.concat([ queryKey ]);
					let nextSchemaPath = schemaPath.concat([ schemaKey ]);
					return Promise.try(() => {
						if (!isLeaf && result != null) {
							// For $recurse, the child inherits the parent's query with
							// the subquery's own keys merged over it (see the recursion
							// design note at the top of this file)
							let effectiveSubquery = (isRecursive)
								? { ... queryObject, ... subquery }
								: subquery;
							// The handler may have produced either one object or an
							// array of objects; descend into each of them
							return mapMaybeArray(result, (item) => {
								return evaluate(item, effectiveSubquery, context, nextQueryPath, nextSchemaPath);
							});
						} else {
							// null / undefined are returned as-is, so are leaves
							return result;
						}
					}).catch((error) => {
						// FIXME: Chain properly
						if (error.path == null) {
							// Only assign the path if it hasn't already happened at a deeper level; this is a recursive function after all
							error.path = nextQueryPath;
							error.message = error.message + ` (${stringifyPath(nextQueryPath, nextSchemaPath)})`;
						}
						throw error;
					});
				});
				return [ queryKey, promise ];
			} else {
				throw new Error(`No key '${schemaKey}' exists in the schema`);
			}
		}
	});
}
// Creates a dlayer instance for the given schema; the returned object's
// `query` method evaluates query objects against the schema root.
module.exports = function createDLayer(options) {
	// options = { schema, makeContext }
	return {
		// Evaluates `query` against the schema. `context` is merged over the
		// context produced by `makeContext` (if any), and the combined context
		// is made available to every schema handler.
		query: function (query, context) {
			let generatedContext = (options.makeContext != null)
				? options.makeContext()
				: {};
			// Looks up `property` on a schema object, invoking it with `args`
			// and the combined context when it is a handler function.
			// NOTE: `combinedContext` is referenced here before its declaration
			// below; that is safe because this function only runs after the
			// declaration has been evaluated.
			function getProperty(object, property, args = {}) {
				// FIXME: Validatem
				if (object == null) {
					throw new Error(`Empty object passed`);
				}
				if (property in object) {
					return maybeCall(object[property], [ args, combinedContext ], object);
				} else {
					// FIXME: Better error message with path
					throw new Error(`No key '${property}' exists in the schema`);
				}
			}
			let combinedContext = {
				... generatedContext,
				... context,
				// FIXME: Figure out a way to annotate errors here with the path at which they occurred, *and* make clear that it was an internal property lookup
				$getProperty: getProperty,
				// Walks `propertyPath` (a dot-separated string or an array of
				// keys) through nested schema objects, stopping early with null
				// when an intermediate value is null-ish.
				$getPropertyPath: function (object, propertyPath) {
					let parsedPath = (typeof propertyPath === "string")
						? propertyPath.split(".")
						: propertyPath;
					return Promise.reduce(parsedPath, (currentObject, pathSegment) => {
						if (currentObject != null) {
							return getProperty(currentObject, pathSegment);
						} else {
							// Effectively null-coalescing
							return null;
						}
					}, object);
				}
			};
			return evaluate(options.schema, query, combinedContext, [], []);
		}
	};
};

@ -0,0 +1,48 @@
"use strict";
const Promise = require("bluebird");
const dlayer = require("./");
const loaders = require("../../api/loaders");
let schema = {
hardware: {
drives: function () {
return [{
name: "foo",
size: () => "4 GiB"
}, {
name: "bar",
size: () => "2 TiB"
}];
},
primaryNetworkInterface: function () {
return {
name: "baz",
dataRate: () => "2.5 gbps"
};
}
}
};
let api = dlayer({
schema: schema
});
return Promise.try(() => {
return api.query({
hardware: {
drives: {
name: true,
size: true
},
primaryNetworkInterface: {
name: true
}
}
});
}).then((result) => {
console.dir(result, {depth: null});
}).catch((error) => {
console.dir("Unhandled error", error);
});

@ -7,13 +7,20 @@ const util = require("util");
const execFileAsync = util.promisify(require("child_process").execFile);
const debug = require("debug")("cvm:execBinary");
const asExpression = require("as-expression");
const splitFilterN = require("split-filter-n");
const { rethrowAs, chain } = require("error-chain");
const isPlainObj = require("is-plain-obj");
const concatArrays = require("concat-arrays");
const unreachable = require("@joepie91/unreachable")("cvm"); // FIXME: Change on publish
const textParser = require("../text-parser");
const errors = require("./errors");
const OutputForbidden = Symbol("OutputForbidden");
/* FIXME: How to handle partial result parsing when an error is encountered in the parsing adapter? */
/* FIXME: Test that flag-dash prevention in arguments works */
// FIXME: Explicitly document that text parsers *should* allow for specifying arbitrary postprocessing JS in some way
function keyToFlagName(key) {
if (key.startsWith("!")) {
@ -54,8 +61,83 @@ function validateArguments(args) {
}
}
// Whether `exitCode` is acceptable given the configured `allowedExitCodes`.
function testExitCode(exitCode, allowedExitCodes) {
	// NOTE: The `===` here is intentional; *only* a null is considered to mean "any exit code allowed", so that when an `undefined` gets passed in accidentally, it doesn't silently do the wrong thing.
	if (allowedExitCodes === null) {
		return true;
	}

	return allowedExitCodes.includes(exitCode);
}
/* Evaluates a single output expectation against the collected output
   channels. Returns the parsed result object (or undefined when the parser
   produced nothing), or throws when the output violates the expectation. */
function tryExpectation(expectation, channels) {
	let channelName = expectation.channel;
	let channel = channels[channelName];

	if (channel == null) {
		throw unreachable(`Encountered expectation for unexpected channel '${channelName}'`);
	}

	// FIX: `channel.toString()` was previously called *before* the null check
	// above, so a missing channel crashed with a TypeError instead of reaching
	// the `unreachable` error.
	let channelAsString = channel.toString();

	if (expectation.adapter === OutputForbidden) {
		if (channelAsString.length > 0) {
			throw new errors.UnexpectedOutput(`Encountered output on '${channelName}', but no output was supposed to be produced there`, {
				failedChannel: channelName
			});
		} else {
			// FIX: OutputForbidden is a Symbol, not a parser; previously an empty
			// forbidden channel fell through to `expectation.adapter.parse(...)`
			// below, which threw a TypeError that got wrapped as a parsing
			// failure. No output on a forbidden channel simply yields no result.
			return undefined;
		}
	}

	let result = asExpression(() => {
		try {
			return expectation.adapter.parse(channelAsString);
		} catch (error) {
			if (error instanceof textParser.ParseError) {
				throw error;
			} else {
				throw chain(error, errors.OutputParsingFailed, `An error occurred while parsing '${channelName}'`, {
					failedChannel: expectation.channel
				});
			}
		}
	});

	if (textParser.isErrorResult(result)) {
		result.throw();
	// } else if (result === undefined || isPlainObj(result)) { // NOTE: Currently broken, see https://github.com/sindresorhus/is-plain-obj/issues/11
	} else if (result === undefined || (typeof result === "object" && !Array.isArray(result) )) {
		return result;
	} else {
		throw new Error(`Output adapters may only return a plain object from their parse method (or nothing at all)`);
	}
}
const NoResult = Symbol("NoResult");
// Runs every expectation against the collected channels, collecting the
// results of those parsers that produced one. An optional parser that fails
// to match is silently skipped; a *required* parser that fails to match
// raises an ExpectedOutputMissing error instead.
function testExpectations(expectations, channels) {
	let results = [];

	for (let expectation of expectations) {
		try {
			results.push(tryExpectation(expectation, channels));
		} catch (error) {
			if (!(error instanceof textParser.ParseError)) {
				throw error;
			}

			if (expectation.required === true) {
				let channelName = expectation.channel;

				throw chain(error, errors.ExpectedOutputMissing, `A required parser failed to parse the output on ${channelName}`, {
					failedChannel: channelName
				});
			}

			// Optional parser didn't match; it simply contributes no result.
		}
	}

	return results;
}
// FIXME: Immutable-builder abstraction
// FIXME: validatem
// FIXME: Reconsider the exit code handling; should we always permit stderr parsing even if a non-zero exit code occurs?
module.exports = function createBinaryInvocation(command, args = []) {
/* FIXME: The below disallows dashes in the args, but not in the command. Is that what we want? */
validateArguments(args);
@ -67,6 +149,7 @@ module.exports = function createBinaryInvocation(command, args = []) {
flags: {},
environment: {},
expectedExitCodes: [0],
resultRequired: false,
resultMerger: function (results) {
return results.reduce((merged, result) => Object.assign(merged, result), {});
}
@ -86,6 +169,12 @@ module.exports = function createBinaryInvocation(command, args = []) {
asRoot: function () {
return this._withSettings({ asRoot: true });
},
withAllowedExitCodes: function (allowedExitCodes) {
return this._withSettings({ expectedExitCodes: allowedExitCodes });
},
withAnyExitCode: function () {
return this._withSettings({ expectedExitCodes: null });
},
withFlags: function (flags) {
if (flags != null) {
return this._withSettings({
@ -124,10 +213,17 @@ module.exports = function createBinaryInvocation(command, args = []) {
required: true
});
},
failOnStdout: function (adapter) {
// failOnStdout: function (adapter) {
// return this._withExpectation({
// channel: "stdout",
// adapter: adapter,
// disallowed: true
// });
// },
failOnAnyStdout: function () {
return this._withExpectation({
channel: "stdout",
adapter: adapter,
adapter: OutputForbidden,
disallowed: true
});
},
@ -144,20 +240,25 @@ module.exports = function createBinaryInvocation(command, args = []) {
required: true
});
},
failOnStderr: function (adapter) {
return this._withExpectation({
channel: "stderr",
adapter: adapter,
disallowed: true
});
},
// failOnStderr: function (adapter) {
// return this._withExpectation({
// channel: "stderr",
// adapter: adapter,
// disallowed: true
// });
// },
failOnAnyStderr: function () {
return this._withExpectation({
channel: "stderr",
adapter: null,
adapter: OutputForbidden,
disallowed: true
});
},
requireResult: function () {
// NOTE: This requires that *any* adapter produces a result, it doesn't matter which one.
// FIXME: Should this be inverted so that "requires result" is the default, and the user can opt out of that?
return this._withSettings({ requireResult: true });
},
then: function () {
throw new Error("Attempted to use a command builder as a Promise; you probably forgot to call .execute");
},
@ -189,82 +290,52 @@ module.exports = function createBinaryInvocation(command, args = []) {
return { stdout, stderr, error, exitCode };
}).then(({stdout, stderr, error, exitCode}) => {
try {
let channels = { stdout, stderr };
let { expectedExitCodes, expectations, resultMerger, resultRequired } = this._settings;
let expectationsByChannel = splitFilterN(expectations, [ "stdout", "stderr" ], (expectation) => expectation.channel);
let channels = { stdout, stderr };
if (!this._settings.expectedExitCodes.includes(exitCode)) {
// 1. process stderr expectations
// 2. throw on invalid exit code if there was no stderr match
// 3. only process stdout expectations if exit code was valid *and* there was no throw
try {
let hasValidExitCode = testExitCode(exitCode, expectedExitCodes);
let stderrResults = testExpectations(expectationsByChannel.stderr, channels);
// TODO: Add an option to validate the exit code *even* when there's stderr output
if (stderrResults.length === 0 && !hasValidExitCode) {
// FIXME: Can we actually pass `error` to be chained onto here, when there's a case where `error` is undefined? Namely, when requiring a non-zero exit code, but the process exits with 0.
throw chain(error, errors.NonZeroExitCode, `Expected exit code to be one of ${JSON.stringify(this._settings.expectedExitCodes)}, but got '${exitCode}'`, {
throw chain(error, errors.NonZeroExitCode, `Expected exit code to be one of ${JSON.stringify(expectedExitCodes)}, but got '${exitCode}'`, {
exitCode: exitCode,
stdout: stdout,
stderr: stderr
});
} else {
let expectationResults = this._settings.expectations
.map((expectation) => {
if (expectation.adapter == null) {
if (channels[expectation.channel] != null) {
if (channels[expectation.channel].length > 0) {
throw new errors.UnexpectedOutput(`Encountered output on '${expectation.channel}', but no output was supposed to be produced there`, {
failedChannel: expectation.channel
});
} else {
return undefined;
}
} else {
// FIXME: use @joepie91/unreachable
throw new Error(`Encountered expectation for unexpected channel '${expectation.channel}'; this is a bug, please report it`, {
failedChannel: expectation.channel
});
}
} else {
let result = asExpression(() => {
try {
return expectation.adapter.parse(channels[expectation.channel].toString());
} catch (error) {
// TODO: What if both `required` *and* `disallowed`? Can that ever occur, conceptually speaking?
if (error instanceof textParser.NoResult) {
// FIXME: Annotate to make error source clearer?
if (expectation.required === true) {
throw error;
} else {
return undefined;
}
} else {
throw chain(error, errors.OutputParsingFailed, `An error occurred while parsing '${expectation.channel}'`, {
failedChannel: expectation.channel
});
}
}
});
if (result !== undefined && (typeof result !== "object" || Array.isArray(result))) {
throw new Error(`Output adapters may only return a plain object from their parse method (or nothing at all)`);
} else if (result !== undefined && expectation.disallowed === true) {
// TODO: How to make this error more informative?
throw new errors.UnexpectedOutput(`Encountered output on '${expectation.channel}' that isn't supposed to be there`, {
failedChannel: expectation.channel
});
} else {
return result;
}
}
})
.filter((result) => {
return (result != null);
});
}
let mergedResults = (expectationResults.length > 0)
? this._settings.resultMerger(expectationResults)
: expectationResults[0];
let stdoutResults = testExpectations(expectationsByChannel.stdout, channels);
let allResults = concatArrays(stderrResults, stdoutResults);
return {
exitCode: exitCode,
stdout: stdout,
stderr: stderr,
result: mergedResults
};
}
let mergedResults = asExpression(() => {
if (allResults.length === 0) {
if (!resultRequired) {
return {};
} else {
throw new errors.ExpectedOutputMissing(`At least one of the output parsers should have produced a result, but none of them did`);
}
} else if (allResults.length === 1) {
return allResults[0];
} else {
// FIXME: Make merger explicitly configurable with a dedicated configuration method
return resultMerger(allResults);
}
});
return {
exitCode: exitCode,
stdout: stdout,
stderr: stderr,
result: mergedResults
};
} catch (error) {
// FIXME: Use getAllContext
let message = (error.failedChannel != null)

@ -12,8 +12,8 @@ const createJsonParser = require("../text-parser-json");
function mapMountList(mounts) {
return mounts.map((mount) => {
// Some poorly-documented pseudo-filesystems were not worth investigating mount options for, yet. For those, we silently ignore missing/unknown entries.
// TODO: FUSE should eventually be removed from this list
let missingOptionsAllowed = ["cgroup", "cgroup2", "bpf", "pstore", "fuse"].includes(mount.fstype);
// TODO: FUSE, UDF should eventually be removed from this list
let missingOptionsAllowed = ["cgroup", "cgroup2", "bpf", "pstore", "fuse", "udf"].includes(mount.fstype);
let parsedOptions = parseMountOptions(mount.fstype, mount.options);

@ -0,0 +1,28 @@
"use strict";
const Promise = require("bluebird");
const execBinary = require("../../exec-binary");
const unattendedFlags = require("../modifiers/unattended-flags");
const handleDeviceNotFound = require("../modifiers/handle-device-not-found");
const handleVolumeGroupNotFound = require("../modifiers/handle-volume-group-not-found");
const handlePhysicalVolumeInUse = require("../modifiers/handle-physical-volume-in-use");
const handlePartitionExists = require("../modifiers/handle-partition-exists");
const handleIncompatibleDevice = require("../modifiers/handle-incompatible-device");
module.exports = function ({ physicalVolume, volumeGroup }) {
return Promise.try(() => {
return execBinary("vgextend", [volumeGroup, physicalVolume])
.asRoot()
.requireResult()
.withModifier(unattendedFlags)
.withModifier(handleDeviceNotFound(physicalVolume))
.withModifier(handleVolumeGroupNotFound(volumeGroup))
.withModifier(handlePhysicalVolumeInUse(physicalVolume))
.withModifier(handlePartitionExists(physicalVolume, "add device to Volume Group"))
.withModifier(handleIncompatibleDevice(physicalVolume, "added to the Volume Group"))
.execute();
}).then((_output) => {
return true;
});
};

@ -0,0 +1,25 @@
"use strict";
const Promise = require("bluebird");
const execBinary = require("../../exec-binary");
const forceFlags = require("../modifiers/force-flags");
const unattendedFlags = require("../modifiers/unattended-flags");
const handleDeviceNotFound = require("../modifiers/handle-device-not-found");
const handlePartitionExists = require("../modifiers/handle-partition-exists");
const handleDeviceInUse = require("../modifiers/handle-device-in-use");
module.exports = function ({ devicePath, force }) {
return Promise.try(() => {
return execBinary("pvcreate", [devicePath])
.asRoot()
.requireResult()
.withModifier((force === true) ? forceFlags : unattendedFlags)
.withModifier(handleDeviceNotFound(devicePath))
.withModifier(handleDeviceInUse(devicePath))
.withModifier(handlePartitionExists(devicePath, "create a Physical Volume"))
.execute();
}).then((_output) => {
return true;
});
};

@ -0,0 +1,63 @@
"use strict";
const Promise = require("bluebird");
const execBinary = require("../../exec-binary");
const { errorResult } = require("../../text-parser");
const createRegexParser = require("../../text-parser-regex");
const unattendedFlags = require("../modifiers/unattended-flags");
const errors = require("../errors");
// TODO: Plural versions of handle modifiers? Or just have them accept an array of inputs?
module.exports = function ({ name, physicalVolumes }) {
return Promise.try(() => {
// FIXME: Validatem
if (/^[a-zA-Z0-9_][a-zA-Z0-9+_.-]*$/.test(name)) {
return execBinary("vgcreate", [name, ...physicalVolumes])
.asRoot()
.requireResult()
.withModifier(unattendedFlags)
.expectOnStderr(createRegexParser(/A volume group called ([^"]+) already exists\./, () => {
return errorResult(new errors.VolumeGroupExists(`A volume group with the name '${name}' already exists`, {
volumeGroupName: name
}));
}))
.expectOnStderr(createRegexParser(/WARNING: [a-z]+ signature detected on (.+) at offset/g, (matches) => {
let failedDevices = matches.map((match) => match.subMatches[0]);
let list = failedDevices.join(", ");
return errorResult(new errors.PartitionExists(`Refused to create a Volume Group, as partitions or partition tables already exist on the following devices: ${list}`, {
paths: failedDevices
}));
}))
.expectOnStderr(createRegexParser(/Device (.+) not found\./g, (matches) => {
let failedDevices = matches.map((match) => match.subMatches[0]);
let list = failedDevices.join(", ");
return errorResult(new errors.InvalidPath(`The following specified devices do not exist: ${list}`, {
paths: failedDevices
}));
}))
.expectOnStderr(createRegexParser(/Physical volume '([^']+)' is already in volume group '([^']+)'/g, (matches) => {
let failedItems = matches.map((match) => {
let [ device, volumeGroup ] = match.subMatches;
return { device, volumeGroup };
});
let list = failedItems
.map((item) => `${item.device} (${item.volumeGroup})`)
.join(", ");
return errorResult(new errors.PhysicalVolumeInUse(`The following specified Physical Volumes are already in use in another Volume Group: ${list}`, {
volumes: failedItems
}));
}))
.execute();
} else {
throw new errors.InvalidName(`The specified Volume Group name '${name}' contains invalid characters`);
}
}).then((_output) => {
return true;
});
};

@ -0,0 +1,29 @@
"use strict";
const Promise = require("bluebird");
const execBinary = require("../../exec-binary");
const { errorResult } = require("../../text-parser");
const createRegexParser = require("../../text-parser-regex");
const handleDeviceNotFound = require("../modifiers/handle-device-not-found");
const handlePhysicalVolumeInUse = require("../modifiers/handle-physical-volume-in-use");
const errors = require("../errors");
module.exports = function ({ devicePath }) {
return Promise.try(() => {
return execBinary("pvremove", [devicePath])
.asRoot()
.requireResult()
.withModifier(handleDeviceNotFound(devicePath))
.withModifier(handlePhysicalVolumeInUse(devicePath))
.expectOnStdout(createRegexParser(/Labels on physical volume "[^"]+" successfully wiped\./, () => undefined))
.expectOnStderr(createRegexParser(/No PV( label)? found on .+\./, () => {
return errorResult(new errors.InvalidPath(`Specified device '${devicePath}' is not a Physical Volume`, {
path: devicePath
}));
}))
.execute();
}).then((_output) => {
return true;
});
};

@ -0,0 +1,188 @@
"use strict";
const Promise = require("bluebird");
const execBinary = require("../../exec-binary");
const parseIECBytes = require("../../parse-bytes-iec");
const matchValue = require("match-value");
const asJson = require("../modifiers/as-json");
const mapFlag = require("../map-flag");
const parseStringList = require("../parse-string-list");
const parseOptionalString = require("../parse-optional-string");
const parseOptionalDate = require("../parse-optional-date");
const parseIntStrict = require("../../parse-int-strict");
function isUppercase(string) {
return (string === string.toUpperCase());
}
/**
 * Lists all LVM Logical Volumes on the system, via `lvs -o lv_all` with JSON
 * report output.
 *
 * Returns a Promise resolving to `{ volumes: [ ... ] }`, one normalized entry
 * per logical volume. The lv_attr flag-position meanings are documented inline
 * (copied from the lvs documentation).
 */
module.exports = function () {
	return Promise.try(() => {
		return execBinary("lvs")
			.asRoot()
			.withFlags({
				options: "lv_all"
			})
			.withModifier(asJson((result) => {
				return {
					volumes: result.report[0].lv.map((volume) => {
						return {
							path: volume.lv_path,
							name: volume.lv_name, // NOTE: Not unique!
							fullName: volume.lv_full_name,
							uuid: volume.lv_uuid,
							deviceMapperPath: volume.lv_dm_path,
							// FIXME: lv_parent -- is this just for thin pools?
							// FIXME: lv_active, only known value is 'active' but it's not documented
							layoutAttributes: parseStringList(volume.lv_layout), // linear, mirrored, striped // FIXME: check for specific values here? and can there really be multiple?
							roles: parseStringList(volume.lv_role),
							tags: parseStringList(volume.lv_tags),
							configurationProfile: parseOptionalString(volume.lv_profile),
							creationTime: parseOptionalDate(volume.lv_time),
							creationHost: parseOptionalString(volume.lv_host),
							neededKernelModules: parseStringList(volume.lv_modules),
							dataVolume: parseOptionalString(volume.data_lv), // For thin and cache pools only
							metadataVolume: parseOptionalString(volume.metadata_lv), // For thin and cache pools only
							poolVolume: parseOptionalString(volume.pool_lv), // For thin volumes only
							// "-1" is LVM's sentinel for "no persistent device number assigned"
							persistentMajorNumber: (volume.lv_major !== "-1") ? parseIntStrict(volume.lv_major) : undefined,
							persistentMinorNumber: (volume.lv_minor !== "-1") ? parseIntStrict(volume.lv_minor) : undefined,
							// Volume type: (C)ache, (m)irrored, (M)irrored without initial sync, (o)rigin, (O)rigin with merging snapshot, (r)aid, (R)aid without initial sync, (s)napshot, merging (S)napshot, (p)vmove, (v)irtual, mirror or raid (i)mage, mirror or raid (I)mage out-of-sync, mirror (l)og device, under (c)onversion, thin (V)olume, (t)hin pool, (T)hin pool data, v(d)o pool, v(D)o pool data, raid or pool m(e)tadata or pool metadata spare.
							type: mapFlag(volume.lv_attr, 0, {
								C: "CACHE",
								m: "MIRRORED",
								M: "MIRRORED",
								o: "ORIGIN",
								O: "ORIGIN",
								r: "RAID",
								R: "RAID",
								s: "SNAPSHOT",
								S: "SNAPSHOT",
								p: "PVMOVE",
								v: "VIRTUAL",
								i: "IMAGE",
								I: "IMAGE",
								l: "LOG_DEVICE",
								c: "UNDER_CONVERSION",
								V: "THIN_VOLUME",
								t: "THIN_POOL",
								T: "THIN_POOL_DATA",
								d: "VDO_POOL",
								D: "VDO_POOL_DATA",
								e: "METADATA",
								"-": "NORMAL"
							}),
							// Permissions: (w)riteable, (r)ead-only, (R)ead-only activation of non-read-only volume
							isReadOnly: mapFlag(volume.lv_attr, 1, {
								w: false,
								r: true,
								R: false
							}),
							isCurrentlyReadOnly: mapFlag(volume.lv_attr, 1, {
								w: false,
								r: true,
								R: true
							}),
							// Allocation policy: (a)nywhere, (c)ontiguous, (i)nherited, c(l)ing, (n)ormal This is capitalised if the volume is currently locked against allocation changes, for example during pvmove(8).
							isAllocationLocked: isUppercase(volume.lv_attr[2]), // NOTE(review): assumes this position always holds a letter; isUppercase("-") would be true — confirm
							allocationPolicy: mapFlag(volume.lv_attr, 2, {
								a: "ANYWHERE",
								A: "ANYWHERE",
								c: "CONTIGUOUS",
								C: "CONTIGUOUS",
								i: "INHERITED",
								I: "INHERITED",
								l: "CLING",
								L: "CLING",
								n: "NORMAL",
								N: "NORMAL"
							}),
							// State: (a)ctive, (h)istorical, (s)uspended, (I)nvalid snapshot, invalid (S)uspended snapshot, snapshot (m)erge failed, suspended snapshot (M)erge failed, mapped (d)evice present without tables, mapped device present with (i)nactive table, thin-pool (c)heck needed, suspended thin-pool (C)heck needed, (X) unknown
							status: mapFlag(volume.lv_attr, 4, {
								a: "ACTIVE",
								h: "HISTORICAL",
								s: null, // NOTE(review): suspended state intentionally yields null here; suspension is exposed separately via isSuspended below — confirm
								I: "SNAPSHOT_INVALID",
								S: "SNAPSHOT_INVALID",
								m: "SNAPSHOT_MERGE_FAILED",
								M: "SNAPSHOT_MERGE_FAILED",
								d: "TABLES_MISSING",
								i: "TABLES_INACTIVE",
								c: "THIN_POOL_CHECK_NEEDED",
								C: "THIN_POOL_CHECK_NEEDED",
								X: "UNKNOWN"
							}),
							// isSuspended: mapFlag(volume.lv_attr, 4, {
							// 	a: false,
							// 	h: false,
							// 	s: true,
							// 	I: false,
							// 	S: true,
							// 	m: false,
							// 	M: true,
							// 	d: false,
							// 	i: false,
							// 	c: false,
							// 	C: true,
							// 	X: false
							// }),
							// Newly-allocated data blocks are overwritten with blocks of (z)eroes before use.
							// isZeroFilled: mapFlag(volume.lv_attr, 7, {
							// 	z: true,
							// 	"-": false
							// }),
							// Volume Health, where there are currently three groups of attributes identified:
							// (p)artial signifies that one or more of the Physical Volumes this Logical Volume uses is missing from the system. (X) unknown signifies the status is unknown.
							// (r)efresh signifies that one or more of the Physical Volumes this RAID Logical Volume uses had suffered a write error. The write error could be due to a temporary failure of that Physical Volume or an indication that it is failing. The device should be refreshed or replaced. (m)ismatches signifies that the RAID logical volume has portions of the array that are not coherent. Inconsistencies are detected by initiating a "check" on a RAID logical volume. (The scrubbing operations, "check" and "repair", can be performed on a RAID logical volume via the 'lvchange' command.) (w)ritemostly signifies the devices in a RAID 1 logical volume that have been marked write-mostly. Re(s)haping signifies a RAID Logical Volume is either undergoing a stripe addition/removal, a stripe size or RAID algorithm change. (R)emove after reshape signifies freed striped raid images to be removed.
							// (F)ailed is set if thin pool encounters serious failures and hence no further I/O is permitted at all. The out of (D)ata space is set if thin pool has run out of data space. (M)etadata read only signifies that thin pool encounters certain types of failures but it's still possible to do reads at least, but no metadata changes are allowed.
							// (F)ailed is set when related thin pool enters Failed state and no further I/O is permitted at all.
							// (E)rror is set dm-writecache reports an error.
							healthStatus: mapFlag(volume.lv_attr, 8, {
								X: "UNKNOWN",
								p: "PV_MISSING",
								r: "RAID_REPLACE_DEVICE",
								m: "RAID_MISMATCH",
								w: "RAID_PREFER_WRITE_ONLY",
								s: "RAID_RESHAPING",
								R: "RAID_REMOVE_UNNECESSARY",
								F: "FAILED",
								D: "OUT_OF_DATA_SPACE",
								M: "METADATA_FAILURE_READ_ONLY",
								E: "WRITECACHE_ERROR_REPORTED",
								"-": "HEALTHY"
							}),
							// s(k)ip activation: this volume is flagged to be skipped during activation.
							// isActivationSkipped: mapFlag(volume.lv_attr, 9, {
							// 	k: true,
							// 	"-": false
							// }),
							// The following booleans come from dedicated lv_* report fields, which
							// contain a fixed phrase when set and an empty string otherwise.
							isInitiallySynchronized: (volume.lv_initial_image_sync === "initial image sync"),
							isCurrentlySynchronized: (volume.lv_image_synced === "image synced"),
							isMerging: (volume.lv_merging === "merging"),
							isConverting: (volume.lv_converting === "converting"),
							isSuspended: (volume.lv_suspended === "suspended"),
							isActivationSkipped: (volume.lv_skip_activation === "skip activation"),
							isOpened: (volume.lv_device_open === "open"),
							isActiveLocally: (volume.lv_active_locally === "active locally"),
							isActiveRemotely: (volume.lv_active_remotely === "active remotely"),
							isActiveExclusively: (volume.lv_active_exclusively === "active exclusively"),
							isMergeFailed: (volume.lv_merge_failed === "merge failed"),
							isSnapshotInvalid: (volume.lv_merge_failed === "snapshot invalid"), // Snapshots only
							isLiveTablePresent: (volume.lv_live_table === "live table present"),
							isInactiveTablePresent: (volume.lv_live_table === "inactive table present"),
							isZeroFilled: (volume.zero === "zero"), // Thin pools only
							hasFixedMinorNumber: (volume.lv_fixed_minor === "fixed minor"),
							outOfSpacePolicy: matchValue(volume.lv_when_full, {
								error: "ERROR",
								queue: "QUEUE",
								"": null
							})
						};
					})
				};
			}))
			.execute();
	}).then((output) => {
		return output.result;
	});
};

@ -0,0 +1,45 @@
"use strict";
const Promise = require("bluebird");
const execBinary = require("../../exec-binary");
const parseIECBytes = require("../../parse-bytes-iec");
const asJson = require("../modifiers/as-json");
const mapFlag = require("../map-flag");
module.exports = function () {
return Promise.try(() => {
return execBinary("pvs")
.asRoot()
.withModifier(asJson((result) => {
return {
volumes: result.report[0].pv.map((volume) => {
return {
path: volume.pv_name,
volumeGroup: (volume.vg_name === "") ? null : volume.vg_name,
format: volume.pv_fmt,
// FIXME: These amounts can contain commas depending on locale (eg. https://serverfault.com/a/648302)
totalSpace: parseIECBytes(volume.pv_size),
freeSpace: parseIECBytes(volume.pv_free),
status: mapFlag(volume.pv_attr, 0, {
d: "DUPLICATE",
a: "ALLOCATABLE",
u: "USED"
}),
isExported: mapFlag(volume.pv_attr, 1, {
x: true,
"-": false
}),
isMissing: mapFlag(volume.pv_attr, 2, {
m: true,
"-": false
}),
};
})
};
}))
.execute();
}).then((output) => {
return output.result;
});
};

@ -0,0 +1,89 @@
"use strict";
const Promise = require("bluebird");
const execBinary = require("../../exec-binary");
const parseIECBytes = require("../../parse-bytes-iec");
const parseIntStrict = require("../../parse-int-strict");
const asJson = require("../modifiers/as-json");
const mapFlag = require("../map-flag");
const parseOptionalString = require("../parse-optional-string");
const parseStringList = require("../parse-string-list");
/**
 * Lists all LVM Volume Groups on the system, via `vgs -o vg_all` with JSON
 * report output.
 *
 * Returns a Promise resolving to `{ groups: [ ... ] }`, one normalized entry
 * per volume group. The vg_attr flag-position meanings are documented inline.
 */
module.exports = function () {
	return Promise.try(() => {
		return execBinary("vgs")
			.asRoot()
			.withFlags({
				options: "vg_all"
			})
			.withModifier(asJson((result) => {
				return {
					groups: result.report[0].vg.map((group) => {
						return {
							name: group.vg_name,
							uuid: group.vg_uuid,
							systemID: parseOptionalString(group.vg_systemid),
							metadataRevision: parseIntStrict(group.vg_seqno),
							tags: parseStringList(group.vg_tags),
							configurationProfile: parseOptionalString(group.vg_profile),
							// FIXME: vg_lock_type, vg_lock_args
							// FIXME: These amounts can contain commas depending on locale (eg. https://serverfault.com/a/648302)
							totalSpace: parseIECBytes(group.vg_size),
							freeSpace: parseIECBytes(group.vg_free),
							extentSize: parseIECBytes(group.vg_extent_size),
							totalExtents: parseIntStrict(group.vg_extent_count),
							freeExtents: parseIntStrict(group.vg_free_count),
							freeMetadataSpace: parseIECBytes(group.vg_mda_free),
							smallestMetadataSize: parseIECBytes(group.vg_mda_size),
							totalMetadataAreas: parseIntStrict(group.vg_mda_count),
							usedMetadataAreas: parseIntStrict(group.vg_mda_used_count),
							// "unmanaged" (no fixed copy target configured) maps to undefined
							usedMetadataAreasTarget: (group.vg_mda_copies === "unmanaged" ? undefined : parseIntStrict(group.vg_mda_copies)),
							physicalVolumeCount: parseIntStrict(group.pv_count),
							// A reported maximum of "0" means "unlimited"
							physicalVolumeLimit: (group.max_pv === "0") ? Infinity : parseIntStrict(group.max_pv),
							missingPhysicalVolumes: parseIntStrict(group.vg_missing_pv_count),
							logicalVolumeCount: parseIntStrict(group.lv_count),
							logicalVolumeLimit: (group.max_lv === "0") ? Infinity : parseIntStrict(group.max_lv),
							snapshotCount: parseIntStrict(group.snap_count),
							isExtendable: (group.vg_extendable === "extendable"),
							isReadOnly: mapFlag(group.vg_attr, 0, {
								// Permissions: (w)riteable, (r)ead-only
								r: true,
								w: false
							}),
							isResizeable: mapFlag(group.vg_attr, 1, {
								// Resi(z)eable
								z: true,
								"-": false
							}),
							isExported: mapFlag(group.vg_attr, 2, {
								// E(x)ported
								x: true,
								"-": false
							}),
							isIncomplete: mapFlag(group.vg_attr, 3, {
								// (p)artial: one or more physical volumes belonging to the volume group are missing from the system
								p: true,
								"-": false
							}),
							allocationPolicy: mapFlag(group.vg_attr, 4, {
								// Allocation policy: (c)ontiguous, c(l)ing, (n)ormal, (a)nywhere
								c: "CONTIGUOUS",
								l: "CLING",
								n: "NORMAL",
								a: "ANYWHERE"
							}),
							mode: mapFlag(group.vg_attr, 5, {
								c: "CLUSTERED",
								s: "SHARED",
								"-": "LOCAL"
							})
						};
					})
				};
			}))
			.execute();
	}).then((output) => {
		return output.result;
	});
};

@ -0,0 +1,23 @@
"use strict";
const Promise = require("bluebird");
const path = require("path");
const execBinary = require("../../exec-binary");
const createPegParser = require("../../text-parser-pegjs");
let versionParser = createPegParser({
grammarFile: path.join(__dirname, "./version.pegjs")
});
module.exports = function () {
return Promise.try(() => {
return versionParser;
}).then((parser) => {
return execBinary("lvm", ["version"])
.asRoot()
.requireOnStdout(parser)
.execute();
}).then((output) => {
return output.result;
});
};

@ -0,0 +1,24 @@
import { SameLine as _ } from "../../peg-whitespace"
import { RestOfLine } from "../../peg-rest-of-line"
{
const syncpipe = require("syncpipe");
const mapVersionTitle = require("../map-version-title");
}
// Output: collects one { key, value } entry per line of `lvm version` output
// into a single object, keyed by the normalized version label.
Output
	= entries:VersionLine+ {
		// FIXME/MARKER: Build a generic abstraction for fromNamedEntries or so
		return syncpipe(entries, [
			(_) => _.map(({ key, value }) => [ key, value ]),
			(_) => Object.fromEntries(_)
		]);
	}
// VersionLine: a single `Label:  value` line; labels may contain spaces.
VersionLine
	= _ label:$[A-Za-z ]+ ":" _ version:RestOfLine {
		return {
			key: mapVersionTitle(label), // FIXME/MARKER: Rename to mapVersionLabel
			value: version.trim()
		};
	}

@ -9,4 +9,6 @@ module.exports = {
VolumeGroupExists: errorChain.create("VolumeGroupExists"),
InvalidVolumeGroup: errorChain.create("InvalidVolumeGroup"),
PhysicalVolumeInUse: errorChain.create("PhysicalVolumeInUse"),
DeviceInUse: errorChain.create("PhysicalVolumeInUse"),
IncompatibleDevice: errorChain.create("IncompatibleDevice")
};

@ -1,243 +1,15 @@
"use strict";
const Promise = require("bluebird");
const { chain } = require("error-chain");
const execBinary = require("../exec-binary");
const parseIECBytes = require("../parse-bytes-iec");
const errors = require("./errors");
function mapVersionTitle(title) {
if (title === "LVM version") {
return "lvm";
} else if (title === "Library version") {
return "library";
} else if (title === "Driver version") {
return "driver";
} else if (title === "Configuration") {
return "configuration";
} else {
throw new Error(`Unrecognized version type for LVM: ${title}`);
}
}
function unattendedFlags(command) {
/* This will answer "no" to any safety prompts, cancelling the operation if safety issues arise. */
return command.withFlags({
q: [true, true]
});
}
function forceFlags(command) {
/* This will force-bypass safety checks, for when the administrator has indicated that they want to take the risk. */
return command.withFlags({
force: true
});
}
function asJson(resultMapper) {
return function (command) {
return command
.expectJsonStdout(resultMapper)
.withFlags({
reportformat: "json"
});
};
}
function hasFlag(flag) {
return function (error) {
if (error.getAllContext != null) {
let context = error.getAllContext();
/* The below counts *any* kind of non-null value as having a flag set, to accommodate matchAll scenarios and scenarios where the flag needs to contain further information. */
return (context.result != null && context.result[flag] != null);
} else {
return false;
}
};
}
// FIXME: Convert to new execBinary API
module.exports = {
getVersions: function () {
return Promise.try(() => {
return execBinary("lvm", ["version"])
.asRoot()
.singleResult()
.expectStdout("versions", /^\s*([^:]+):\s*(.+)$/gm, {
required: true,
matchAll: true,
result: ([title, version]) => {
return {
key: mapVersionTitle(title),
value: version
};
}
})
.execute();
}).then(({result}) => {
return result.reduce((object, entry) => {
return Object.assign(object, {
[entry.key]: entry.value
});
}, {});
});
},
getPhysicalVolumes: function () {
return Promise.try(() => {
return execBinary("pvs")
.asRoot()
.singleResult()
.withModifier(asJson((result) => {
return result.report[0].pv.map((volume) => {
return {
path: volume.pv_name,
volumeGroup: (volume.vg_name === "") ? null : volume.vg_name,
format: volume.pv_fmt,
totalSpace: parseIECBytes(volume.pv_size),
freeSpace: parseIECBytes(volume.pv_free),
isDuplicate: volume.pv_attr.includes("d"),
isAllocatable: volume.pv_attr.includes("a"),
isUsed: volume.pv_attr.includes("u"),
isExported: volume.pv_attr.includes("x"),
isMissing: volume.pv_attr.includes("m"),
};
});
}))
.execute();
}).then((output) => {
return output.result;
});
},
createPhysicalVolume: function ({ devicePath, force }) {
return Promise.try(() => {
return execBinary("pvcreate", [devicePath])
.asRoot()
.withModifier((force === true) ? forceFlags : unattendedFlags)
.expectStderr("deviceNotFound", /Device .+ not found\./, { result: () => true })
.expectStderr("partitionTableExists", /WARNING: [a-z]+ signature detected on/, { result: () => true })
.execute();
}).then((_output) => {
return true;
}).catch(hasFlag("deviceNotFound"), (error) => {
throw chain(error, errors.InvalidPath, `Specified device '${devicePath}' does not exist`, {
path: devicePath
});
}).catch(hasFlag("partitionTableExists"), (error) => {
throw chain(error, errors.PartitionExists, `Refused to create a Physical Volume, as a partition or partition table already exists on device '${devicePath}'`, {
path: devicePath
});
});
},
destroyPhysicalVolume: function ({ devicePath }) {
return Promise.try(() => {
return execBinary("pvremove", [devicePath])
.asRoot()
.atLeastOneResult()
.expectStdout("success", /Labels on physical volume "[^"]+" successfully wiped\./)
.expectStderr("deviceNotFound", /Device .+ not found\./, { result: () => true })
.expectStderr("notAPhysicalVolume", /No PV label found on .+\./, { result: () => true })
.execute();
}).then((_output) => {
return true;
}).catch(hasFlag("deviceNotFound"), (error) => {
throw chain(error, errors.InvalidPath, `Specified device '${devicePath}' does not exist`, {
path: devicePath
});
}).catch(hasFlag("notAPhysicalVolume"), (error) => {
throw chain(error, errors.InvalidPath, `Specified device '${devicePath}' is not a Physical Volume`, {
path: devicePath
});
});
},
createVolumeGroup: function ({ name, physicalVolumes }) {
return Promise.try(() => {
if (/^[a-zA-Z0-9_][a-zA-Z0-9+_.-]*$/.test(name)) {
return execBinary("vgcreate", [name, ...physicalVolumes])
.asRoot()
.withModifier(unattendedFlags)
.expectStderr("volumeGroupExists", /A volume group called ([^"]+) already exists\./, { result: () => true })
.expectStderr("partitionTableExists", /WARNING: [a-z]+ signature detected on (.+) at offset/g, {
result: ([device]) => device,
matchAll: true
})
.expectStderr("deviceNotFound", /Device (.+) not found\./g, {
result: ([device]) => device,
matchAll: true
})
.expectStderr("physicalVolumeInUse", /Physical volume '([^']+)' is already in volume group '([^']+)'/g, {
result: ([device, volumeGroup]) => ({device, volumeGroup}),
matchAll: true
})
.execute();
} else {
throw new errors.InvalidName(`The specified Volume Group name '${name}' contains invalid characters`);
}
}).then((_output) => {
return true;
}).catch(hasFlag("deviceNotFound"), (error) => {
let failedDevices = error.getAllContext().result.deviceNotFound;
throw chain(error, errors.InvalidPath, `The following specified devices do not exist: ${failedDevices.join(", ")}`, {
paths: failedDevices
});
}).catch(hasFlag("partitionTableExists"), (error) => {
let failedDevices = error.getAllContext().result.partitionTableExists;
throw chain(error, errors.PartitionExists, `Refused to create a Volume Group, as partitions or partition tables already exist on the following devices: ${failedDevices.join(", ")}`, {
paths: failedDevices
});
}).catch(hasFlag("volumeGroupExists"), (error) => {
throw chain(error, errors.VolumeGroupExists, `A volume group with the name '${name}' already exists`, {
volumeGroupName: name
});
}).catch(hasFlag("physicalVolumeInUse"), (error) => {
let failedItems = error.getAllContext().result.physicalVolumeInUse;
let failedItemString = failedItems.map(({device, volumeGroup}) => {
return `${device} (${volumeGroup})`;
}).join(", ");
throw chain(error, errors.PhysicalVolumeInUse, `The following specified Physical Volumes are already in use in another Volume Group: ${failedItemString}`, {
volumes: failedItems
});
});
},
addVolumeToVolumeGroup: function ({ physicalVolume, volumeGroup }) {
return Promise.try(() => {
return execBinary("vgextend", [volumeGroup, physicalVolume])
.asRoot()
.withModifier(unattendedFlags)
.expectStderr("deviceNotFound", /Device .+ not found\./, { result: () => true })
.expectStderr("volumeGroupNotFound", /Volume group "[^"]+" not found/, { result: () => true })
.expectStderr("partitionTableExists", /WARNING: [a-z]+ signature detected on/, { result: () => true })
.expectStderr("physicalVolumeInUse", /Physical volume '([^']+)' is already in volume group '([^']+)'/, {
result: ([device, volumeGroup]) => ({device, volumeGroup})
})
.execute();
}).then((_output) => {
return true;
}).catch(hasFlag("deviceNotFound"), (error) => {
throw chain(error, errors.InvalidPath, `Specified device '${physicalVolume}' does not exist`, {
path: physicalVolume
});
}).catch(hasFlag("volumeGroupNotFound"), (error) => {
throw chain(error, errors.InvalidVolumeGroup, `Specified Volume Group '${volumeGroup}' does not exist`, {
volumeGroupName: volumeGroup
});
}).catch(hasFlag("physicalVolumeInUse"), (error) => {
let volume = error.getAllContext().result.physicalVolumeInUse;
throw chain(error, errors.PhysicalVolumeInUse, `Specified Physical Volume '${physicalVolume}' is already in use in another Volume Group (${volume.volumeGroup})`, {
volume: volume
});
}).catch(hasFlag("partitionTableExists"), (error) => {
throw chain(error, errors.PartitionExists, `Refused to add device to Volume Group, as a partition or partition table already exists on device '${physicalVolume}'`, {
path: physicalVolume
});
});
}
getVersions: require("./commands/version"),
getPhysicalVolumes: require("./commands/get-physical-volumes"),
getVolumeGroups: require("./commands/get-volume-groups"),
getLogicalVolumes: require("./commands/get-logical-volumes"),
createPhysicalVolume: require("./commands/create-physical-volume"),
destroyPhysicalVolume: require("./commands/destroy-physical-volume"),
createVolumeGroup: require("./commands/create-volume-group"),
addVolumeToVolumeGroup: require("./commands/add-volume-to-volume-group")
};
// TODO: Need to check if cache service running?
// TODO: Wrap errors in commands in the appropriate chained error types, beyond a generic CommandExecutionFailed from exec-binary?

@ -0,0 +1,7 @@
"use strict";
const matchValue = require("match-value");
module.exports = function mapFlag(flagString, index, mapper) {
return matchValue(flagString[index], mapper);
};

@ -0,0 +1,12 @@
"use strict";
const matchValue = require("match-value");
// Maps the human-readable component labels from `lvm version` output onto
// stable machine-friendly keys; unknown labels are handled by match-value.
module.exports = function mapVersionTitle(title) {
	return matchValue(title, {
		"LVM version": "lvm",
		"Library version": "library",
		"Driver version": "driver",
		"Configuration": "configuration"
	});
};

@ -0,0 +1,14 @@
"use strict";
const createJSONParser = require("../../text-parser-json");
module.exports = function asJson(resultMapper) {
return function (command) {
return command
.requireOnStdout(createJSONParser(resultMapper))
.withFlags({
reportformat: "json",
units: "h" // Prevent < rounding indicator in output
});
};
};

@ -0,0 +1,8 @@
"use strict";
// Command modifier that adds the `--force` flag to the invocation.
module.exports = function forceFlags(command) {
	/* This will force-bypass safety checks, for when the administrator has indicated that they want to take the risk. */
	return command.withFlags({
		force: true
	});
};

@ -0,0 +1,21 @@
"use strict";
const { errorResult } = require("../../text-parser");
const createRegexParser = require("../../text-parser-regex");
const errors = require("../errors");
// Command modifier factory: translates LVM's "can't initialize ... without
// -ff" stderr message into a structured DeviceInUse error for `devicePath`.
module.exports = function (devicePath) {
	return function handleDeviceInUse(command) {
		return command.expectOnStderr(createRegexParser(/Can't initialize physical volume "([^"]+)" of volume group "([^"]+)" without -ff/, (match) => {
			// The first submatch is the device path as reported by LVM; the
			// caller-supplied `devicePath` is used instead, so only the volume
			// group name is needed here.
			let [ _device, existingVolumeGroup ] = match.subMatches;

			return errorResult(new errors.DeviceInUse(`Specified device '${devicePath}' is already in use as a Physical Volume in another Volume Group (${existingVolumeGroup})`, {
				volume: {
					device: devicePath,
					volumeGroup: existingVolumeGroup
				}
			}));
		}));
	};
};

@ -0,0 +1,16 @@
"use strict";
const { errorResult } = require("../../text-parser");
const createRegexParser = require("../../text-parser-regex");
const errors = require("../errors");
module.exports = function (devicePath) {
return function handleDeviceNotFound(command) {
return command.expectOnStderr(createRegexParser(/Device .+ not found\./, () => {
return errorResult(new errors.InvalidPath(`Specified device '${devicePath}' does not exist`, {
path: devicePath
}));
}));
};
};

@ -0,0 +1,16 @@
"use strict";
const { errorResult } = require("../../text-parser");
const createRegexParser = require("../../text-parser-regex");
const errors = require("../errors");
// Command modifier factory: translates LVM's "excluded by a filter" stderr
// message into a structured IncompatibleDevice error. `action` is a short
// description of the attempted operation, interpolated into the message.
module.exports = function (devicePath, action) {
	return function handleIncompatibleDevice(command) {
		return command.expectOnStderr(createRegexParser(/Device .+ excluded by a filter\./, () => {
			return errorResult(new errors.IncompatibleDevice(`Specified device '${devicePath}' could not be ${action} because it is incompatible; perhaps it's too small?`, {
				path: devicePath
			}));
		}));
	};
};

@ -0,0 +1,16 @@
"use strict";
const { errorResult } = require("../../text-parser");
const createRegexParser = require("../../text-parser-regex");
const errors = require("../errors");
// Command modifier factory: translates LVM's "signature detected" warning on
// stderr into a structured PartitionExists error. `action` is a short
// description of the attempted operation, interpolated into the message.
module.exports = function (devicePath, action) {
	return function handlePartitionExists(command) {
		return command.expectOnStderr(createRegexParser(/WARNING: [a-z]+ signature detected on/, () => {
			return errorResult(new errors.PartitionExists(`Refused to ${action}, as a partition or partition table already exists on device '${devicePath}'`, {
				path: devicePath
			}));
		}));
	};
};

@ -0,0 +1,35 @@
"use strict";
const { errorResult } = require("../../text-parser");
const createRegexParser = require("../../text-parser-regex");
const errors = require("../errors");
// Command modifier factory: translates both of LVM's "physical volume in use"
// stderr messages into structured PhysicalVolumeInUse errors for
// `physicalVolume`.
module.exports = function (physicalVolume) {
	return function handlePhysicalVolumeInUse(command) {
		return command
			// Phrasing used when adding a volume that already belongs to a group
			// (seen with vgcreate/vgextend).
			.expectOnStderr(createRegexParser(/Physical volume '([^']+)' is already in volume group '([^']+)'/, (match) => {
				let [ _device, existingVolumeGroup ] = match.subMatches;

				return errorResult(new errors.PhysicalVolumeInUse(`Specified Physical Volume '${physicalVolume}' is already in use in another Volume Group (${existingVolumeGroup})`, {
					volume: {
						device: physicalVolume,
						volumeGroup: existingVolumeGroup
					}
				}));
			}))
			// Phrasing used when removing a volume that must be vgreduce'd first
			// (seen with pvremove); example message at the bottom of this file.
			.expectOnStderr(createRegexParser(/PV (.+) is used by VG (.+) so please use vgreduce first\./, (match) => {
				let [ _device, existingVolumeGroup ] = match.subMatches;

				// FIXME: Improve context structure here?
				return errorResult(new errors.PhysicalVolumeInUse(`Specified Physical Volume '${physicalVolume}' is still in use in Volume Group '${existingVolumeGroup}'`, {
					volume: {
						device: physicalVolume,
						volumeGroup: existingVolumeGroup
					}
				}));
			}));
	};
};
// /dev/loop0 is used by VG vg-name so please use vgreduce first.

@ -0,0 +1,16 @@
"use strict";
const { errorResult } = require("../../text-parser");
const createRegexParser = require("../../text-parser-regex");
const errors = require("../errors");
// Command modifier factory: translates LVM's `Volume group "..." not found`
// stderr message into a structured InvalidVolumeGroup error for `volumeGroup`.
module.exports = function (volumeGroup) {
	return function handleVolumeGroupNotFound(command) {
		return command.expectOnStderr(createRegexParser(/Volume group "[^"]+" not found/, () => {
			return errorResult(new errors.InvalidVolumeGroup(`Specified Volume Group '${volumeGroup}' does not exist`, {
				volumeGroupName: volumeGroup
			}));
		}));
	};
};

@ -0,0 +1,8 @@
"use strict";
// Command modifier for non-interactive operation.
// NOTE(review): `q: [true, true]` presumably serializes to a repeated `-q -q`
// (double-quiet) flag — confirm against exec-binary's flag serialization.
module.exports = function unattendedFlags(command) {
	/* This will answer "no" to any safety prompts, cancelling the operation if safety issues arise. */
	return command.withFlags({
		q: [true, true]
	});
};

@ -0,0 +1,9 @@
"use strict";
module.exports = function parseOptionalString(string) {
if (string.length === 0) {
return undefined;
} else {
return new Date(string);
}
};

@ -0,0 +1,9 @@
"use strict";
module.exports = function parseOptionalString(string) {
if (string.length === 0) {
return undefined;
} else {
return string;
}
};

@ -0,0 +1,9 @@
"use strict";
module.exports = function parseStringList(string) {
if (string.length === 0) {
return [];
} else {
return string.split(",");
}
};

@ -11,7 +11,7 @@ function createNamespaceParser() {
return {
namespaces: execAll(/^\[\s*[0-9]+\]:(?:(0)|0x([0-9A-F]+))$/gm, input)
.map((match) => {
let [ idLiteral, idHex ] = match.sub;
let [ idLiteral, idHex ] = match.subMatches;
if (idLiteral != null) {
/* NOTE: This is a special case for when the value is exactly 0 - and maybe there are others too, hence still doing a parseInt, so we can easily change the regex later if needed:

@ -2,8 +2,8 @@
const Promise = require("bluebird");
const path = require("path");
const execBinary = require("../exec-binary");
const createPegParser = require("../text-parser-pegjs");
const execBinary = require("../exec-binary");
const itemsToObject = require("../items-to-object");
/* FIXME: Error handling, eg. device not found errors */

@ -2,8 +2,9 @@ import { Integer } from "../../../peg-number"
import { HexInteger } from "../../../peg-hex-number"
import { Newline } from "../../../peg-newline"
import { SameLine as _ } from "../../../peg-whitespace"
import { RestOfLine } from "../../../peg-rest-of-line"
import { RestOfLine, IdentifierValue } from "../primitives"
import { IdentifierValue } from "../primitives"
import { Header } from "../shared"
{

@ -2,8 +2,9 @@ import { Integer } from "../../../peg-number"
import { CommaDelimitedInteger as GroupedNumber } from "../../../peg-grouped-number"
import { Newline } from "../../../peg-newline"
import { SameLine as _ } from "../../../peg-whitespace"
import { RestOfLine } from "../../../peg-rest-of-line"
import { RestOfLine, BytesValue } from "../primitives"
import { BytesValue } from "../primitives"
import { Header } from "../shared"
{

@ -1,6 +1,5 @@
import { SameLine as _ } from "../../../peg-whitespace"
import { RestOfLine } from "../primitives"
import { RestOfLine } from "../../../peg-rest-of-line"
RootScan
= devices:ScanDevice* {

@ -6,11 +6,6 @@ import { CommaDelimitedInteger as GroupedInteger } from "../../peg-grouped-numbe
const {B} = require("../../unit-bytes-iec");
}
RestOfLine
= content:$[^\n]+ Newline {
return content;
}
BytesValue
= value:GroupedInteger {
return B(value);

@ -1,6 +1,5 @@
import { Newline } from "../../peg-newline"
import { RestOfLine } from "./primitives"
import { RestOfLine } from "../../peg-rest-of-line"
Header 'header'
= "smartctl " versionString:RestOfLine "Copyright" copyrightStatement:RestOfLine Newline {

@ -4,6 +4,8 @@
toDisplay
conversion between unit scales (eg. IEC -> metric bytes)
ensure NaN is handled correctly
Track the originally-constructed value internally, so that stacked conversions can be done losslessly?
Additionally perhaps an isExact method that returns whether the current representation was the original one?
*/
const util = require("util");

@ -0,0 +1,9 @@
"use strict";
module.exports = function mapMaybeArray(value, handler) {
if (Array.isArray(value)) {
return value.map(handler);
} else {
return handler(value);
}
};

@ -0,0 +1,14 @@
"use strict";

const integerRegex = /^-?[0-9]+(?:e[0-9]+)?$/;

// TODO: Other bases than 10
// Strictly parses an integer: accepts actual integer numbers and strings in
// decimal or non-negative-exponent notation ("123", "-7", "1e3"); anything
// else (floats, float strings, other types) throws a TypeError.
//
// Fixed: parseInt() silently stopped at the exponent marker, turning eg.
// "1e3" into 1 instead of 1000; Number() evaluates the full notation, and the
// regex already guarantees the input denotes an integer.
function parseIntStrict(number) {
	if (typeof number === "number" && Number.isInteger(number)) {
		return number;
	} else if (typeof number === "string" && integerRegex.test(number)) {
		return Number(number);
	} else {
		throw new TypeError(`Input is not an integer or integer string`);
	}
}

/* Guarded so this module also loads in environments without a CommonJS `module` object; a plain no-op under Node's CommonJS loader. */
if (typeof module !== "undefined") {
	module.exports = parseIntStrict;
}

@ -325,6 +325,10 @@ let mountOptionMap = {
[Include]: ["fat"]
/* FIXME */
},
udf: {
// https://www.kernel.org/doc/Documentation/filesystems/udf.txt
// TODO
},
devpts: {
uid: { newPTYOwnerId: Value },
gid: { newPTYGroupId: Value },
@ -408,6 +412,14 @@ let mountOptionMap = {
// TODO
[Include]: [ "fuse" ]
},
"fuse.fuseiso": {
// TODO
[Include]: [ "fuse" ]
},
"fuse.portal": {
// TODO
[Include]: [ "fuse" ]
},
fusectl: {
// TODO
}

@ -0,0 +1,6 @@
import Newline from "../peg-newline"
// NOTE(review): sibling grammars use a braced form (`import { Newline } ...`);
// confirm that this brace-less import is accepted by the grammar preprocessor.
// RestOfLine: the remainder of the current line (at least one character) up to
// and including the trailing newline; yields the content without the newline.
RestOfLine
	= content:$[^\n]+ Newline {
		return content;
	}

@ -1,10 +1,27 @@
"use strict";
module.exports = function createJsonParser() {
const { chain } = require("error-chain");
// Fixed: ../text-parser exports an object ({ parse, ParseError, errorResult,
// isErrorResult }), so binding the whole module here would make
// `chain(error, ParseError, ...)` wrap the wrong value; destructure instead,
// matching text-parser-regex.
const { ParseError } = require("../text-parser");
module.exports = function createJsonParser(resultMapper) {
return {
supportsStreams: false,
parse: function (text) {
return JSON.parse(text);
try {
let parsed = JSON.parse(text);
if (resultMapper != null) {
return resultMapper(parsed);
} else {
return parsed;
}
} catch (error) {
if (error instanceof SyntaxError) {
throw chain(error, ParseError, "JSON parsing failed with a syntax error");
} else {
throw error;
}
}
}
};
};

@ -6,7 +6,7 @@ const moduleEval = require("eval");
const vm = require("vm");
const asExpression = require("as-expression");
const { chain } = require("error-chain");
const textParser = require("../text-parser");
const { ParseError } = require("../text-parser");
const { validateOptions } = require("@validatem/core");
const isString = require("@validatem/is-string");
@ -63,7 +63,7 @@ module.exports = function createPegParser(_options) {
return parser.parse(text);
} catch (error) {
if (error.name === "SyntaxError") {
throw chain(error, textParser.NoResult, "Parsing output failed");
throw chain(error, ParseError, "Parsing output failed");
} else {
throw error;
}

@ -0,0 +1,27 @@
"use strict";
const execall = require("execall");
const { ParseError } = require("../text-parser");
module.exports = function createRegexParser(regex, resultMapper) {
return {
supportsStreams: false,
parse: function (text) {
let matches = execall(regex, text);
if (matches.length > 0) {
let matchResult = (regex.global === true)
? matches
: matches[0];
if (resultMapper != null) {
return resultMapper(matchResult);
} else {
return matchResult;
}
} else {
throw new ParseError(`Input did not match regular expression ${regex}`);
}
}
};
};

@ -7,5 +7,24 @@ module.exports = {
return parser.parse(text);
},
// FIXME: Force global implementation!
// Base error type for all parse failures produced by the parser backends.
ParseError: errorChain.create("ParseError"),
// Wraps an Error into a tagged "error result" object, so a parse result can
// carry a deferred error without throwing it immediately; the caller decides
// whether to inspect (.get) or raise (.throw) it.
errorResult: function (error) {
	// FIXME: Validatem
	if (!(error instanceof Error)) {
		throw new Error(`Attempted to initialize an errorResult with a value that is not an Error; this is invalid`);
	}

	return {
		// marker checked by isErrorResult below
		__textParser_isErrorResult: true,
		// Returns the wrapped error without throwing it.
		get: function () {
			return error;
		},
		// Raises the wrapped error.
		throw: function () {
			throw error;
		}
	};
},
// Checks whether a value is an errorResult produced by the function above.
isErrorResult: function (value) {
	return (value != null && value.__textParser_isErrorResult === true);
}
};

@ -2,6 +2,7 @@
const assureArray = require("assure-array");
const shallowMerge = require("../shallow-merge");
const syncpipe = require("syncpipe");
const { validateArguments } = require("@validatem/core");
const required = require("@validatem/required");
@ -112,5 +113,16 @@ module.exports = {
}
return topLevel;
},
map: function (tree, mapFunc) {
return syncpipe(tree, [
(_) => this.flatten(_),
(_) => _.map((item) => ({
... mapFunc(item),
_treecutterDepth: item._treecutterDepth,
_treecutterSequenceNumber: item._treecutterSequenceNumber,
})),
(_) => this.rebuild(_)
]);
}
};

@ -0,0 +1,13 @@
'use strict';
const Promise = require("bluebird");
module.exports = function({db}) {
let router = require("express-promise-router")();
router.get("/storage", (req, res) => {
res.render("resource-pools/storage/list");
});
return router;
};

@ -368,6 +368,39 @@ type LVMVolumeGroup {
name: String!
}
# Common shape for every disk image known to the system.
interface Image {
	id: String!
	name: String!
	description: String
	thumbnail: String
	# Where the image was originally obtained from (URL/path)
	originalSource: String!

	# The below are only available after the image has been downloaded
	filesize: ByteSize
	storagePath: String
}

# An image that is only usable as installation media (eg. an installer ISO).
type InstallationMedium implements Image {
	id: String!
	name: String!
	description: String
	thumbnail: String
	originalSource: String!

	# The below are only available after the image has been downloaded
	filesize: ByteSize
	storagePath: String
}

# A directly-usable (clonable) virtual machine disk image.
type VMImage implements Image {
	id: String!
	name: String!
	description: String
	thumbnail: String
	originalSource: String!

	# The below are only available after the image has been downloaded
	filesize: ByteSize
	storagePath: String
}
type HardwareQuery {
drives(paths: [String]): [PhysicalDrive!]!
}
@ -377,9 +410,16 @@ type LVMQuery {
volumeGroups: [LVMVolumeGroup!]!
}
# Query namespace for disk image listings.
type ImagesQuery {
	installationMedia: [InstallationMedium!]!
	vmImages: [VMImage!]!
}

# Query namespace for system resources (block devices, LVM, images).
type ResourcesQuery {
	blockDevices: [BlockDevice!]!
	lvm: LVMQuery
	# TODO: RAID
	images: ImagesQuery
}
type Query {

@ -2,6 +2,7 @@
const Promise = require("bluebird");
const util = require("util");
const { render, getContext } = require("error-chain");
const lsblk = require("./packages/exec-lsblk");
const lvm = require("./packages/exec-lvm");
@ -10,9 +11,10 @@ const findmnt = require("./packages/exec-findmnt");
const nvmeCli = require("./packages/exec-nvme-cli");
return Promise.try(() => {
// return lvm.getVersions();
return lvm.getVersions();
// return lvm.getPhysicalVolumes();
// return lvm.createPhysicalVolume({ devicePath: "/dev/loop0" });
// return lvm.createPhysicalVolume({ devicePath: "/dev/loopasdfasdfasdf" });
// return lvm.createPhysicalVolume({ devicePath: process.argv[2] });
// return lvm.createVolumeGroup({ name: "not a valid name", physicalVolumes: ["/dev/loop0", "/dev/asdfasdfasdf", "/dev/gasdfgasdf"] });
// return lvm.createVolumeGroup({ name: "vg-name", physicalVolumes: ["/dev/loop0", "/dev/asdfasdfasdf", "/dev/gasdfgasdf"] });
@ -26,7 +28,7 @@ return Promise.try(() => {
// return lvm.addVolumeToVolumeGroup({ volumeGroup: "vg-name", physicalVolume: "/dev/loop1" });
// return lvm.destroyPhysicalVolume({ devicePath: "/dev/loop0" });
// return lsblk();
return smartctl.scan();
// return smartctl.scan();
// return smartctl.info({ devicePath: "/dev/sda" })
// return smartctl.info({ devicePath: process.argv[2] })
// return smartctl.attributes({ devicePath: process.argv[2] });
@ -35,16 +37,21 @@ return Promise.try(() => {
}).then((result) => {
console.log(util.inspect(result, {colors: true, depth: null}));
}).catch((err) => {
if (err.getAllContext != null) {
let context = err.getAllContext()
console.log(context);
console.log("####################\n");
}
// if (err.getAllContext != null) {
// let context = err.getAllContext()
// console.log(context);
// console.log("####################\n");
// }
if (err.showChain != null) {
// console.log(err.showChain({ allStacktraces: true }));
console.log(err.showChain({}));
} else {
console.log(err.stack);
}
console.log(getContext(err));
console.log("################\n");
console.log(render(err));
// if (err.showChain != null) {
// // console.log(err.showChain({ allStacktraces: true }));
// console.log(err.showChain({}));
// } else {
// console.log(err.stack);
// }
});

@ -0,0 +1,72 @@
"use strict";
const React = require("react");
const classnames = require("classnames");
const syncpipe = require("syncpipe");
const splitFilterN = require("split-filter-n");
const gql = require("../../../packages/graphql-interface/tag");
const { B } = require("../../../packages/unit-bytes-iec");
const Layout = require("../layout");
// FIXME: For disk image feeds (eg. third-party image providers), have the admin specify a prefix which gets colon-prefixed to every named image in that feed (so that images may be auto-updated)
module.exports = {
query: gql`
query {
images {
installationMedia {
id
name
filesize
description
thumbnail
originalSource # URL/path
storagePath
}
}
}
`,
template: function StorageDeviceList({data}) {
return (
<Layout title="Installation Media">
<table className="installationMedia">
<tr>
<th></th>
<th>Name</th>
<th>Description</th>
<th>Size</th>
<th>Actions</th>
</tr>
{data.images.installationMedia.map((image) => {
return (
<tr>
<td>
{(image.thumbnail != null)
? <img
src={`/static/thumbnails/media/${image.thumbnail}`}
alt={`${image.name} Logo`}
/>
: null
}
</td>
<td>{image.name}</td>
<td>
{image.description}
<dl>
<dt>Source</dt>
<dd>{image.originalSource}</dd>
<dt>Stored at</dt>
<dd>{image.storagePath}</dd>
</dl>
</td>
</tr>
);
})}
</table>
</Layout>
);
}
};

@ -0,0 +1,23 @@
"use strict";
const React = require("react");
const MainLayout = require("../layout");
const MenuItem = require("../components/menu-item");
const prefixTitle = require("../../packages/maybe-prefix");
function Submenu() {
return (<>
<MenuItem path="/hardware/vm-images">VM Images</MenuItem>
<MenuItem path="/hardware/installation-media">Installation Media</MenuItem>
</>);
}
module.exports = function DiskImagesLayout({ children, title }) {
return (
<MainLayout submenu={<Submenu />} title={prefixTitle("Disk Images >", title)}>
{children}
</MainLayout>
);
};

@ -11,6 +11,8 @@ function Submenu() {
return (<>
<MenuItem path="/hardware/system-information">System Information</MenuItem>
<MenuItem path="/hardware/storage-devices">Storage Devices</MenuItem>
<MenuItem path="/hardware/lvm">LVM</MenuItem>
<MenuItem path="/hardware/raid">RAID</MenuItem>
<MenuItem path="/hardware/network-interfaces">Network Interfaces</MenuItem>
</>);
}

@ -2,10 +2,25 @@
const React = require("react");
const classnames = require("classnames");
const gql = require("../../../packages/graphql-interface/tag");
const syncpipe = require("syncpipe");
const splitFilterN = require("split-filter-n");
const { B } = require("../../../packages/unit-bytes-iec");
const treecutter = require("../../../packages/treecutter");
const Layout = require("../layout");
// Sums a list of numbers; returns 0 for an empty list.
function sum(values) {
	let total = 0;

	for (let value of values) {
		total += value;
	}

	return total;
}
// Totals the capacities of the given drives, returning the total as a B
// (bytes) unit value so callers can use toDisplay on it.
function sumDriveSizes(drives) {
	let byteAmounts = drives.map((drive) => drive.size.toB().amount);

	return B(sum(byteAmounts));
}
function Indented({ depth, children }) {
return (
<div style={{ paddingLeft: depth * 10 }}>
@ -52,11 +67,12 @@ function PartitionEntry({partition, isLast}) {
}
function DriveEntry({drive}) {
let hasPartitions = (drive.partitions.length > 0);
let flattenedPartitions = treecutter.flatten(drive.partitions);
let hasPartitions = (flattenedPartitions.length > 0);
return (<>
<tr className={classnames({hasPartitions})}>
<td className={classnames("smart", drive.smartHealth)} rowSpan={1 + drive.partitions.length} />
<td className={classnames("smart", drive.smartHealth)} rowSpan={1 + flattenedPartitions.length} />
<td>{drive.path}</td>
<td>{drive.size.toDisplay(2).toString()}</td>
<td>
@ -70,48 +86,66 @@ function DriveEntry({drive}) {
<td>{drive.modelFamily}</td>
<td>{drive.firmwareVersion}</td>
</tr>
{drive.partitions.map((partition, i) => {
let isLast = (i === drive.partitions.length - 1);
{flattenedPartitions.map((partition, i) => {
let isLast = (i === flattenedPartitions.length - 1);
return <PartitionEntry partition={partition} isLast={isLast} />;
})}
</>);
}
function TallyRow({ label, rowClass, labelClass, children }) {
return (
<tr className={rowClass}>
<th colspan="2" className={labelClass}>{label}</th>
<td>{children}</td>
<td className="hidden" colspan="5"></td>
</tr>
);
}
module.exports = {
query: gql`
query {
hardware {
drives {
path
smartHealth
size
rpm
serialNumber
model
modelFamily
firmwareVersion
blockDevice {
name
}
query: {
hardware: {
drives: {
path: true,
smartHealth: true,
size: true,
rpm: true,
serialNumber: true,
model: true,
modelFamily: true,
firmwareVersion: true,
partitions: allBlockDevices {
_treecutterDepth
_treecutterSequenceNumber
blockDevice: {
name: true
},
name
size
partitions: {
$key: "allBlockDevices",
name: true,
size: true,
mounts {
mountpoint
}
mounts: {
mountpoint: true
},
children: {
$recurse: true,
$recurseLimit: Infinity, // 3 by default
}
}
}
}
`,
},
template: function StorageDeviceList({data}) {
let drivesByStatus = splitFilterN(data.hardware.drives, [ "HEALTHY", "DETERIORATING", "FAILING" ], (drive) => drive.smartHealth);
let totalStorage = sumDriveSizes(data.hardware.drives);
let totalHealthyStorage = sumDriveSizes(drivesByStatus.HEALTHY);
let totalAtRiskStorage = sumDriveSizes(drivesByStatus.DETERIORATING);
let totalFailingStorage = sumDriveSizes(drivesByStatus.FAILING);
return (
<Layout title="Storage Devices">
<table className="drives">
@ -126,6 +160,18 @@ module.exports = {
<th>Firmware version</th>
</tr>
{data.hardware.drives.map((drive) => <DriveEntry drive={drive} />)}
<TallyRow label="Total">
{totalStorage.toDisplay(2).toString()}
</TallyRow>
<TallyRow label="Healthy" rowClass="smartStatus" labelClass="healthy">
{totalHealthyStorage.toDisplay(2).toString()}
</TallyRow>
<TallyRow label="At-risk" rowClass="smartStatus" labelClass="atRisk">
{totalAtRiskStorage.toDisplay(2).toString()}
</TallyRow>
<TallyRow label="Failing" rowClass="smartStatus" labelClass="failing">
{totalFailingStorage.toDisplay(2).toString()}
</TallyRow>
</table>
</Layout>
);

@ -0,0 +1,23 @@
"use strict";
const React = require("react");
const MainLayout = require("../layout");
const MenuItem = require("../components/menu-item");
const prefixTitle = require("../../packages/maybe-prefix");
function Submenu() {
return (<>
<MenuItem path="/resource-pools/storage">Storage Pools</MenuItem>
<MenuItem path="/resource-pools/network">Network Pools</MenuItem>
</>);
}
module.exports = function HardwareLayout({ children, title }) {
return (
<MainLayout submenu={<Submenu />} title={prefixTitle("Resource Pools >", title)}>
{children}
</MainLayout>
);
};

@ -0,0 +1,29 @@
"use strict";
const React = require("react");
const gql = require("../../../packages/graphql-interface/tag");
const Layout = require("../layout");
module.exports = {
query: gql`
query {
resources {
storagePools {
id
type
name
path
isLocal
}
}
}
`,
template: function StorageDeviceList({data}) {
return (
<Layout title="Storage Pools">
Storage pools go here
</Layout>
);
}
};

@ -1,58 +0,0 @@
"use strict";
const Promise = require("bluebird");
const path = require("path");
const execBinary = require("../exec-binary");
const createPegParser = require("../text-parser-pegjs");
const itemsToObject = require("../../packages/items-to-object");
/* FIXME: Error handling, eg. device not found errors */
// Builds a PEG-based stdout parser for smartctl output, constrained to a
// single start rule so each subcommand only accepts its own output format.
function outputParser(rootRule) {
	let parserOptions = {
		allowedStartRules: [ rootRule ]
	};

	return createPegParser({
		grammarFile: path.join(__dirname, "./parser.pegjs"),
		options: parserOptions
	});
}
module.exports = {
// Reads the SMART attribute table for a device (smartctl --attributes).
// devicePath: block device path, eg. "/dev/sda".
// Resolves to the parsed attribute list.
attributes: function ({ devicePath }) {
return Promise.try(() => {
return execBinary("smartctl", [devicePath])
.asRoot()
.withFlags({ attributes: true })
.requireOnStdout(outputParser("RootAttributes"))
.execute();
}).then((output) => {
// NOTE: Ignore the header, for now
return output.result.attributes;
});
},
// Reads device identification info (smartctl --info) for a device.
// Resolves to an object keyed by info field name.
info: function ({ devicePath }) {
return Promise.try(() => {
return execBinary("smartctl", [devicePath])
.asRoot()
.withFlags({ info: true })
.requireOnStdout(outputParser("RootInfo"))
.execute();
}).then((output) => {
// NOTE: Ignore the header, for now
return itemsToObject(output.result.fields);
});
},
// Scans for SMART-capable devices (smartctl --scan).
// Resolves to the list of discovered devices.
scan: function () {
return Promise.try(() => {
return execBinary("smartctl")
.asRoot()
.withFlags({ scan: true })
.requireOnStdout(outputParser("RootScan"))
.execute();
}).then((output) => {
// NOTE: Ignore the header, for now
return output.result.devices;
});
}
};

@ -0,0 +1,72 @@
"use strict";
require("@babel/register");
const Promise = require("bluebird");
const createAPI = require("../src/api");
// const query = {
// hardware: {
// drives: {
// path: true,
// smartHealth: true,
// size: true,
// rpm: true,
// serialNumber: true,
// model: true,
// modelFamily: true,
// firmwareVersion: true,
// blockDevice: {
// name: true
// },
// partitions: {
// $key: "allBlockDevices",
// name: true,
// size: true,
// mounts: {
// mountpoint: true
// },
// children: {
// $recurse: true,
// $recurseLimit: Infinity, // 3 by default
// }
// }
// }
// }
// };
const query = {
resources: {
lvm: {
physicalVolumes: {
path: true,
totalSpace: true,
freeSpace: true,
status: true,
volumeGroup: {
name: true,
totalSpace: true,
freeSpace: true,
logicalVolumeCount: true,
mode: true,
physicalVolumes: {
path: true
}
}
}
}
}
};
const api = createAPI();

// Scratch runner: executes the query above against the dlayer-style API and
// pretty-prints the result. Top-level `return` is legal here because CommonJS
// wraps each module in a function.
return Promise.try(() => {
return api.query(query);
}).then((result) => {
// depth: null => print the full nested structure, not just two levels
console.dir(result, { depth: null });
});

@ -0,0 +1,15 @@
"use strict";
require("@babel/register");
const Promise = require("bluebird");
const execLvm = require("../src/packages/exec-lvm");
const errorChain = require("error-chain");
return Promise.try(() => {
// return execLvm.getVolumeGroups();
return execLvm.getLogicalVolumes();
}).then((result) => {
console.dir(result, { depth: null });
}).catch((error) => {
console.error(errorChain.render(error));
});

@ -0,0 +1,14 @@
"use strict";
require("@babel/register");
const Promise = require("bluebird");
const createLoaders = require("../src/api/loaders");
const All = require("../src/packages/graphql-interface/symbols/all");
let loaders = createLoaders();
return Promise.try(() => {
return loaders.lvmPhysicalVolumes.load(All);
}).then((result) => {
console.dir(result, { depth: null });
});

File diff suppressed because it is too large Load Diff
Loading…
Cancel
Save