{ type: "setAttribute", attribute: "required", value: true }, // FIXME: Enforce a default in this case! Otherwise existing columns would be invalid -- actually this should be handled by 'migration defaults' specifically, without requiring a default for new records
// FIXME: The below lazy function is currently getting evaluated at schema reduce time, because of the immutable deep merge. *Really* need to work this into merge-by-template instead to prevent cases like this!
{ type: "setAttribute", attribute: "defaultValue", value: () => false }, // FIXME: Always specified as a value-producing function, or also allow literals?
value.schema // Extract only the *current* schema, not the transforms
];
});
let coder = createRecordCoder(fields);
return coder.encode(data);
}
// console.log(schema);
// MARKER: createRecord takes a schema *array*, because fields need to have a well-defined order for consistent encoding. Need to convert from object format to array format, and also update recordCoder so that it can deal with the new internal schema representation format (eg. no longer a nested `attributes` object)
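// A minimal sketch of that object-to-array conversion (an assumption, not the actual implementation;
// the `name` property and the sort-by-name ordering are illustrative choices only):
function schemaObjectToArray(schemaObject) {
	// Produce a deterministically ordered array of field definitions for encoding
	return Object.keys(schemaObject)
		.sort()
		.map((fieldName) => ({ name: fieldName, ...schemaObject[fieldName] }));
}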
// NOTE: We default to an empty object for the original value because from the perspective of a deep-merge, any nested paths required by the new input that don't exist in the original input should be imagined into existence.
// FIXME: replace asserts with proper checks and error messages
// TODO: Find a way to roll this into merge-by-template somehow? The main difference is specifying dynamic transforms at rule definition time (and needing to use meta-objects in the mergeable) vs. specifying dynamic transforms at merge time directly
// TODO: Add API for "set this object literally, no merge" -- is that actually necessary, can't we just have a transform function that only returns the new value directly?
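// Sketch of that alternative (an assumption, not part of the current API): a plain transform that
// ignores the old value and returns the replacement wholesale, which would make a dedicated
// "set literally" operation unnecessary:
let replaceWholesaleExample = { type: "transformTo", transformer: (_oldValue) => ({ enabled: false }) };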
// FIXME: Find a way to support arrays? Particularly objects *within* arrays, which would also need to be merged recursively...
throw new Error(`Tried to change '${operation.attribute}' attribute to '${operation.value}', but it's already set to that`);
}
},
transformTo: () => {
if (state.forwardTransform == null) {
state.forwardTransform = operation.transformer;
} else {
// FIXME: Error quality
throw new Error(`You can only specify one transformTo per field per migration`);
}
},
rollbackTo: () => {
if (state.backwardTransform == null) {
state.backwardTransform = operation.transformer;
} else {
// FIXME: Error quality
throw new Error(`You can only specify one rollbackTo per field per migration`);
}
},
forbidRollback: () => {
state.rollbackForbidden = true;
},
// TODO: rest of operations
});
}
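// Illustrative sketch (an assumption, not code from this module): the kind of operations list that
// the handlers above dispatch on, mirroring the `operation.type`, `operation.transformer`,
// `operation.attribute` and `operation.value` accesses seen in this file:
let exampleFieldOperations = [
	{ type: "setFieldType", fieldType: "string" },
	{ type: "setAttribute", attribute: "required", value: true },
	{ type: "transformTo", transformer: (value) => String(value) },
	{ type: "rollbackTo", transformer: (value) => Number(value) },
	{ type: "forbidRollback" }
];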
// FIXME: requiresMigrationDefault and requiredAttributes validation, as well as verifying that a type has actually been set by the end of a createField operation
// FIXME: Track exactly which data would be lost upon a rollback so that the user can be asked for confirmation first
function createTransformComputer() {
let automaticTransformers = [];
@@ -124,6 +28,10 @@ function applyFieldOperations(currentField = {}, operations) {
return {
changeType: function (oldType, newType) {
if (oldType == null || newType == null) {
// We're setting a type for this field for the first time (which should only happen during field creation). This also applies to the inverse computation for rollbacks
// We're setting this attribute on this field for the first time (which should only happen during field creation). This also applies to inverse computation for rollbacks.
// NOTE: Even if a field is not required, it should always be initialized during field creation, using an implicit operation setting the default, so a legitimate revert to `undefined` should never be possible.
// FIXME: How to deal with this when a new attribute is introduced in a new schema DSL version? Should we just pretend that the old one always existed, by updating the old DSL implementation to insert it implicitly as a default? Probably should.
// NOTE: We disallow transformTo/rollbackTo when they are not required; if the user wishes to bulk-transform values, they should specify a changeRecords operation instead (which should probably require the user to specify both the forward and backward transform?). Otherwise, we cannot implement "maybe you forgot a rollbackTo" errors, because that's only actually an error when a transform isn't *required*, and so if a user 'overreaches' in their type transform to also do a value transform we can't detect missing corresponding rollbackTo logic.
// NOTE: There are deliberately duplicate conditional clauses in here to improve readability!
return ` - Attribute '${cause.attribute}' changed from '${util.inspect(from)}' to '${util.inspect(to)}'`;
} else {
throw unreachable("Unrecognized cause type");
}
})
.join("\n");
let errorMessage = (isForward)
? `One or more schema changes for '${fieldName}' cannot be applied automatically, because existing data would lose precision. You need to specify a transformTo operation manually.`
: `One or more schema changes for '${fieldName}' cannot be applied automatically, because rolling back the migration would cause data to lose precision. You need to specify a rollbackTo operation manually.`;
// FIXME: Better error message
throw new Error(`One or more schema changes can't be automatically applied, because a lossless automatic conversion of existing values is not possible; you need to specify a ${operationName} operation manually`);
// TODO: Is this the correct order, always letting the manual transformer act on the original value rather than the one post automatic transforms? Probably is, if we want automatic transforms to be transparent to the user (and not produce a leaky abstraction)
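// Sketch of the two orderings discussed above (identifiers are placeholders, not names from this
// module; only `automaticTransformers` exists here, and the reduce is an assumed composition):
// transparent: newValue = manualTransformer(originalValue)
// layered:     newValue = manualTransformer(automaticTransformers.reduce((value, transform) => transform(value), originalValue))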
@@ -163,93 +106,114 @@ function applyFieldOperations(currentField = {}, operations) {
};
}
// NOTE: We disallow transformTo/rollbackTo when they are not required; if the user wishes to bulk-transform values, they should specify a changeRecords operation instead. Otherwise, we cannot implement "maybe you forgot a rollbackTo" errors, because that's only actually an error when a transform isn't *required*, and so if a user 'overreaches' in their type transform to also do a value transform we can't detect missing corresponding rollbackTo logic.
// FIXME: Track destructive forward migrations *and* rollbacks, and outside of dev mode require the user to confirm with eg. an --allow-destructive-rollbacks that this is okay to apply
// FIXME: Currently this implementation assumes that *all* possible attributes are required, and it doesn't deal with cases where the attribute is currently unset. That needs to be changed, especially because new attributes can be added in later versions of the schema builder, which older migrations won't be using.
// TODO/QUESTION: Maybe all attributes should just be given a default instead of being required? Otherwise over time there'll be a mix of required and optional attributes, the requiredness being determined solely by when the attribute was added to the schema builder...
// NOTE: The logic for this is separated out into its own function because a bunch of complexity is needed for determining which attributes can be kept
throw new Error(`You cannot specify a transformTo or rollbackTo operation unless a field type change requires it. Maybe you meant to use changeRecords instead?`);
// FIXME: modifyRecords instead of changeRecords? For consistency with other APIs
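// Hypothetical sketch of the changeRecords/modifyRecords operation suggested above (the API does
// not exist here yet; the name and shape are assumptions). Per the note above, a bulk value rewrite
// would have to specify both directions explicitly:
// changeRecords({
// 	forward: (record) => ({ ...record, name: record.name.trim() }),
// 	backward: (record) => record // or use forbidRollback() if the change is lossy
// });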
{ type: "setFieldType", fieldType: "boolean" },
{ type: "setAttribute", attribute: "required", value: true }, // FIXME: Enforce a default in this case! Otherwise existing columns would be invalid -- actually this should be handled by 'migration defaults' specifically, without requiring a default for new records
// FIXME: The below lazy function is currently getting evaluated at schema reduce time, because of the immutable deep merge. *Really* need to work this into merge-by-template instead to prevent cases like this!
{ type: "setAttribute", attribute: "defaultValue", value: () => false }, // FIXME: Always specified as a value-producing function, or also allow literals?
// CAUTION: Once a type is defined here and that version of the code is released, no new requiredAttributes can be added to that type! Otherwise it would be possible for an old migration (from before that change) to become retroactively invalid upon an update of zapdb, just because it was written against an older ruleset. This cannot be fixed by statefully tracking what migrations were *once* considered 'valid', either; all state is kept in the database instance of a specific deployment, which means that it would only be considered valid for deployments created prior to the zapdb upgrade - making database deployment non-deterministic, as a given migration would be considered valid on one system but invalid on another. Bottom line: we can never retroactively invalidate existing migrations.
// TODO: Consider using schema API versions as a workaround for this, and changing the rules format to specify since which version an attribute is required, providing a default for the alternate case. Question is whether by this point it's really useful to even specify new required fields *at all*, or whether we can get away with just specifying defaults.
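// Sketch of the versioned rules format floated in the TODO above (property names such as
// `requiredSinceVersion` and `versionDefault` are assumptions; nothing here implements them):
// required: {
// 	validForTypes: true,
// 	requiredSinceVersion: 2, // migrations written against DSL version 1...
// 	versionDefault: false    // ...implicitly get this value instead of failing validation
// },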
module.exports = {
// Note that this mapping can be used to determine the losslessness of both forward and backward migrations!
losslessTransformer: (value) => value, // No change to value
requiresMigrationDefault: false // Whether setting this option means a 'migration default' must be specified? Need to figure out what this was supposed to be for
},
signed: {
validForTypes: new Set(["decimal", "integer"]),
@@ -67,8 +70,13 @@ module.exports = {
required: {
// Valid for all types
validForTypes: true,
isLossless: () => true,
requiresMigrationDefault: true
},
defaultValue: { // For newly inserted rows, not necessarily for migrated rows! See notes.txt