Commit 7b9b9b2a authored by Szymon Zimnowoda

Merge branch 'sz/migration_poc' into 'dev'

Sz/migration poc

See merge request !445
parents e11968bb 2175bba6
Showing with 345 additions and 464 deletions
@@ -373,18 +373,18 @@ checksum = "0e97ce7de6cf12de5d7226c73f5ba9811622f4db3a5b91b55c53e987e5f91cba"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.16",
"syn 2.0.37",
]
[[package]]
name = "async-trait"
version = "0.1.68"
version = "0.1.73"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842"
checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.16",
"syn 2.0.37",
]
[[package]]
@@ -685,7 +685,7 @@ dependencies = [
"heck",
"proc-macro2",
"quote",
"syn 2.0.16",
"syn 2.0.37",
]
[[package]]
@@ -1117,7 +1117,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.16",
"syn 2.0.37",
]
[[package]]
@@ -1567,6 +1567,7 @@ version = "0.5.1"
dependencies = [
"argon2",
"async-recursion",
"async-trait",
"bip39",
"chacha20poly1305",
"clap 4.3.0",
@@ -1588,7 +1589,6 @@ dependencies = [
"r2d2",
"r2d2_sqlite",
"rand",
"refinery",
"regex",
"reqwest",
"rusqlite",
@@ -1888,7 +1888,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.16",
"syn 2.0.37",
]
[[package]]
@@ -1988,7 +1988,7 @@ checksum = "39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.16",
"syn 2.0.37",
]
[[package]]
@@ -2111,18 +2111,18 @@ dependencies = [
[[package]]
name = "proc-macro2"
version = "1.0.58"
version = "1.0.67"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fa1fb82fc0c281dd9671101b66b771ebbe1eaf967b96ac8740dcba4b70005ca8"
checksum = "3d433d9f1a3e8c1263d9456598b16fec66f4acc9a74dacffd35c7bb09b3a1328"
dependencies = [
"unicode-ident",
]
[[package]]
name = "quote"
version = "1.0.27"
version = "1.0.33"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8f4f29d145265ec1c483c7c654450edde0bfe043d3938d6972630663356d9500"
checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae"
dependencies = [
"proc-macro2",
]
@@ -2225,50 +2225,6 @@ dependencies = [
"bitflags 1.3.2",
]
[[package]]
name = "refinery"
version = "0.8.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cdb0436d0dd7bd8d4fce1e828751fa79742b08e35f27cfea7546f8a322b5ef24"
dependencies = [
"refinery-core",
"refinery-macros",
]
[[package]]
name = "refinery-core"
version = "0.8.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "19206547cd047e8f4dfa6b20c30d3ecaf24be05841b6aa0aa926a47a3d0662bb"
dependencies = [
"async-trait",
"cfg-if",
"lazy_static",
"log",
"regex",
"rusqlite",
"serde",
"siphasher",
"thiserror",
"time",
"toml",
"url",
"walkdir",
]
[[package]]
name = "refinery-macros"
version = "0.8.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d94d4b9241859ba19eaa5c04c86e782eb3aa0aae2c5868e0cfa90c856e58a174"
dependencies = [
"proc-macro2",
"quote",
"refinery-core",
"regex",
"syn 2.0.16",
]
[[package]]
name = "regex"
version = "1.8.2"
@@ -2560,7 +2516,7 @@ checksum = "8c805777e3930c8883389c602315a24224bcc738b63905ef87cd1420353ea93e"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.16",
"syn 2.0.37",
]
[[package]]
@@ -2584,15 +2540,6 @@ dependencies = [
"serde",
]
[[package]]
name = "serde_spanned"
version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93107647184f6027e3b7dcb2e11034cf95ffa1e3a682c67951963ac69c1c007d"
dependencies = [
"serde",
]
[[package]]
name = "serde_urlencoded"
version = "0.7.1"
@@ -2677,12 +2624,6 @@ dependencies = [
"libc",
]
[[package]]
name = "siphasher"
version = "0.3.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7bd3e3206899af3f8b12af284fafc038cc1dc2b41d1b89dd17297221c5d225de"
[[package]]
name = "slab"
version = "0.4.8"
@@ -2739,9 +2680,9 @@ dependencies = [
[[package]]
name = "syn"
version = "2.0.16"
version = "2.0.37"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a6f671d4b5ffdb8eadec19c0ae67fe2639df8684bd7bc4b83d986b8db549cf01"
checksum = "7303ef2c05cd654186cb250d29049a24840ca25d2747c25c0381c8d9e2f582e8"
dependencies = [
"proc-macro2",
"quote",
@@ -2819,7 +2760,7 @@ checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.16",
"syn 2.0.37",
]
[[package]]
@@ -2912,7 +2853,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.16",
"syn 2.0.37",
]
[[package]]
@@ -2960,40 +2901,6 @@ dependencies = [
"tracing",
]
[[package]]
name = "toml"
version = "0.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d6135d499e69981f9ff0ef2167955a5333c35e36f6937d382974566b3d5b94ec"
dependencies = [
"serde",
"serde_spanned",
"toml_datetime",
"toml_edit",
]
[[package]]
name = "toml_datetime"
version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a76a9312f5ba4c2dec6b9161fdf25d87ad8a09256ccea5a556fef03c706a10f"
dependencies = [
"serde",
]
[[package]]
name = "toml_edit"
version = "0.19.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "92d964908cec0d030b812013af25a0e57fddfadb1e066ecc6681d86253129d4f"
dependencies = [
"indexmap",
"serde",
"serde_spanned",
"toml_datetime",
"winnow",
]
[[package]]
name = "tower-service"
version = "0.3.2"
@@ -3021,7 +2928,7 @@ checksum = "0f57e3ca2a01450b1a921183a9c9cbfda207fd822cef4ccb00a65402cbba7a74"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.16",
"syn 2.0.37",
]
[[package]]
@@ -3269,7 +3176,7 @@ dependencies = [
"once_cell",
"proc-macro2",
"quote",
"syn 2.0.16",
"syn 2.0.37",
"wasm-bindgen-shared",
]
@@ -3303,7 +3210,7 @@ checksum = "e128beba882dd1eb6200e1dc92ae6c5dbaa4311aa7bb211ca035779e5efc39f8"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.16",
"syn 2.0.37",
"wasm-bindgen-backend",
"wasm-bindgen-shared",
]
@@ -3539,15 +3446,6 @@ version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a"
[[package]]
name = "winnow"
version = "0.4.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "61de7bac303dc551fe038e2b3cef0f571087a47571ea6e79a87692ac99b99699"
dependencies = [
"memchr",
]
[[package]]
name = "winreg"
version = "0.10.1"
......
@@ -28,7 +28,6 @@ r2d2_sqlite = "0.22.0"
r2d2 = "0.8.10"
rand = { version = "0.8.4", features = ["getrandom", "std_rng"] }
regex = "1.5.4"
refinery = { version = "0.8.10", features = ["rusqlite"] }
reqwest = { workspace = true, features = ["blocking", "json"] }
# `bundled` causes us to automatically compile and link in an up to date
@@ -61,6 +60,7 @@ validator = {workspace = true }
bip39 = {version = "2.0.0", features = ["rand"]}
uuid = {workspace = true}
async-recursion = "1.0.4"
async-trait = "0.1.73"
[dev-dependencies]
criterion = { version = "0.3.5", features = ["async_tokio"] }
......
-- This is example data that you can insert into the Pod for testing purposes.
-- Use it, for example, as:
-- sqlcipher -cmd "PRAGMA key = \"x'2DD29CA851E7B56E4697B0E1F08507293D761A05CE4D1B628663F411A8086D99'\";" data/db/*.db < res/example_data.sql
INSERT INTO items(rowid, id, type, dateCreated, dateModified, dateServerModified, deleted) VALUES(19000, "id-person-age", "ItemPropertySchema", 0, 0, 0, 0);
INSERT INTO strings(item, name, value) VALUES(19000, "itemType", "Person");
INSERT INTO strings(item, name, value) VALUES(19000, "propertyName", "age");
INSERT INTO strings(item, name, value) VALUES(19000, "valueType", "Integer");
INSERT INTO items(rowid, id, type, dateCreated, dateModified, dateServerModified, deleted) VALUES(19001, "id-person-name", "ItemPropertySchema", 0, 0, 0, 0);
INSERT INTO strings(item, name, value) VALUES(19001, "itemType", "Person");
INSERT INTO strings(item, name, value) VALUES(19001, "propertyName", "name");
INSERT INTO strings(item, name, value) VALUES(19001, "valueType", "Text");
INSERT INTO items(rowid, id, type, dateCreated, dateModified, dateServerModified, deleted) VALUES(19002, "first-id", "Person", 0, 0, 0, 0);
INSERT INTO integers(item, name, value) VALUES(19002, "age", 20);
INSERT INTO strings(item, name, value) VALUES(19002, "firstName", "Ada");
INSERT INTO strings(item, name, value) VALUES(19002, "lastName", "Lovelace");
SELECT item.type, item.id, valueType.name, valueType.value FROM items as item, strings as valueType WHERE valueType.item = item.rowid;
SELECT item.type, item.id, valueType.name, valueType.value FROM items as item, strings as valueType WHERE item.type = "ItemPropertySchema" AND valueType.item = item.rowid;
SELECT propertyName.value, valueType.value
FROM
items as item,
strings as propertyName,
strings as valueType
WHERE item.type = 'ItemPropertySchema'
AND propertyName.item = item.rowid
AND valueType.item = item.rowid
AND propertyName.name = 'propertyName'
AND valueType.name = 'valueType';
-- TODO: Node/edge definitions should be done using internal_api calls; use migration steps just for meta tables, like id_registry
CREATE TABLE Oauth2Flow (
id TEXT NOT NULL PRIMARY KEY,
dateCreated INTEGER NOT NULL,
dateModified INTEGER NOT NULL,
dateServerModified INTEGER NOT NULL,
-- props
accessToken TEXT NOT NULL,
refreshToken TEXT NOT NULL,
tokenType TEXT NOT NULL,
redirectUri TEXT NOT NULL,
platform TEXT NOT NULL,
expiresAfter INTEGER NOT NULL
) STRICT;
CREATE TRIGGER _Oauth2Flow_insert_trigger
AFTER
INSERT
ON Oauth2Flow BEGIN
INSERT INTO
_IdRegistry(id, table_name, table_rowid)
VALUES
(NEW.id, 'Oauth2Flow', NEW.rowid);
END;
CREATE TRIGGER _Oauth2Flow_delete_trigger
AFTER
DELETE ON Oauth2Flow BEGIN
DELETE FROM
_IdRegistry
WHERE
_IdRegistry.id = OLD.id;
END;
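-- Illustrative round-trip (hypothetical values, not part of the migration itself):
-- the triggers above keep _IdRegistry in sync automatically.
INSERT INTO Oauth2Flow(id, dateCreated, dateModified, dateServerModified,
    accessToken, refreshToken, tokenType, redirectUri, platform, expiresAfter)
VALUES ('demo-oauth2', 0, 0, 0, 'access', 'refresh', 'Bearer', 'http://localhost/cb', 'demo', 0);
SELECT table_name, table_rowid FROM _IdRegistry WHERE id = 'demo-oauth2'; -- -> 'Oauth2Flow', <rowid>
DELETE FROM Oauth2Flow WHERE id = 'demo-oauth2';
SELECT COUNT(*) FROM _IdRegistry WHERE id = 'demo-oauth2'; -- -> 0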
-- TODO: create a function as a migration step that creates a given table with base props and sets up the triggers?
-- TODO: would it be named _POD_Oauth2Flow????
CREATE TABLE PodUserAccount (
id TEXT NOT NULL PRIMARY KEY,
dateCreated INTEGER NOT NULL,
dateModified INTEGER NOT NULL,
dateServerModified INTEGER NOT NULL,
-- props
loginHash TEXT NOT NULL,
passwordHash TEXT NOT NULL,
salt TEXT NOT NULL,
state TEXT NOT NULL,
encryptedPassword TEXT NOT NULL,
code TEXT NOT NULL
) STRICT;
-- TODO: keep consistent naming
CREATE INDEX _POD_PodUserAccount_loginHashIdx ON PodUserAccount(loginHash);
CREATE TRIGGER _PodUserAccount_insert_trigger
AFTER
INSERT
ON PodUserAccount BEGIN
INSERT INTO
_IdRegistry(id, table_name, table_rowid)
VALUES
(NEW.id, 'PodUserAccount', NEW.rowid);
END;
CREATE TRIGGER _PodUserAccount_delete_trigger
AFTER
DELETE ON PodUserAccount BEGIN
DELETE FROM
_IdRegistry
WHERE
_IdRegistry.id = OLD.id;
END;
-- TODO: create a function as a migration step that creates a given table with base props and sets up the triggers?
-- TODO: would it be named _POD_PodUserAccount????
\ No newline at end of file
CREATE TABLE File (
id TEXT NOT NULL PRIMARY KEY,
dateCreated INTEGER NOT NULL,
dateModified INTEGER NOT NULL,
dateServerModified INTEGER NOT NULL,
-- props
sha256 TEXT NOT NULL,
cipherKey TEXT,
nonce TEXT
) STRICT;
-- TODO: keep consistent naming
CREATE INDEX _POD_File_sha256Idx ON File(sha256);
CREATE TRIGGER _File_insert_trigger
AFTER
INSERT
ON File BEGIN
INSERT INTO
_IdRegistry(id, table_name, table_rowid)
VALUES
(NEW.id, 'File', NEW.rowid);
END;
CREATE TRIGGER _File_delete_trigger
AFTER
DELETE ON File BEGIN
DELETE FROM
_IdRegistry
WHERE
_IdRegistry.id = OLD.id;
END;
-- TODO: create a function as a migration step that creates a given table with base props and sets up the triggers?
-- TODO: would it be named _POD_File????
\ No newline at end of file
CREATE TABLE PluginRun (
id TEXT NOT NULL PRIMARY KEY,
dateCreated INTEGER NOT NULL,
dateModified INTEGER NOT NULL,
dateServerModified INTEGER NOT NULL,
-- props
containerImage TEXT NOT NULL,
targetItemId TEXT NOT NULL,
webserverPort INTEGER NOT NULL,
webserverUrl TEXT NOT NULL,
pluginModule TEXT NOT NULL,
pluginName TEXT NOT NULL,
status TEXT,
containerId TEXT NOT NULL,
pluginAlias TEXT,
uniqueId TEXT,
executeOn TEXT
) STRICT;
CREATE TRIGGER _PluginRun_insert_trigger
AFTER
INSERT
ON PluginRun BEGIN
INSERT INTO
_IdRegistry(id, table_name, table_rowid)
VALUES
(NEW.id, 'PluginRun', NEW.rowid);
END;
CREATE TRIGGER _PluginRun_delete_trigger
AFTER
DELETE ON PluginRun BEGIN
DELETE FROM
_IdRegistry
WHERE
_IdRegistry.id = OLD.id;
END;
-- TODO: create a function as a migration step that creates a given table with base props and sets up the triggers?
-- TODO: would it be named _POD_PluginRun????
\ No newline at end of file
-- Trigger {
-- action: "path/to/plugin/endpoint",
-- filterCreatedAfter: "DateTime INTEGER!"
-- filterCreatedAfterPropertyName: "string"
-- pluginRunId: "uuidv4",
-- triggerOn: "Type of item, for example, Message"
-- }
CREATE TABLE Trigger (
id TEXT NOT NULL PRIMARY KEY,
dateCreated INTEGER NOT NULL,
dateModified INTEGER NOT NULL,
dateServerModified INTEGER NOT NULL,
-- props
action TEXT NOT NULL,
filterCreatedAfter INTEGER NOT NULL,
filterCreatedAfterPropertyName TEXT NOT NULL,
pluginRunId TEXT NOT NULL,
triggerOn TEXT NOT NULL
) STRICT;
CREATE TRIGGER _Trigger_insert_trigger
AFTER
INSERT
ON Trigger BEGIN
INSERT INTO
_IdRegistry(id, table_name, table_rowid)
VALUES
(NEW.id, 'Trigger', NEW.rowid);
END;
CREATE TRIGGER _Trigger_delete_trigger
AFTER
DELETE ON Trigger BEGIN
DELETE FROM
_IdRegistry
WHERE
_IdRegistry.id = OLD.id;
END;
INSERT INTO _MetaTypesRegistry(table_name, column_name, type_id)
VALUES ('Trigger', 'filterCreatedAfter', 2);
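-- Illustrative query (not part of the migration): recover the intended type of a
-- column that SQLite stores as a plain INTEGER.
SELECT mt.type_name
FROM _MetaTypesRegistry AS mtr
JOIN _MetaTypes AS mt ON mt.id = mtr.type_id
WHERE mtr.table_name = 'Trigger' AND mtr.column_name = 'filterCreatedAfter'; -- -> 'datetime'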
-- TODO: create a function as a migration step that creates a given table with base props and sets up the triggers?
-- TODO: would it be named _POD_Trigger????
\ No newline at end of file
CREATE TABLE Trigger_trigger_PluginRun (
id TEXT NOT NULL PRIMARY KEY,
source TEXT NOT NULL,
target TEXT NOT NULL,
CONSTRAINT source_cascade_removal FOREIGN KEY(source) REFERENCES Trigger(id) ON DELETE CASCADE,
CONSTRAINT target_cascade_removal FOREIGN KEY(target) REFERENCES PluginRun(id) ON DELETE CASCADE,
UNIQUE(source, target)
) STRICT;
-- index for searching in-edges (out-edges are already covered by the UNIQUE(source, target) index)
CREATE INDEX Trigger_trigger_PluginRun_idx_target_source ON Trigger_trigger_PluginRun(target, source);
CREATE TRIGGER _Trigger_trigger_PluginRun_insert_trigger
AFTER
INSERT
ON Trigger_trigger_PluginRun BEGIN
INSERT INTO
_IdRegistry(id, table_name, table_rowid)
VALUES
(NEW.id, 'Trigger_trigger_PluginRun', NEW.rowid);
END;
CREATE TRIGGER _Trigger_trigger_PluginRun_delete_trigger
AFTER
DELETE ON Trigger_trigger_PluginRun BEGIN
DELETE FROM
_IdRegistry
WHERE
_IdRegistry.id = OLD.id;
END;
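-- Illustrative round-trip (hypothetical values, not part of the migration itself),
-- assuming PRAGMA foreign_keys = ON as the Pod sets on its connections:
INSERT INTO PluginRun(id, dateCreated, dateModified, dateServerModified,
    containerImage, targetItemId, webserverPort, webserverUrl, pluginModule, pluginName, containerId)
VALUES ('p-demo', 0, 0, 0, 'img', 'item-1', 8080, 'http://localhost:8080', 'mod', 'demo', 'cid');
INSERT INTO Trigger(id, dateCreated, dateModified, dateServerModified,
    action, filterCreatedAfter, filterCreatedAfterPropertyName, pluginRunId, triggerOn)
VALUES ('t-demo', 0, 0, 0, 'demo/endpoint', 0, 'dateCreated', 'p-demo', 'Message');
INSERT INTO Trigger_trigger_PluginRun(id, source, target) VALUES ('e-demo', 't-demo', 'p-demo');
-- Deleting the source node cascades to the edge row:
DELETE FROM Trigger WHERE id = 't-demo';
SELECT COUNT(*) FROM Trigger_trigger_PluginRun WHERE id = 'e-demo'; -- -> 0
DELETE FROM PluginRun WHERE id = 'p-demo'; -- clean up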
-- TODO: create a function as a migration step that creates a given table with base props and sets up the triggers?
-- TODO: would it be named _POD_Trigger????
\ No newline at end of file
use crate::error::Result;
use refinery::Runner;
use rusqlite::Connection;
pub mod embedded {
use refinery::embed_migrations;
embed_migrations!("./res/migrations");
}
/// Run "refinery" migrations to bring the core structure of items/edges up-to-date
pub fn migrate(conn: &mut Connection) -> Result<()> {
let runner: Runner = embedded::migrations::runner();
runner.run(conn).map(|_report| ()).map_err(|e| e.into())
}
@@ -2,11 +2,12 @@ use crate::{
any_error,
api_model::PodOwner,
async_db_connection::AsyncConnection,
command_line_interface, constants, database_migrate_refinery,
command_line_interface, constants,
error::{ErrorContext, Result},
global_static::get_cached_schema,
internal_error,
plugin_auth_crypto::{DatabaseKey, SHA256Output},
v5::migrations_api,
};
use futures::future::join_all;
use http::StatusCode;
@@ -145,18 +146,19 @@ async fn initialize_new_pool(owner: &str, database_key: &DatabaseKey) -> Result<
// Try to query the db to see if it works
check_connection(&mut conn)?;
// Apply migrations.
// Do it under the lock, so it's guaranteed all connections will see a database
// with migrations in place.
database_migrate_refinery::migrate(&mut conn)?;
get_cached_schema()
.init(&mut AsyncConnection::new(
owner
.parse()
.map_err(|e| internal_error!("Failed to parse owner key {e}"))?,
conn,
))
.await?;
let mut async_conn = AsyncConnection::new(
owner
.parse()
.map_err(|e| internal_error!("Failed to parse owner key {e}"))?,
conn,
);
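// Still under the pool-initialization lock: apply Pod schema migrations before
// building the cached schema, so every connection sees a fully migrated database.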
migrations_api::update_schema(&mut async_conn).await?;
get_cached_schema().init(&mut async_conn).await?;
Ok(pool)
}
......
@@ -192,7 +192,6 @@ macro_rules! convert_to_other_error {
// Errors that are so rare in the codebase are not worth adding to the enum
convert_to_other_error!(hex::FromHexError, "Error converting from hex");
convert_to_other_error!(refinery::Error, "Database 'refinery' migration error");
convert_to_other_error!(tokio::task::JoinError, "Async task join error");
convert_to_other_error!(std::num::TryFromIntError, "Parsing integer error");
convert_to_other_error!(std::env::VarError, "Env var error");
......
@@ -2,7 +2,6 @@ pub mod api_model;
pub mod async_db_connection;
pub mod command_line_interface;
pub mod constants;
pub mod database_migrate_refinery;
pub mod database_pool;
pub mod database_utils;
mod db_model;
......
@@ -7,8 +7,8 @@ use std::{
use uuid::Uuid;
use crate::{
async_db_connection::AsyncConnection, command_line_interface::CliOptions,
database_migrate_refinery, error::Result, global_static::get_cached_schema,
async_db_connection::AsyncConnection, command_line_interface::CliOptions, error::Result,
global_static::get_cached_schema, v5::migrations_api,
};
pub async fn get_memory_connection() -> Result<AsyncConnection> {
@@ -19,16 +19,16 @@ pub async fn get_memory_connection() -> Result<AsyncConnection> {
.connection_timeout(Duration::from_secs(5))
.build(manager)?;
let mut conn = pool.get()?;
let conn = pool.get()?;
conn.execute_batch(
"
PRAGMA foreign_keys = ON;
",
)?;
database_migrate_refinery::migrate(&mut conn)?;
let mut conn = AsyncConnection::new(Uuid::new_v4().simple().to_string().into(), conn);
migrations_api::update_schema(&mut conn).await?;
get_cached_schema().init(&mut conn).await?;
......
@@ -32,7 +32,10 @@ use crate::{
api_model::PodOwner,
async_db_connection::AsyncConnection,
internal_error,
v5::api_model::types::{ItemId, NodeName},
v5::{
api_model::types::{ItemId, NodeName},
schema::{SchemaInfo, SchemaVersion, SchemasInfo},
},
};
use lazy_static::lazy_static;
use rusqlite::OptionalExtension;
@@ -59,10 +62,7 @@ use crate::{
},
};
use super::common::{
is_edge_type, split_edge_type_name, ID, ID_REGISTRY, TABLE_NAME, TABLE_ROWID, TYPES,
TYPES_REGISTRY,
};
use super::common::{is_edge_type, split_edge_type_name, ID, ID_REGISTRY, TABLE_NAME, TABLE_ROWID};
#[derive(Default)]
pub struct CachedSchema {
@@ -109,33 +109,20 @@ pub(super) async fn get_schema_tx(tx: &AsyncTx) -> Result<Schema> {
let mut schema = Schema::empty();
// TODO: refinery_schema_history fits the edge-table name definition; is that a problem?
// It would be a problem if a user created nodes "refinery" and "history", plus an edge schema connecting them, lol
// think about another special character? or create edges with a double underscore, like Person__likes__Person?
// Get all tables, except internal ones, like sqlite_ and refinery_
// Get all tables, except internal ones, like sqlite_*, _IdRegistry
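// Note: '_' is a LIKE wildcard, so the patterns below use ESCAPE '^' to match a literal leading underscore.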
let mut stmt = tx
.prepare_cached(
"SELECT name FROM sqlite_schema
WHERE
type = 'table'
AND name NOT LIKE ?
AND name NOT LIKE ?
AND name NOT LIKE ?
AND name NOT LIKE ?
AND name NOT LIKE ?",
AND name NOT LIKE 'sqlite^_%' ESCAPE '^'
AND name NOT LIKE '^_%' ESCAPE '^'
",
)
.context(|| "While preparing query to get schema".to_string())?;
// TODO: instead of adding ID_REGISTRY, etc, add the prefix _POD and do NOT LIKE '_POD_%'
let mut type_rows = stmt
.query([
"sqlite_%",
"refinery_schema_history",
ID_REGISTRY,
TYPES_REGISTRY,
TYPES,
])
.query([])
.context(|| "While querying for schema".to_string())?;
while let Some(type_row) = type_rows.next()? {
@@ -157,6 +144,8 @@ pub(super) async fn get_schema_tx(tx: &AsyncTx) -> Result<Schema> {
}
}
schema.info = get_schema_info(tx).await?;
Ok(schema)
}
@@ -538,6 +527,48 @@ pub fn get_type_name_from_id(tx: &AsyncTx, id: &ItemId) -> Result<Option<(String
Ok(type_name)
}
pub async fn get_schema_info(tx: &AsyncTx) -> Result<SchemasInfo> {
let mut stmt =
tx.prepare_cached("SELECT name, version_major, version_minor, url FROM _SchemaInfo")?;
let mut rows = stmt.query([])?;
let mut infos = SchemasInfo::new();
while let Some(row) = rows.next()? {
let info = SchemaInfo {
version: SchemaVersion {
major: row.get("version_major")?,
minor: row.get("version_minor")?,
},
url: row.get("url")?,
};
infos.insert(row.get("name")?, info);
}
Ok(infos)
}
pub async fn store_schema_info(tx: &AsyncTx, schemas_info: SchemasInfo) -> Result<()> {
debug!("Storing schema info...");
let mut stmt = tx.prepare(
"INSERT OR REPLACE INTO _SchemaInfo(name, version_major, version_minor, url)
VALUES(?,?,?,?)
",
)?;
for (schema_name, info) in schemas_info {
stmt.execute(rusqlite::params![
&schema_name,
info.version.major,
info.version.minor,
&info.url,
])?;
}
Ok(())
}
#[cfg(test)]
mod tests {
......
-- refinery does not support two directories for migrations
-- if you want to use the v5 schema, put this dir under the migrations dir
-- and refinery will take its content into account
-- NOTE: two schemas will be present in the database
-- v4 with items, strings, integers etc tables
-- v5 with everything else
-- beware name clashes!
-- drop old migration table
DROP TABLE IF EXISTS refinery_schema_history;
-- mapping between an id and the type it defines
-- Why have a global id registry:
@@ -15,7 +10,7 @@
-- Ensures id is unique across tables
-- TODO: simply make id the primary key?
CREATE TABLE _IdRegistry(
CREATE TABLE IF NOT EXISTS _IdRegistry(
id TEXT NOT NULL,
table_name TEXT NOT NULL,
table_rowid INTEGER NOT NULL,
@@ -24,7 +19,7 @@ CREATE TABLE _IdRegistry(
) STRICT;
-- create a covering index so everything is fetched from the index itself, sparing a lookup in the _IdRegistry table
-- TODO: the query planner is stubborn enough not to use it
CREATE INDEX _IdRegistry_idx_id_table_name_table_rowid ON _IdRegistry(id, table_name, table_rowid);
CREATE INDEX IF NOT EXISTS _IdRegistry_idx_id_table_name_table_rowid ON _IdRegistry(id, table_name, table_rowid);
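-- To check whether the planner picks it, one can run (outside this migration batch,
-- since it returns rows):
--   EXPLAIN QUERY PLAN SELECT table_name, table_rowid FROM _IdRegistry WHERE id = 'x';
-- and look for: SEARCH ... USING COVERING INDEX _IdRegistry_idx_id_table_name_table_rowid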
-- sqlite does not have dedicated types for bool, date, json, list, map, etc; they have affinity to the Integer and Text types.
-- POD has to track the intended type in order not to lose this information. A simple example:
@@ -32,7 +27,7 @@ CREATE INDEX _IdRegistry_idx_id_table_name_table_rowid ON _IdRegistry(id, table_
-- get node -> without type tracking, POD would return isActive: 1 instead of true
-- Table containing meta types
CREATE TABLE _MetaTypes(
CREATE TABLE IF NOT EXISTS _MetaTypes(
id INTEGER PRIMARY KEY,
type_name TEXT NOT NULL,
@@ -40,7 +35,7 @@ CREATE TABLE _MetaTypes(
) STRICT;
-- In the future, extend to lists, maps, etc
INSERT INTO _MetaTypes(type_name)
INSERT OR IGNORE INTO _MetaTypes(type_name)
VALUES
('bool'), -- 1
('datetime'); -- 2
@@ -50,9 +45,16 @@ VALUES
-- Meta types are rare, so to avoid dozens of tables containing just one or two rows, keep everything in one.
-- In a potential future where meta types become very common, we can split it into dedicated tables
-- per node type, like _MetaTypesNode1(col_name, type_name)
CREATE TABLE _MetaTypesRegistry (
CREATE TABLE IF NOT EXISTS _MetaTypesRegistry (
table_name TEXT NOT NULL,
column_name TEXT NOT NULL,
type_id INTEGER NOT NULL,
FOREIGN KEY(type_id) REFERENCES _MetaTypes(id),
UNIQUE(table_name, column_name));
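-- For illustration (hypothetical Person.isActive column, not created by this migration),
-- registering a bool-typed column would look like:
--   INSERT OR IGNORE INTO _MetaTypesRegistry(table_name, column_name, type_id)
--   VALUES ('Person', 'isActive', (SELECT id FROM _MetaTypes WHERE type_name = 'bool'));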
CREATE TABLE IF NOT EXISTS _SchemaInfo(
name TEXT PRIMARY KEY,
version_major INTEGER NOT NULL,
version_minor INTEGER NOT NULL,
url TEXT NOT NULL
) STRICT;
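-- _SchemaInfo is maintained by the Rust migration layer (see migrations_api); after the
-- Pod's own migrations run, it holds e.g. ('pod', 0, 2, 'https://gitlab.memri.io/memri/pod').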
//! Using an API-level migration approach, not an SQL-level one (like migration_refinery),
//! for the following reasons:
//! - Global breaking changes are not possible from the SQL level, because the schema in POD is dynamic:
//!   different users have different tables, SQL is not Turing-complete, cannot loop over queries, etc.
//! - Creating nodes and edges via the API ensures all heavy lifting (like creating triggers) is always
//!   done, and done correctly
//! - Code is cleaner
//!
mod pod_schema;
use async_trait::async_trait;
use tracing::info;
use crate::{
async_db_connection::{AsyncConnection, AsyncTx},
error::Result,
v5::database_api::graph_schema,
};
use super::schema::SchemaVersion;
/// The POD service is responsible for maintenance of POD-specific types and the CentralSchema
pub async fn update_schema(conn: &mut AsyncConnection) -> Result<()> {
info!("Checking POD schema definitions for update...");
conn.in_write_transaction(|tx: AsyncTx| async move {
let mut schema_info = graph_schema::get_schema_info(&tx).await.unwrap_or_default();
pod_schema::update_schema(&tx, &mut schema_info).await?;
graph_schema::store_schema_info(&tx, schema_info).await
})
.await
}
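/// A single versioned schema change. Implementors provide do_migrate() and version();
/// the provided migrate() wrapper only adds logging.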
#[async_trait(?Send)]
pub trait Migration {
async fn migrate(&self, tx: &AsyncTx) -> Result<()> {
info!("Applying {}...", self.version());
self.do_migrate(tx).await
}
async fn do_migrate(&self, tx: &AsyncTx) -> Result<()>;
fn version(&self) -> SchemaVersion;
}
#[cfg(test)]
mod tests {
use crate::{
test_helpers::get_memory_connection, v5::database_api::common::tests::get_schema_with_conn,
};
use super::*;
#[tokio::test]
async fn clean_db_has_pod_types_defined() {
let mut conn = get_memory_connection().await.unwrap();
let schema = get_schema_with_conn(&mut conn).await.unwrap();
assert_eq!(
schema.info.get("pod").unwrap().version,
SchemaVersion { major: 0, minor: 2 }
);
assert!(schema.nodes().get("PodUserAccount").is_some());
assert!(schema.nodes().get("PluginRun").is_some());
assert!(schema.nodes().get("Trigger").is_some());
}
}
use async_trait::async_trait;
use serde_json::json;
use super::Migration;
use crate::error::Result;
use crate::v5::internal_api;
use crate::v5::schema::{SchemaInfo, SchemaVersion, SchemasInfo};
use crate::{async_db_connection::AsyncTx, error::ErrorContext};
use tracing::info;
pub const NAME: &str = "pod";
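// Applied in order; the update loop below runs every entry whose version() is
// greater than the version recorded in _SchemaInfo.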
const MIGRATIONS: [&dyn Migration; 2] = [&V0_1, &V0_2];
pub async fn update_schema(tx: &AsyncTx, info: &mut SchemasInfo) -> Result<()> {
let pod_schema_info = info.entry(NAME.to_string()).or_insert(SchemaInfo {
version: SchemaVersion { major: 0, minor: 0 },
url: "https://gitlab.memri.io/memri/pod".to_string(),
});
info!("POD schema version: {}", pod_schema_info.version);
for migration in MIGRATIONS {
if migration.version() > pod_schema_info.version {
info!(
"Updating POD schema from {} to {}",
pod_schema_info.version,
migration.version()
);
migration
.migrate(tx)
.await
.context(|| format!("While applying migration {}", migration.version()))?;
pod_schema_info.version = migration.version();
}
}
Ok(())
}
pub struct V0_1;
#[async_trait(?Send)]
impl Migration for V0_1 {
async fn do_migrate(&self, tx: &AsyncTx) -> Result<()> {
let sql = include_str!("V1__initial.sql");
tx.execute_batch(sql)?;
Ok(())
}
fn version(&self) -> SchemaVersion {
SchemaVersion { major: 0, minor: 1 }
}
}
pub struct V0_2;
#[async_trait(?Send)]
impl Migration for V0_2 {
async fn do_migrate(&self, tx: &AsyncTx) -> Result<()> {
// TODO: missing constraints, like NOT NULL, UNIQUE, PRIMARY KEY
internal_api::create_schema_tx(
tx,
&serde_json::from_value(json!({
"nodes": {
"Oauth2Flow": {
"accessToken": "Text",
"refreshToken": "Text",
"tokenType": "Text",
"redirectUri": "Text",
"platform": "Text",
"expiresAfter": "Integer",
},
"PodUserAccount": {
"loginHash": "Text",
"passwordHash": "Text",
"salt": "Text",
"state": "Text",
"encryptedPassword": "Text",
"code": "Text"
},
"File": {
"sha256": "Text",
"cipherKey": "Text",
"nonce": "Text"
},
"PluginRun": {
"containerImage": "Text",
"targetItemId": "Text",
"webserverPort": "Integer",
"webserverUrl": "Text",
"pluginModule": "Text",
"pluginName": "Text",
"status": "Text",
"containerId": "Text",
"pluginAlias": "Text",
"uniqueId": "Text",
"executeOn": "Text"
},
"Trigger": {
"action": "Text",
"filterCreatedAfter": "Integer",
"filterCreatedAfterPropertyName": "Text",
"pluginRunId": "Text",
"triggerOn": "Text",
}
},
"edges": {
"trigger": [
{
"source": "Trigger",
"target": "PluginRun"
}
]
}
}))?,
)
.await
}
fn version(&self) -> SchemaVersion {
SchemaVersion { major: 0, minor: 2 }
}
}
@@ -2,5 +2,6 @@ pub mod api_model;
pub mod database_api;
pub mod db_model;
pub mod internal_api;
pub mod migrations_api;
pub mod schema;
mod test_plugin_behavior;
use serde::Serialize;
use std::{collections::HashMap, convert::TryInto};
use std::{collections::HashMap, convert::TryInto, fmt::Display};
use time::OffsetDateTime;
use super::api_model::types::{
@@ -14,6 +14,24 @@ use super::api_model::types::{
pub type Connections =
HashMap<ItemName /* source */, HashMap<EdgeName, Vec<ItemName /* Targets */>>>;
pub type SchemasInfo = HashMap<String, SchemaInfo>;
#[derive(Serialize, Clone, Debug)]
pub struct SchemaInfo {
pub version: SchemaVersion,
pub url: String,
}
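// NOTE: the derived PartialOrd compares fields in declaration order (major, then minor),
// which yields the usual lexicographic version ordering; see the test at the bottom of this file.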
#[derive(PartialOrd, PartialEq, Debug, Serialize, Clone)]
pub struct SchemaVersion {
pub major: usize,
pub minor: usize,
}
impl Display for SchemaVersion {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}.{}", self.major, self.minor)
}
}
#[derive(Debug, Clone, Serialize)]
pub struct Schema {
/// United properties across all types, for example:
@@ -26,6 +44,7 @@ pub struct Schema {
/// "Person", "friend" -> "Person"
nodes_connections: Connections,
edges_types: EdgesDefinition,
pub info: SchemasInfo,
}
impl Schema {
@@ -35,6 +54,7 @@ impl Schema {
nodes_types: HashMap::new(),
nodes_connections: HashMap::new(),
edges_types: HashMap::new(),
info: SchemasInfo::new(),
}
}
@@ -179,3 +199,27 @@ pub fn utc_millis() -> i64 {
.try_into()
.expect("Current time is outside of Memri DateTime range (unsigned 64-bit integer)")
}
#[cfg(test)]
mod tests {
use super::SchemaVersion;
#[test]
fn schema_version_is_partially_ordered() {
let v00 = SchemaVersion { major: 0, minor: 0 };
let v01 = SchemaVersion { major: 0, minor: 1 };
let v010 = SchemaVersion {
major: 0,
minor: 10,
};
let v10 = SchemaVersion { major: 1, minor: 0 };
let v11 = SchemaVersion { major: 1, minor: 1 };
let v21 = SchemaVersion { major: 2, minor: 1 };
assert!(v00 < v01);
assert!(v01 < v010);
assert!(v010 < v10);
assert!(v10 < v11);
assert!(v11 < v21);
}
}