Commit 9975ecbc authored by Szymon Zimnowoda

Merge branch 'sz/bulk_error_reporting' into 'dev'

Add a context string to errors raised during DB writes

See merge request !370
parents 99887960 7cbd25e4
Pipeline #11567 passed with stages in 59 minutes and 52 seconds
Showing with 37 additions and 20 deletions
@@ -9,7 +9,7 @@ use crate::{
         get_incoming_edges, get_outgoing_edges, DatabaseSearch, EdgePointer, GQLSearchArgs, Rowid,
     },
     database_utils::{add_item_edge_properties, insert_edge, insert_property, item_base_to_json},
-    error::{Error, Result},
+    error::{Error, ErrorContext, Result},
     graphql_utils,
     graphql_utils::QueryASTNode,
     schema,
@@ -226,24 +226,28 @@ pub fn bulk_tx(
         bulk.create_edges.len(),
     );
     let mut created_items = Vec::new();
-    for item in bulk.create_items {
-        let id = create_item_tx(tx, schema, item, pod_owner, cli)?;
+    for (idx, item) in bulk.create_items.into_iter().enumerate() {
+        let id = create_item_tx(tx, schema, item, pod_owner, cli)
+            .context(|| format!("While creating the item #{idx}"))?;
         created_items.push(id);
     }
-    for item in bulk.update_items {
-        update_item_tx(tx, schema, &item.id, item.fields)?;
+    for (idx, item) in bulk.update_items.into_iter().enumerate() {
+        update_item_tx(tx, schema, &item.id, item.fields)
+            .context(|| format!("While updating the item #{idx}"))?;
     }
-    for item_id in bulk.delete_items {
-        delete_item_by_id(tx, &item_id)?;
+    for (idx, item_id) in bulk.delete_items.into_iter().enumerate() {
+        delete_item_by_id(tx, &item_id).context(|| format!("While deleting the item #{idx}"))?;
     }
     let mut created_edges = Vec::new();
-    for item_id in bulk.create_edges {
-        let id = create_edge(tx, schema, item_id)?;
+    for (idx, edge) in bulk.create_edges.into_iter().enumerate() {
+        let id =
+            create_edge(tx, schema, edge).context(|| format!("While creating the edge #{idx}"))?;
         created_edges.push(id);
     }
     let mut search_results = Vec::new();
-    for query in bulk.search {
-        let result = search(tx, schema, query)?;
+    for (idx, query) in bulk.search.into_iter().enumerate() {
+        let result =
+            search(tx, schema, query).context(|| format!("While making the search #{idx}"))?;
         search_results.push(result);
     }
@@ -254,13 +258,13 @@
     })
 }
 
-pub fn create_edge(tx: &Tx, schema: &mut Schema, query: CreateEdge) -> Result<String> {
+pub fn create_edge(tx: &Tx, schema: &mut Schema, edge: CreateEdge) -> Result<String> {
     let CreateEdge {
         source,
         target,
         name,
        self_id,
-    } = query;
+    } = edge;
     let date = schema::utc_millis();
     let (self_rowid, self_id) = if let Some(id) = self_id {
...
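The .context(|| ...) calls above come from the ErrorContext trait that this change adds to the imports; its definition is not part of this diff. As a rough sketch only: an extension trait of this kind typically wraps Result, runs the closure on the error path, and prepends the message to the underlying error. The Error type with a plain msg field below is an assumption for illustration, not the pod crate's actual error type.

// Sketch only: an ErrorContext-style extension trait over a toy Error type.
// The real Error/ErrorContext in the pod crate are assumed, not shown here.
#[derive(Debug)]
struct Error {
    msg: String,
}

type Result<T> = std::result::Result<T, Error>;

trait ErrorContext<T> {
    /// Attach a lazily built context string to the error, if any.
    fn context<F: FnOnce() -> String>(self, ctx: F) -> Result<T>;
}

impl<T> ErrorContext<T> for Result<T> {
    fn context<F: FnOnce() -> String>(self, ctx: F) -> Result<T> {
        // The closure runs only on the error path, so the happy path pays no
        // formatting cost for the context string.
        self.map_err(|e| Error {
            msg: format!("{}: {}", ctx(), e.msg),
        })
    }
}

fn main() {
    let failing: Result<()> = Err(Error {
        msg: "UNIQUE constraint failed".into(),
    });
    let err = failing
        .context(|| format!("While creating the item #{}", 3))
        .unwrap_err();
    // Prints: While creating the item #3: UNIQUE constraint failed
    println!("{}", err.msg);
}

With the bulk index carried in the message, a failure inside bulk_tx now points at the exact offending element instead of only reporting the underlying database error.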
@@ -3,8 +3,9 @@ use crate::common::pod_request::{deploy_plugin, migrate_plugin_definition, regis
 use bollard::container::ListContainersOptions;
 use common::test_data::TestData;
 use serde_json::json;
-use std::collections::HashMap;
+use std::{collections::HashMap, time::Duration};
 use test_context::test_context;
+use tokio::time::timeout;
 
 #[test_context(TestData)]
 #[tokio::test]
@@ -62,12 +63,24 @@ async fn test_start_plugin_by_bulk(ctx: &mut TestData) {
     assert_eq!(resp.status(), reqwest::StatusCode::BAD_REQUEST);
 
-    let mut containers = ctx
-        .list_containers(ListContainersOptions {
-            filters: HashMap::from([("name", vec![ctx.owner_key.as_ref()])]),
-            ..Default::default()
-        })
-        .await;
+    let mut containers = timeout(Duration::from_secs(10), async {
+        let mut containers = vec![];
+        while containers.is_empty() {
+            tokio::time::sleep(Duration::from_secs(1)).await;
+            containers = ctx
+                .list_containers(ListContainersOptions {
+                    filters: HashMap::from([("name", vec![ctx.owner_key.as_ref()])]),
+                    ..Default::default()
+                })
+                .await;
+        }
+        containers
+    })
+    .await
+    .unwrap();
 
     // There is one container running
     assert_eq!(containers.len(), 1);
...
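Rather than asserting on the first Docker listing, the updated test polls until the plugin container shows up, and bounds the whole wait with tokio::time::timeout so a container that never starts fails the test after 10 seconds instead of hanging it. The sketch below shows the same poll-until-ready pattern in isolation; list_matching_containers and wait_for_containers are hypothetical stand-ins for the bollard-backed ctx.list_containers call used in the test.

use std::time::Duration;
use tokio::time::{sleep, timeout};

// Placeholder for the real query: in the test this asks Docker (via bollard)
// for containers whose name matches the pod owner's key.
async fn list_matching_containers() -> Vec<String> {
    vec![]
}

// Poll once a second until the list is non-empty, or give up after `limit`.
async fn wait_for_containers(limit: Duration) -> Option<Vec<String>> {
    timeout(limit, async {
        loop {
            let containers = list_matching_containers().await;
            if !containers.is_empty() {
                return containers;
            }
            sleep(Duration::from_secs(1)).await;
        }
    })
    .await
    .ok()
}

#[tokio::main]
async fn main() {
    // With the placeholder above this resolves to None once the 3-second
    // timeout elapses; in the real test a Some(non-empty list) is expected.
    let result = wait_for_containers(Duration::from_secs(3)).await;
    println!("{result:?}");
}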