Goal: Recursively and generically flatten a document for integration with an RDBMS.

- The Function genericFlatten shows how a document can be flattened into an RDBMS-importable record.
- Requires Eventing Storage (or a metadata collection), a "source" collection, and a "destination" collection.
- Operates on all documents where doc.type === "my_data_to_flatten".
- Writes the transformed, or flattened, documents to the destination collection with the same type and id.
- The destination collection can be shared (or replicated via XDCR to a business partner, for example to AWS, GCP, or Azure).
- Also removes the flattened document from the destination collection when the original source document is removed.
- Note: since the document body is not available during an OnDelete, we filter by KEY prefix, keeping only keys starting with "my_data_to_flatten:".
-
genericFlatten
// To run configure the settings for this Function, genericFlatten, as follows:
//
// Version 7.0+
//   "Listen to Location"
//     bulk.data.source
//   "Eventing Storage"
//     rr100.eventing.metadata
//   Binding(s)
//    1. "binding type", "alias name...", "bucket.scope.collection", "Access"
//       "bucket alias", "dst_col",       "bulk.data.destination",   "read and write"
//
// Version 6.X
//   "Source Bucket"
//     source
//   "MetaData Bucket"
//     metadata
//   Binding(s)
//    1. "binding type", "alias name...", "bucket",      "Access"
//       "bucket alias", "dst_col",       "destination", "read and write"
function deepFlatten(obj, flatDoc, path) {
    if (!obj || typeof obj !== 'object') {
        // log("terminal", path, obj);
        // terminal value: drop the leading "_" and store the path as the key
        flatDoc[path.substr(1)] = obj;
        return obj;
    }
    // non-terminal: recurse into arrays and objects, extending the path with "_"
    let newObj = {};
    if (Array.isArray(obj)) {
        for (var i = 0; i < obj.length; i++) { // note: < not <=, else we recurse on undefined
            newObj[i] = deepFlatten(obj[i], flatDoc, path + "_" + i);
        }
    } else {
        Object.keys(obj).forEach((k) => {
            newObj[k] = deepFlatten(obj[k], flatDoc, path + "_" + k);
        });
    }
    return newObj;
}
function OnUpdate(doc, meta) {
    if (doc.type !== "my_data_to_flatten") return;
    var flatDoc = {};
    deepFlatten(doc, flatDoc, "");
    // write the flattened doc to the destination collection via the bucket alias dst_col
    dst_col[meta.id] = flatDoc;
}
function OnDelete(meta, options) {
    // the document body is gone on delete/expiry, so filter by KEY prefix
    if (!meta.id.startsWith("my_data_to_flatten:")) return;
    log("OnDelete expired=" + options.expired + " REM id: " + meta.id);
    try {
        delete dst_col[meta.id];
    } catch (e) {
        log("Error on delete: " + e + ", id: " + meta.id);
    }
}
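The recursion can be sanity-checked outside Eventing in plain Node.js. A minimal sketch, assuming the deepFlatten function above has been pasted into the same file (the sample document and expected output are illustrative):

// Standalone check of deepFlatten (paste the function above into this file, then: node check.js)
const sample = { a: 1, b: { c: "x", d: [10, null, 20] } };
const flat = {};
deepFlatten(sample, flat, ""); // the initial path is empty; the leading "_" is stripped per key
console.log(flat);
// Expected: { a: 1, b_c: 'x', b_d_0: 10, b_d_1: null, b_d_2: 20 }

One caveat of "_"-joined paths: they can collide, e.g. { "a_b": 1 } and { "a": { "b": 1 } } both produce the key a_b, so a more distinctive separator may be safer if source fields themselves contain underscores.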
Input Data/Mutation (in keyspace bulk.data.source)

INPUT: KEY my_data_to_flatten::1001
{
"type": "my_data_to_flatten",
"id": 1001,
"a_first_super_long_name_right_here": 111,
"b": "my object",
"c": "",
"d": null,
"f": {
"v": 1,
"x": "",
"y": null,
"m": {
"a": "asd"
}
},
"f2": {
"v": 1,
"x": "",
"y": null,
"m": {
"a": "asd",
"b": null,
"c": null,
"a_second_super_long_name_right_here": 222,
"ary1": [
null,
1,
null,
2,
null,
3
],
"ary2": [
[
1,
2
],
[
3,
4
],
[
5,
null,
6
],
{
"a": 1
},
{
"b": 2
},
{
"c": 3,
"d": null
},
{
"e": null
},
[
null,
null,
null,
null
]
]
}
},
"a_third_super_long_name_right_here": {
"x": 1,
"y": 2,
"z": null
}
}
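To seed this input document for a test, one approach (illustrative; the connection string, credentials, and abbreviated document are placeholders, not part of this Function) is the Couchbase Node.js SDK:

const couchbase = require('couchbase');

async function seed() {
    // placeholders: adjust the connection string and credentials for your cluster
    const cluster = await couchbase.connect('couchbase://127.0.0.1', {
        username: 'Administrator',
        password: 'password'
    });
    const collection = cluster.bucket('bulk').scope('data').collection('source');
    // upsert the sample document under the key the Function filters on
    await collection.upsert('my_data_to_flatten::1001', {
        type: 'my_data_to_flatten',
        id: 1001,
        f: { m: { a: 'asd' } } // abbreviated; use the full sample above
    });
    await cluster.close();
}

seed().catch(console.error);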
Output Data/Mutation (in keyspace bulk.data.destination)

UPDATED/OUTPUT: KEY my_data_to_flatten::1001
{
"type": "my_data_to_flatten",
"id": 1001,
"a_first_super_long_name_right_here": 111,
"b": "my object",
"c": "",
"d": null,
"f_v": 1,
"f_x": "",
"f_y": null,
"f_m_a": "asd",
"f2_v": 1,
"f2_x": "",
"f2_y": null,
"f2_m_a": "asd",
"f2_m_b": null,
"f2_m_c": null,
"f2_m_a_second_super_long_name_right_here": 222,
"f2_m_ary1_0": null,
"f2_m_ary1_1": 1,
"f2_m_ary1_2": null,
"f2_m_ary1_3": 2,
"f2_m_ary1_4": null,
"f2_m_ary1_5": 3,
"f2_m_ary2_0_0": 1,
"f2_m_ary2_0_1": 2,
"f2_m_ary2_1_0": 3,
"f2_m_ary2_1_1": 4,
"f2_m_ary2_2_0": 5,
"f2_m_ary2_2_1": null,
"f2_m_ary2_2_2": 6,
"f2_m_ary2_3_a": 1,
"f2_m_ary2_4_b": 2,
"f2_m_ary2_5_c": 3,
"f2_m_ary2_5_d": null,
"f2_m_ary2_6_e": null,
"f2_m_ary2_7_0": null,
"f2_m_ary2_7_1": null,
"f2_m_ary2_7_2": null,
"f2_m_ary2_7_3": null,
"a_third_super_long_name_right_here_x": 1,
"a_third_super_long_name_right_here_y": 2,
"a_third_super_long_name_right_here_z": null
}
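Downstream, each flattened record maps naturally onto a single table row. As a hypothetical illustration (the toCsv helper and its quoting rules are not part of this Function), a flat document can be rendered as a CSV header plus data line for a bulk RDBMS import:

// Hypothetical helper: flattened keys become column names, values become one CSV row.
function toCsv(flatDoc) {
    const cols = Object.keys(flatDoc);
    const quote = (v) => (v === null || v === undefined)
        ? ''                                            // emit NULLs as empty fields
        : '"' + String(v).replace(/"/g, '""') + '"';    // double any embedded quotes
    return cols.map(quote).join(',') + '\n' +
           cols.map((c) => quote(flatDoc[c])).join(',');
}

console.log(toCsv({ type: 'my_data_to_flatten', id: 1001, f_m_a: 'asd', d: null }));
// "type","id","f_m_a","d"
// "my_data_to_flatten","1001","asd",

Note the deliberately long key names in the sample: flattened paths can exceed RDBMS identifier length limits (for example, PostgreSQL truncates identifiers at 63 bytes), which is worth validating before import.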