in lib/protocol/encoder.js [387:451]
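/**
 * Builds the protobuf BatchWriteRowRequest: validates params.tables, maps each
 * row's type ('PUT' | 'UPDATE' | 'DELETE') to TableStore.BatchWriteType,
 * serializes the row change with TableStore.PlainBufferBuilder, and copies the
 * optional condition, returnContent and transactionId fields.
 */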
encodeBatchWriteRow: function (params) {
    // Require at least one table entry; each table carries its own batch of rows.
    if (params.tables === undefined || params.tables === null || params.tables.length <= 0) {
        throw new Error('params.tables must be a non-empty array');
    }
    var properties = {
        tables: []
    };
    for (var i = 0; i < params.tables.length; i++) {
        var item = {
            tableName: params.tables[i].tableName,
            rows: []
        };
        for (var attr in params.tables[i].rows) {
            var rowParams = params.tables[i].rows[attr];
            var row = {};
            var batchWriteType = rowParams.type;
            // Map the string type ('PUT' | 'UPDATE' | 'DELETE') onto the
            // TableStore.BatchWriteType enum; any other value is rejected below.
            row.type = ~["PUT", "UPDATE", "DELETE"].indexOf(batchWriteType) ? TableStore.BatchWriteType[batchWriteType] : batchWriteType;
            switch (batchWriteType) {
                case 'PUT':
                    row.rowChange = TableStore.PlainBufferBuilder.serializeForPutRow(
                        rowParams.primaryKey, rowParams.attributeColumns);
                    break;
                case 'UPDATE':
                    row.rowChange = TableStore.PlainBufferBuilder.serializeForUpdateRow(
                        rowParams.primaryKey, rowParams.attributeColumns);
                    break;
                case 'DELETE':
                    row.rowChange = TableStore.PlainBufferBuilder.serializeForDeleteRow(
                        rowParams.primaryKey, rowParams.attributeColumns);
                    break;
                default:
                    throw new Error('Unsupported batchWriteRow type: ' + batchWriteType);
            }
            // Optional per-row condition (row existence expectation / column condition).
            if (rowParams.condition) {
                row.condition = {};
                TableStore.encoder._makeCondition(row.condition, rowParams.condition);
            }
            // Optional return content, e.g. echoing back the written row's primary key.
            if (rowParams.returnContent && rowParams.returnContent.returnType) {
                row.returnContent = { returnType: rowParams.returnContent.returnType };
                if (rowParams.returnContent.returnColumns) {
                    row.returnContent.returnColumnNames = rowParams.returnContent.returnColumns;
                }
            }
            item.rows.push(row);
        }
        properties.tables.push(item);
    }
    // Propagate the optional transaction id.
    if (params.transactionId) {
        properties.transactionId = params.transactionId;
    }
    var request = tsProtos.BatchWriteRowRequest.create(properties);
    return request;
},
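// Illustrative sketch (not part of the original source): the shape of `params`
// this encoder expects. The table name, primary key and column values are
// hypothetical placeholders; in normal use the encoder is reached indirectly
// through client.batchWriteRow(params).
//
//   var params = {
//       tables: [{
//           tableName: 'sampleTable',
//           rows: [{
//               type: 'PUT',                          // 'PUT' | 'UPDATE' | 'DELETE'
//               condition: new TableStore.Condition(
//                   TableStore.RowExistenceExpectation.IGNORE, null),
//               primaryKey: [{ pk: '1' }],
//               attributeColumns: [{ col1: 'value1' }],
//               returnContent: { returnType: TableStore.ReturnType.Primarykey }
//           }]
//       }]
//   };
//   var request = TableStore.encoder.encodeBatchWriteRow(params);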