in tools/cli/admin_elastic_search_commands.go [308:443]
// GenerateReport runs an SQL-style aggregation query against Elasticsearch,
// prints the resulting buckets as a table on stdout, and writes the same data
// to an HTML or CSV report file.
//
// Required flags: FlagIndex (target ES index) and FlagListQuery (the SQL query).
// Optional flags: FlagOutputFormat ("html" or "csv", case-insensitive) and
// FlagOutputFilename (defaults to "./report.<format>").
// Any failure terminates the process via ErrorAndExit.
func GenerateReport(c *cli.Context) {
	// use url command argument to create client
	index := getRequiredOption(c, FlagIndex)
	sql := getRequiredOption(c, FlagListQuery)
	var reportFormat, reportFilePath string
	if c.IsSet(FlagOutputFormat) {
		reportFormat = c.String(FlagOutputFormat)
	}
	// Fail fast on an unsupported (or missing) report format BEFORE the
	// expensive Elasticsearch round-trip. Previously this was only checked
	// at the very end, so an unset format produced "./report." and the
	// command still ran the full query before dying.
	switch strings.ToLower(reportFormat) {
	case "html", "csv":
	default:
		ErrorAndExit(fmt.Sprintf(`Report format %v not supported.`, reportFormat), nil)
	}
	if c.IsSet(FlagOutputFilename) {
		reportFilePath = c.String(FlagOutputFilename)
	} else {
		reportFilePath = "./report." + reportFormat
	}
	esClient := cFactory.ElasticSearchClient(c)
	ctx := context.Background()
	// convert sql to dsl
	e := esql.NewESql()
	e.SetCadence(true)
	e.ProcessQueryValue(timeKeyFilter, timeValProcess)
	dsl, sortFields, err := e.ConvertPrettyCadence(sql, "")
	if err != nil {
		ErrorAndExit("Fail to convert sql to dsl", err)
	}
	// query client
	resp, err := esClient.Search(index).Source(dsl).Do(ctx)
	if err != nil {
		ErrorAndExit("Fail to talk with ES", err)
	}
	// Show result to terminal
	table := tablewriter.NewWriter(os.Stdout)
	var headers []string
	var groupby, bucket map[string]interface{}
	var buckets []interface{}
	// Guard the aggregation lookup: dereferencing a missing key would
	// nil-pointer panic instead of producing a useful error message.
	groupbyRaw, ok := resp.Aggregations["groupby"]
	if !ok || groupbyRaw == nil {
		ErrorAndExit("Fail to parse groupby", fmt.Errorf("no groupby aggregation in response"))
	}
	err = json.Unmarshal(*groupbyRaw, &groupby)
	if err != nil {
		ErrorAndExit("Fail to parse groupby", err)
	}
	// ok-checked assertions: a malformed response should error, not panic.
	buckets, ok = groupby["buckets"].([]interface{})
	if !ok {
		ErrorAndExit("Fail to parse groupby", fmt.Errorf("buckets field missing or malformed"))
	}
	if len(buckets) == 0 {
		fmt.Println("no matching bucket")
		return
	}
	// get the FIRST bucket in bucket list to extract all tags. These extracted tags are to be used as table heads
	bucket, ok = buckets[0].(map[string]interface{})
	if !ok {
		ErrorAndExit("Fail to parse groupby", fmt.Errorf("bucket has unexpected format"))
	}
	// record the column position in the table of each returned item
	ids := make(map[string]int)
	// We want these 3 columns shows at leftmost of the table in cadence report usage. It can be changed in future.
	primaryCols := []string{"group_DomainID", "group_WorkflowType", "group_CloseStatus"}
	primaryColsMap := map[string]int{
		"group_DomainID":     1,
		"group_WorkflowType": 1,
		"group_CloseStatus":  1,
	}
	buckKeys := 0 // number of bucket keys, used for table collapsing in html report
	if v, exist := bucket["key"]; exist {
		vmap := v.(map[string]interface{})
		// first search whether primaryCols keys exist, if found, put them at the table beginning
		for _, k := range primaryCols {
			if _, exist := vmap[k]; exist {
				k = trimBucketKey(k) // trim the unnecessary prefix
				headers = append(headers, k)
				ids[k] = len(ids)
				buckKeys++
			}
		}
		// extract all remaining bucket keys
		for k := range vmap {
			if _, exist := primaryColsMap[k]; !exist {
				k = trimBucketKey(k)
				headers = append(headers, k)
				ids[k] = len(ids)
				buckKeys++
			}
		}
	}
	// extract all other non-key items and set the table head accordingly
	for k := range bucket {
		if k != "key" {
			if k == "doc_count" {
				k = "count"
			}
			headers = append(headers, k)
			ids[k] = len(ids)
		}
	}
	table.SetHeader(headers)
	// read each bucket and fill the table, use map ids to find the correct spot
	var tableData [][]string
	for _, b := range buckets {
		bucket = b.(map[string]interface{})
		data := make([]string, len(headers))
		for k, v := range bucket {
			switch k {
			case "key": // fill group key
				vmap := v.(map[string]interface{})
				for kk, vv := range vmap {
					kk = trimBucketKey(kk)
					data[ids[kk]] = fmt.Sprintf("%v", vv)
				}
			case "doc_count": // fill bucket size count
				data[ids["count"]] = fmt.Sprintf("%v", v)
			default:
				var datum string
				vmap := v.(map[string]interface{})
				if strings.Contains(k, "Attr_CustomDatetimeField") {
					datum = fmt.Sprintf("%v", vmap["value_as_string"])
				} else {
					datum = fmt.Sprintf("%v", vmap["value"])
					// convert Cadence stored time (unix nano) to readable format
					if strings.Contains(k, "Time") && !strings.Contains(k, "Attr_") {
						datum = toTimeStr(datum)
					}
				}
				data[ids[k]] = datum
			}
		}
		table.Append(data)
		tableData = append(tableData, data)
	}
	table.Render()
	switch strings.ToLower(reportFormat) {
	case "html":
		// Case-fold the ORDER BY probe so mixed-case SQL ("Order By") is
		// also recognized as pre-sorted.
		sorted := len(sortFields) > 0 || strings.Contains(strings.ToUpper(sql), "ORDER BY")
		generateHTMLReport(reportFilePath, buckKeys, sorted, headers, tableData)
	case "csv":
		generateCSVReport(reportFilePath, headers, tableData)
	default:
		// Unreachable: the format was validated up front; kept defensively.
		ErrorAndExit(fmt.Sprintf(`Report format %v not supported.`, reportFormat), nil)
	}
}