Hi. I am trying to use elasticsearch to store metrics of our cluster. The collection is done using sensu, and the values in elasticsearch still have the fields "key", "value" and "timestamp" that you would see in graphite. When I create an average/max/min histogram over our metrics I get weird results. The average cpu utilisation of our cluster is 1.22907852E-315 according to elasticsearch. At first glance I suspected an overflow issue, but there are simply not enough results to cause that.
{
"size": 5,
"query": {
"bool": {
"must": [ { "match": { "host": "poolnode-03" } },
{ "match": { "metric": "cpu_metrics" } },
{ "range": {
"@timestamp": {
"gte" : "2014-11-12T17:50:00",
"lte" : "2014-11-12T17:51:00"
}
}
}]
}
},
"aggs": {
"cpu_histogram": {
"date_histogram": {
"field": "@timestamp",
"interval": "5000ms",
"min_doc_count": 1
},
"aggs": {
"avg_cpu": { "avg": { "field": "value" } },
"max_cpu": { "max": { "field": "value" } },
"min_cpu": { "min": { "field": "value" } }
}
}
}
}
{
"took" : 13,
"timed_out" : false,
"_shards" : {
"total" : 5,
"successful" : 5,
"failed" : 0
},
"hits" : {
"total" : 408,
"max_score" : 3.4773107,
"hits" : [ {
"_index" : "sensu",
"_type" : "cpu_metrics",
"_id" : "AUmlIK4cRrcdtiDjFlbQ",
"_score" : 3.4773107,
"_source":{"host":"poolnode-03","metric":"cpu_metrics","name":"poolnode-03.cpu.total.iowait","value":11637,"@timestamp":"2014-11-12T17:50:22+00:00"}
}, {
"_index" : "sensu",
"_type" : "cpu_metrics",
"_id" : "AUmlIK4cRrcdtiDjFlbV",
"_score" : 3.4773107,
"_source":{"host":"poolnode-03","metric":"cpu_metrics","name":"poolnode-03.cpu.cpu0.user","value":15686755,"@timestamp":"2014-11-12T17:50:22+00:00"}
}, {
"_index" : "sensu",
"_type" : "cpu_metrics",
"_id" : "AUmlIK4cRrcdtiDjFlbe",
"_score" : 3.4773107,
"_source":{"host":"poolnode-03","metric":"cpu_metrics","name":"poolnode-03.cpu.cpu1.user","value":12541923,"@timestamp":"2014-11-12T17:50:22+00:00"}
}, {
"_index" : "sensu",
"_type" : "cpu_metrics",
"_id" : "AUmlIK4cRrcdtiDjFlbj",
"_score" : 3.4773107,
"_source":{"host":"poolnode-03","metric":"cpu_metrics","name":"poolnode-03.cpu.cpu1.irq","value":3,"@timestamp":"2014-11-12T17:50:22+00:00"}
}, {
"_index" : "sensu",
"_type" : "cpu_metrics",
"_id" : "AUmlIK4cRrcdtiDjFlbo",
"_score" : 3.4773107,
"_source":{"host":"poolnode-03","metric":"cpu_metrics","name":"poolnode-03.cpu.ctxt","value":4585398711,"@timestamp":"2014-11-12T17:50:22+00:00"}
} ]
},
"aggregations" : {
"cpu_histogram" : {
"buckets" : [ {
"key_as_string" : "2014-11-12T17:50:00.000Z",
"key" : 1415814600000,
"doc_count" : 68,
"min_cpu" : {
"value" : 0.0
},
"avg_cpu" : {
"value" : 1.22906508E-315
},
"max_cpu" : {
"value" : 2.2995982737E-314
}
}, {
"key_as_string" : "2014-11-12T17:50:10.000Z",
"key" : 1415814610000,
"doc_count" : 68,
"min_cpu" : {
"value" : 0.0
},
"avg_cpu" : {
"value" : 1.229071915E-315
},
"max_cpu" : {
"value" : 2.2996008E-314
}
}, {
"key_as_string" : "2014-11-12T17:50:20.000Z",
"key" : 1415814620000,
"doc_count" : 68,
"min_cpu" : {
"value" : 0.0
},
"avg_cpu" : {
"value" : 1.229073614E-315
},
"max_cpu" : {
"value" : 2.29960331E-314
}
}, {
"key_as_string" : "2014-11-12T17:50:30.000Z",
"key" : 1415814630000,
"doc_count" : 68,
"min_cpu" : {
"value" : 0.0
},
"avg_cpu" : {
"value" : 1.22907528E-315
},
"max_cpu" : {
"value" : 2.2996057756E-314
}
}, {
"key_as_string" : "2014-11-12T17:50:40.000Z",
"key" : 1415814640000,
"doc_count" : 68,
"min_cpu" : {
"value" : 0.0
},
"avg_cpu" : {
"value" : 1.229076895E-315
},
"max_cpu" : {
"value" : 2.2996081945E-314
}
}, {
"key_as_string" : "2014-11-12T17:50:50.000Z",
"key" : 1415814650000,
"doc_count" : 68,
"min_cpu" : {
"value" : 0.0
},
"avg_cpu" : {
"value" : 1.22907852E-315
},
"max_cpu" : {
"value" : 2.2996107306E-314
}
} ]
}
}
}
{
"sensu" : {
"mappings" : {
"cpu_metrics" : {
"properties" : {
"@timestamp" : {
"type" : "date",
"format" : "dateOptionalTime"
},
"host" : {
"type" : "string"
},
"metric" : {
"type" : "string"
},
"name" : {
"type" : "string"
},
"value" : {
"type" : "long"
}
}
}
}
}
}
I suspect I have misunderstood some of the syntax and the error is on my side, but after studying the documentation for quite a while I still cannot figure out what is wrong with my search.
Hi. I am trying to use elasticsearch to store metrics of our cluster. The collection is done using sensu, and the values in elasticsearch still have the fields "key", "value" and "timestamp" that you would see in graphite. When I create an average/max/min histogram over our metrics I get weird results. The average cpu utilisation of our cluster is 1.22907852E-315 according to elasticsearch. At first glance I suspected an overflow issue, but there are simply not enough results to cause that.
The json I am using is the following:
The result looks like this:
I checked the mapping of the type and everything looks good:
I suspect I have misunderstood some of the syntax and the error is on my side, but after studying the documentation for quite a while I still cannot figure out what is wrong with my search.
Thanks,
Mathias