An out-of-memory error occurred during a spatial and temporal query using GeoWaveInputFormat. The query was over a 1:50M shape file of the country of Brazil over a 6-month period. The same query was successful when using a shape file of Italy.
The error:
java.lang.OutOfMemoryError: Java heap space
at java.util.Arrays.copyOfRange(Arrays.java:2694)
at java.lang.String.(String.java:203)
at java.lang.StringBuilder.toString(StringBuilder.java:405)
at com.google.common.io.GwtWorkarounds$6.toString(GwtWorkarounds.java:227)
at com.google.common.io.BaseEncoding.encode(BaseEncoding.java:165)
at com.google.common.io.BaseEncoding.encode(BaseEncoding.java:145)
at mil.nga.giat.geowave.index.ByteArrayUtils.byteArrayToString(ByteArrayUtils.java:48)
at mil.nga.giat.geowave.accumulo.query.AccumuloConstraintsQuery.addScanIteratorSettings(AccumuloConstraintsQuery.java:174)
at mil.nga.giat.geowave.accumulo.query.AccumuloFilteredIndexQuery.query(AccumuloFilteredIndexQuery.java:94)
at mil.nga.giat.geowave.accumulo.mapreduce.input.GeoWaveRecordReader.initialize(GeoWaveRecordReader.java:176)
at org.apache.spark.rdd.NewHadoopRDD$$anon$1.(NewHadoopRDD.scala:133)
at org.apache.spark.rdd.NewHadoopRDD.compute(NewHadoopRDD.scala:104)
at org.apache.spark.rdd.NewHadoopRDD.compute(NewHadoopRDD.scala:66)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:244)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:244)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:61)
at org.apache.spark.scheduler.Task.run(Task.scala:64)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:203)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:745)
An out-of-memory error occurred during a spatial and temporal query using GeoWaveInputFormat. The query was over a 1:50M shape file of the country of Brazil over a 6-month period. The same query was successful when using a shape file of Italy.
The error:
java.lang.OutOfMemoryError: Java heap space
at java.util.Arrays.copyOfRange(Arrays.java:2694)
at java.lang.String.(String.java:203)
at java.lang.StringBuilder.toString(StringBuilder.java:405)
at com.google.common.io.GwtWorkarounds$6.toString(GwtWorkarounds.java:227)
at com.google.common.io.BaseEncoding.encode(BaseEncoding.java:165)
at com.google.common.io.BaseEncoding.encode(BaseEncoding.java:145)
at mil.nga.giat.geowave.index.ByteArrayUtils.byteArrayToString(ByteArrayUtils.java:48)
at mil.nga.giat.geowave.accumulo.query.AccumuloConstraintsQuery.addScanIteratorSettings(AccumuloConstraintsQuery.java:174)
at mil.nga.giat.geowave.accumulo.query.AccumuloFilteredIndexQuery.query(AccumuloFilteredIndexQuery.java:94)
at mil.nga.giat.geowave.accumulo.mapreduce.input.GeoWaveRecordReader.initialize(GeoWaveRecordReader.java:176)
at org.apache.spark.rdd.NewHadoopRDD$$anon$1.(NewHadoopRDD.scala:133)
at org.apache.spark.rdd.NewHadoopRDD.compute(NewHadoopRDD.scala:104)
at org.apache.spark.rdd.NewHadoopRDD.compute(NewHadoopRDD.scala:66)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:244)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:244)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:61)
at org.apache.spark.scheduler.Task.run(Task.scala:64)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:203)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:745)