Skip to content

Commit ec53e92

Browse files
Yash Datta
authored and committed
SPARK-3968: No push down should result in case we are unable to create a record filter
1 parent 48163c3 commit ec53e92

1 file changed

Lines changed: 4 additions & 2 deletions

File tree

sql/core/src/main/scala/org/apache/spark/sql/parquet/ParquetTableOperations.scala

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -113,8 +113,10 @@ case class ParquetTableScan(
113113

114114
// Set this in configuration of ParquetInputFormat, needed for RowGroupFiltering
115115
val filter: Filter = ParquetFilters.createRecordFilter(columnPruningPred)
116-
val filterPredicate = filter.asInstanceOf[FilterPredicateCompat].getFilterPredicate()
117-
ParquetInputFormat.setFilterPredicate(conf, filterPredicate)
116+
if (filter!= null){
117+
val filterPredicate = filter.asInstanceOf[FilterPredicateCompat].getFilterPredicate()
118+
ParquetInputFormat.setFilterPredicate(conf, filterPredicate)
119+
}
118120
}
119121

120122
// Tell FilteringParquetRowInputFormat whether it's okay to cache Parquet and FS metadata

0 commit comments

Comments (0)