Skip to content

Commit 4abf2f5

Browse files
committed
reuse testMetadataOnly
1 parent c848508 commit 4abf2f5

File tree

1 file changed

+14
-26
lines changed

1 file changed

+14
-26
lines changed

sql/core/src/test/scala/org/apache/spark/sql/execution/OptimizeMetadataOnlyQuerySuite.scala

Lines changed: 14 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -103,6 +103,20 @@ class OptimizeMetadataOnlyQuerySuite extends QueryTest with SharedSparkSession {
103103
"select partcol2, min(partcol1) from srcpart where partcol1 = 0 group by partcol2",
104104
"select max(c1) from (select partcol1 + 1 as c1 from srcpart where partcol1 = 0) t")
105105

106+
testMetadataOnly(
107+
"SPARK-31590 The filter used by Metadata-only queries should not have Unevaluable",
108+
"""
109+
|SELECT partcol1, MAX(partcol2) AS partcol2
110+
|FROM srcpart
111+
|WHERE partcol1 = (
112+
| SELECT MAX(partcol1)
113+
| FROM srcpart
114+
|)
115+
|AND partcol2 = 'event'
116+
|GROUP BY partcol1
117+
|""".stripMargin
118+
)
119+
106120
testNotMetadataOnly(
107121
"Don't optimize metadata only query for non-partition columns",
108122
"select col1 from srcpart group by col1",
@@ -150,30 +164,4 @@ class OptimizeMetadataOnlyQuerySuite extends QueryTest with SharedSparkSession {
150164
}
151165
}
152166
}
153-
154-
test("SPARK-31590 The filter used by Metadata-only queries should not have Unevaluable") {
155-
Seq(true, false).foreach { enableOptimizeMetadataOnlyQuery =>
156-
withSQLConf(OPTIMIZER_METADATA_ONLY.key -> enableOptimizeMetadataOnlyQuery.toString) {
157-
val df = sql(
158-
"""
159-
|SELECT partcol1, MAX(partcol2) AS partcol2
160-
|FROM srcpart
161-
|WHERE partcol1 = (
162-
| SELECT MAX(partcol1)
163-
| FROM srcpart
164-
|)
165-
|GROUP BY partcol1
166-
""".stripMargin)
167-
val localRelations = df.queryExecution.optimizedPlan.collect {
168-
case l@LocalRelation(_, _, _) => l
169-
}
170-
if (enableOptimizeMetadataOnlyQuery) {
171-
assert(localRelations.size == 1)
172-
} else {
173-
assert(localRelations.size == 0)
174-
}
175-
df.collect()
176-
}
177-
}
178-
}
179167
}

0 commit comments

Comments
 (0)