From 7380ed5b46b281e231b68a2c24b99cb751e1ebb0 Mon Sep 17 00:00:00 2001
From: Parth Chandra
Date: Wed, 6 May 2026 11:03:51 -0700
Subject: [PATCH 1/2] chore: remove legacy ENABLE_COMET_SCAN_ONLY and
 ENABLE_COMET_ANSI_MODE env vars from Spark diffs

These env vars are no longer set when running Spark SQL tests.
ENABLE_COMET_SCAN_ONLY is removed from all diffs (always enable
exec+shuffle). ENABLE_COMET_ANSI_MODE is removed from 3.4 and 3.5
diffs (ANSI mode has dedicated tests in the Spark SQL suite).

Closes #2724

Co-Authored-By: Claude Opus 4.6
---
 dev/diffs/3.4.3.diff | 72 +++++++++----------------------------------
 dev/diffs/3.5.8.diff | 72 +++++++++----------------------------------
 dev/diffs/4.0.2.diff | 52 +++++++++----------------------
 dev/diffs/4.1.1.diff | 52 +++++++++----------------------
 4 files changed, 60 insertions(+), 188 deletions(-)

diff --git a/dev/diffs/3.4.3.diff b/dev/diffs/3.4.3.diff
index 8a939f2028..97e20bcea1 100644
--- a/dev/diffs/3.4.3.diff
+++ b/dev/diffs/3.4.3.diff
@@ -1998,9 +1998,9 @@ index 104b4e416cd..b8af360fa14 100644
 // Note that, if record level filtering is enabled, it should be a single record.
 // If no filter is pushed down to Parquet, it should be the total length of data.
- assert(actual > 1 && actual < data.length)
-+ // Only enable Comet test iff it's scan only, since with native execution
++ // Skip when Comet is enabled since with native execution
+ // `stripSparkFilter` can't remove the native filter
-+ if (!isCometEnabled || isCometScanOnly) {
++ if (!isCometEnabled) {
+ assert(actual > 1 && actual < data.length)
+ }
 }
@@ -2011,9 +2011,9 @@ index 104b4e416cd..b8af360fa14 100644
 // Note that, if record level filtering is enabled, it should be a single record.
 // If no filter is pushed down to Parquet, it should be the total length of data.
- assert(actual > 1 && actual < data.length)
-+ // Only enable Comet test iff it's scan only, since with native execution
++ // Skip when Comet is enabled since with native execution
+ // `stripSparkFilter` can't remove the native filter
-+ if (!isCometEnabled || isCometScanOnly) {
++ if (!isCometEnabled) {
+ assert(actual > 1 && actual < data.length)
+ }
 }
@@ -2934,24 +2934,6 @@ index dd55fcfe42c..99bc018008a 100644
+ * Whether Comet extension is enabled
+ */
+ protected def isCometEnabled: Boolean = SparkSession.isCometEnabled
-+
-+ /**
-+ * Whether to enable ansi mode This is only effective when
-+ * [[isCometEnabled]] returns true.
-+ */
-+ protected def enableCometAnsiMode: Boolean = {
-+ val v = System.getenv("ENABLE_COMET_ANSI_MODE")
-+ v != null && v.toBoolean
-+ }
-+
-+ /**
-+ * Whether Spark should only apply Comet scan optimization. This is only effective when
-+ * [[isCometEnabled]] returns true.
-+ */ -+ protected def isCometScanOnly: Boolean = { -+ val v = System.getenv("ENABLE_COMET_SCAN_ONLY") -+ v != null && v.toBoolean -+ } + protected override def withSQLConf(pairs: (String, String)*)(f: => Unit): Unit = { SparkSession.setActiveSession(spark) @@ -2980,23 +2962,12 @@ index ed2e309fa07..a5ea58146ad 100644 + .set("spark.comet.enabled", "true") + .set("spark.comet.parquet.respectFilterPushdown", "true") + -+ if (!isCometScanOnly) { -+ conf -+ .set("spark.comet.exec.enabled", "true") -+ .set("spark.shuffle.manager", -+ "org.apache.spark.sql.comet.execution.shuffle.CometShuffleManager") -+ .set("spark.comet.exec.shuffle.enabled", "true") -+ .set("spark.comet.memoryOverhead", "10g") -+ } else { -+ conf -+ .set("spark.comet.exec.enabled", "false") -+ .set("spark.comet.exec.shuffle.enabled", "false") -+ } -+ -+ if (enableCometAnsiMode) { -+ conf -+ .set("spark.sql.ansi.enabled", "true") -+ } ++ conf ++ .set("spark.comet.exec.enabled", "true") ++ .set("spark.shuffle.manager", ++ "org.apache.spark.sql.comet.execution.shuffle.CometShuffleManager") ++ .set("spark.comet.exec.shuffle.enabled", "true") ++ .set("spark.comet.memoryOverhead", "10g") + } conf.set( StaticSQLConf.WAREHOUSE_PATH, @@ -3140,24 +3111,11 @@ index 07361cfdce9..97dab2a3506 100644 + .set("spark.sql.extensions", "org.apache.comet.CometSparkSessionExtensions") + .set("spark.comet.enabled", "true") + -+ val v = System.getenv("ENABLE_COMET_SCAN_ONLY") -+ if (v == null || !v.toBoolean) { -+ conf -+ .set("spark.comet.exec.enabled", "true") -+ .set("spark.shuffle.manager", -+ "org.apache.spark.sql.comet.execution.shuffle.CometShuffleManager") -+ .set("spark.comet.exec.shuffle.enabled", "true") -+ } else { -+ conf -+ .set("spark.comet.exec.enabled", "false") -+ .set("spark.comet.exec.shuffle.enabled", "false") -+ } -+ -+ val a = System.getenv("ENABLE_COMET_ANSI_MODE") -+ if (a != null && a.toBoolean) { -+ conf -+ .set("spark.sql.ansi.enabled", "true") -+ } ++ conf ++ .set("spark.comet.exec.enabled", "true") ++ .set("spark.shuffle.manager", ++ "org.apache.spark.sql.comet.execution.shuffle.CometShuffleManager") ++ .set("spark.comet.exec.shuffle.enabled", "true") + } + conf diff --git a/dev/diffs/3.5.8.diff b/dev/diffs/3.5.8.diff index d3a5c617dc..c5bef7c3fa 100644 --- a/dev/diffs/3.5.8.diff +++ b/dev/diffs/3.5.8.diff @@ -1979,9 +1979,9 @@ index 8e88049f51e..20d7ef7b1bc 100644 // Note that, if record level filtering is enabled, it should be a single record. // If no filter is pushed down to Parquet, it should be the total length of data. - assert(actual > 1 && actual < data.length) -+ // Only enable Comet test iff it's scan only, since with native execution ++ // Skip when Comet is enabled since with native execution + // `stripSparkFilter` can't remove the native filter -+ if (!isCometEnabled || isCometScanOnly) { ++ if (!isCometEnabled) { + assert(actual > 1 && actual < data.length) + } } @@ -1992,9 +1992,9 @@ index 8e88049f51e..20d7ef7b1bc 100644 // Note that, if record level filtering is enabled, it should be a single record. // If no filter is pushed down to Parquet, it should be the total length of data. 
- assert(actual > 1 && actual < data.length) -+ // Only enable Comet test iff it's scan only, since with native execution ++ // Skip when Comet is enabled since with native execution + // `stripSparkFilter` can't remove the native filter -+ if (!isCometEnabled || isCometScanOnly) { ++ if (!isCometEnabled) { + assert(actual > 1 && actual < data.length) + } } @@ -2886,24 +2886,6 @@ index e937173a590..7d20538bc68 100644 + * Whether Comet extension is enabled + */ + protected def isCometEnabled: Boolean = SparkSession.isCometEnabled -+ -+ /** -+ * Whether to enable ansi mode This is only effective when -+ * [[isCometEnabled]] returns true. -+ */ -+ protected def enableCometAnsiMode: Boolean = { -+ val v = System.getenv("ENABLE_COMET_ANSI_MODE") -+ v != null && v.toBoolean -+ } -+ -+ /** -+ * Whether Spark should only apply Comet scan optimization. This is only effective when -+ * [[isCometEnabled]] returns true. -+ */ -+ protected def isCometScanOnly: Boolean = { -+ val v = System.getenv("ENABLE_COMET_SCAN_ONLY") -+ v != null && v.toBoolean -+ } + protected override def withSQLConf(pairs: (String, String)*)(f: => Unit): Unit = { SparkSession.setActiveSession(spark) @@ -2932,23 +2914,12 @@ index ed2e309fa07..a5ea58146ad 100644 + .set("spark.comet.enabled", "true") + .set("spark.comet.parquet.respectFilterPushdown", "true") + -+ if (!isCometScanOnly) { -+ conf -+ .set("spark.comet.exec.enabled", "true") -+ .set("spark.shuffle.manager", -+ "org.apache.spark.sql.comet.execution.shuffle.CometShuffleManager") -+ .set("spark.comet.exec.shuffle.enabled", "true") -+ .set("spark.comet.memoryOverhead", "10g") -+ } else { -+ conf -+ .set("spark.comet.exec.enabled", "false") -+ .set("spark.comet.exec.shuffle.enabled", "false") -+ } -+ -+ if (enableCometAnsiMode) { -+ conf -+ .set("spark.sql.ansi.enabled", "true") -+ } ++ conf ++ .set("spark.comet.exec.enabled", "true") ++ .set("spark.shuffle.manager", ++ "org.apache.spark.sql.comet.execution.shuffle.CometShuffleManager") ++ .set("spark.comet.exec.shuffle.enabled", "true") ++ .set("spark.comet.memoryOverhead", "10g") + } conf.set( StaticSQLConf.WAREHOUSE_PATH, @@ -3092,24 +3063,11 @@ index 1d646f40b3e..5babe505301 100644 + .set("spark.sql.extensions", "org.apache.comet.CometSparkSessionExtensions") + .set("spark.comet.enabled", "true") + -+ val v = System.getenv("ENABLE_COMET_SCAN_ONLY") -+ if (v == null || !v.toBoolean) { -+ conf -+ .set("spark.comet.exec.enabled", "true") -+ .set("spark.shuffle.manager", -+ "org.apache.spark.sql.comet.execution.shuffle.CometShuffleManager") -+ .set("spark.comet.exec.shuffle.enabled", "true") -+ } else { -+ conf -+ .set("spark.comet.exec.enabled", "false") -+ .set("spark.comet.exec.shuffle.enabled", "false") -+ } -+ -+ val a = System.getenv("ENABLE_COMET_ANSI_MODE") -+ if (a != null && a.toBoolean) { -+ conf -+ .set("spark.sql.ansi.enabled", "true") -+ } ++ conf ++ .set("spark.comet.exec.enabled", "true") ++ .set("spark.shuffle.manager", ++ "org.apache.spark.sql.comet.execution.shuffle.CometShuffleManager") ++ .set("spark.comet.exec.shuffle.enabled", "true") + } + conf diff --git a/dev/diffs/4.0.2.diff b/dev/diffs/4.0.2.diff index 5948292d7b..26d80f1665 100644 --- a/dev/diffs/4.0.2.diff +++ b/dev/diffs/4.0.2.diff @@ -2583,9 +2583,9 @@ index 6080a5e8e4b..ea058d57b4b 100644 // Note that, if record level filtering is enabled, it should be a single record. // If no filter is pushed down to Parquet, it should be the total length of data. 
- assert(actual > 1 && actual < data.length) -+ // Only enable Comet test iff it's scan only, since with native execution ++ // Skip when Comet is enabled since with native execution + // `stripSparkFilter` can't remove the native filter -+ if (!isCometEnabled || isCometScanOnly) { ++ if (!isCometEnabled) { + assert(actual > 1 && actual < data.length) + } } @@ -2596,9 +2596,9 @@ index 6080a5e8e4b..ea058d57b4b 100644 // Note that, if record level filtering is enabled, it should be a single record. // If no filter is pushed down to Parquet, it should be the total length of data. - assert(actual > 1 && actual < data.length) -+ // Only enable Comet test iff it's scan only, since with native execution ++ // Skip when Comet is enabled since with native execution + // `stripSparkFilter` can't remove the native filter -+ if (!isCometEnabled || isCometScanOnly) { ++ if (!isCometEnabled) { + assert(actual > 1 && actual < data.length) + } } @@ -3604,15 +3604,6 @@ index f0f3f94b811..f77b54dcef9 100644 + * Whether Comet extension is enabled + */ + protected def isCometEnabled: Boolean = SparkSession.isCometEnabled -+ -+ /** -+ * Whether Spark should only apply Comet scan optimization. This is only effective when -+ * [[isCometEnabled]] returns true. -+ */ -+ protected def isCometScanOnly: Boolean = { -+ val v = System.getenv("ENABLE_COMET_SCAN_ONLY") -+ v != null && v.toBoolean -+ } + protected override def withSQLConf[T](pairs: (String, String)*)(f: => T): T = { SparkSession.setActiveSession(spark) @@ -3645,18 +3636,12 @@ index 245219c1756..a611836f086 100644 + .set("spark.comet.enabled", "true") + .set("spark.comet.parquet.respectFilterPushdown", "true") + -+ if (!isCometScanOnly) { -+ conf -+ .set("spark.comet.exec.enabled", "true") -+ .set("spark.shuffle.manager", -+ "org.apache.spark.sql.comet.execution.shuffle.CometShuffleManager") -+ .set("spark.comet.exec.shuffle.enabled", "true") -+ .set("spark.comet.memoryOverhead", "10g") -+ } else { -+ conf -+ .set("spark.comet.exec.enabled", "false") -+ .set("spark.comet.exec.shuffle.enabled", "false") -+ } ++ conf ++ .set("spark.comet.exec.enabled", "true") ++ .set("spark.shuffle.manager", ++ "org.apache.spark.sql.comet.execution.shuffle.CometShuffleManager") ++ .set("spark.comet.exec.shuffle.enabled", "true") ++ .set("spark.comet.memoryOverhead", "10g") + + } conf.set( @@ -3823,18 +3808,11 @@ index a394d0b7393..a4bc3d3fd8e 100644 + .set("spark.sql.extensions", "org.apache.comet.CometSparkSessionExtensions") + .set("spark.comet.enabled", "true") + -+ val v = System.getenv("ENABLE_COMET_SCAN_ONLY") -+ if (v == null || !v.toBoolean) { -+ conf -+ .set("spark.comet.exec.enabled", "true") -+ .set("spark.shuffle.manager", -+ "org.apache.spark.sql.comet.execution.shuffle.CometShuffleManager") -+ .set("spark.comet.exec.shuffle.enabled", "true") -+ } else { -+ conf -+ .set("spark.comet.exec.enabled", "false") -+ .set("spark.comet.exec.shuffle.enabled", "false") -+ } ++ conf ++ .set("spark.comet.exec.enabled", "true") ++ .set("spark.shuffle.manager", ++ "org.apache.spark.sql.comet.execution.shuffle.CometShuffleManager") ++ .set("spark.comet.exec.shuffle.enabled", "true") + } + + conf diff --git a/dev/diffs/4.1.1.diff b/dev/diffs/4.1.1.diff index 4ffb5377bf..aa151e4302 100644 --- a/dev/diffs/4.1.1.diff +++ b/dev/diffs/4.1.1.diff @@ -2905,9 +2905,9 @@ index 6b73cc8618d..624694916fb 100644 // Note that, if record level filtering is enabled, it should be a single record. // If no filter is pushed down to Parquet, it should be the total length of data. 
- assert(actual > 1 && actual < data.length)
-+ // Only enable Comet test iff it's scan only, since with native execution
++ // Skip when Comet is enabled since with native execution
+ // `stripSparkFilter` can't remove the native filter
-+ if (!isCometEnabled || isCometScanOnly) {
++ if (!isCometEnabled) {
+ assert(actual > 1 && actual < data.length)
+ }
 }
@@ -2918,9 +2918,9 @@ index 6b73cc8618d..624694916fb 100644
 // Note that, if record level filtering is enabled, it should be a single record.
 // If no filter is pushed down to Parquet, it should be the total length of data.
- assert(actual > 1 && actual < data.length)
-+ // Only enable Comet test iff it's scan only, since with native execution
++ // Skip when Comet is enabled since with native execution
+ // `stripSparkFilter` can't remove the native filter
-+ if (!isCometEnabled || isCometScanOnly) {
++ if (!isCometEnabled) {
+ assert(actual > 1 && actual < data.length)
+ }
 }
@@ -4087,15 +4087,6 @@ index f0f3f94b811..f77b54dcef9 100644
+ * Whether Comet extension is enabled
+ */
+ protected def isCometEnabled: Boolean = SparkSession.isCometEnabled
-+
-+ /**
-+ * Whether Spark should only apply Comet scan optimization. This is only effective when
-+ * [[isCometEnabled]] returns true.
-+ */
-+ protected def isCometScanOnly: Boolean = {
-+ val v = System.getenv("ENABLE_COMET_SCAN_ONLY")
-+ v != null && v.toBoolean
-+ }
+
 protected override def withSQLConf[T](pairs: (String, String)*)(f: => T): T = {
 SparkSession.setActiveSession(spark)
@@ -4128,18 +4119,12 @@ index 245219c1756..a611836f086 100644
+ .set("spark.comet.enabled", "true")
+ .set("spark.comet.parquet.respectFilterPushdown", "true")
+
-+ if (!isCometScanOnly) {
-+ conf
-+ .set("spark.comet.exec.enabled", "true")
-+ .set("spark.shuffle.manager",
-+ "org.apache.spark.sql.comet.execution.shuffle.CometShuffleManager")
-+ .set("spark.comet.exec.shuffle.enabled", "true")
-+ .set("spark.comet.memoryOverhead", "10g")
-+ } else {
-+ conf
-+ .set("spark.comet.exec.enabled", "false")
-+ .set("spark.comet.exec.shuffle.enabled", "false")
-+ }
++ conf
++ .set("spark.comet.exec.enabled", "true")
++ .set("spark.shuffle.manager",
++ "org.apache.spark.sql.comet.execution.shuffle.CometShuffleManager")
++ .set("spark.comet.exec.shuffle.enabled", "true")
++ .set("spark.comet.memoryOverhead", "10g")
+
+ }
 conf.set(
@@ -4306,18 +4291,11 @@ index a394d0b7393..a4bc3d3fd8e 100644
+ .set("spark.sql.extensions", "org.apache.comet.CometSparkSessionExtensions")
+ .set("spark.comet.enabled", "true")
+
-+ val v = System.getenv("ENABLE_COMET_SCAN_ONLY")
-+ if (v == null || !v.toBoolean) {
-+ conf
-+ .set("spark.comet.exec.enabled", "true")
-+ .set("spark.shuffle.manager",
-+ "org.apache.spark.sql.comet.execution.shuffle.CometShuffleManager")
-+ .set("spark.comet.exec.shuffle.enabled", "true")
-+ } else {
-+ conf
-+ .set("spark.comet.exec.enabled", "false")
-+ .set("spark.comet.exec.shuffle.enabled", "false")
-+ }
++ conf
++ .set("spark.comet.exec.enabled", "true")
++ .set("spark.shuffle.manager",
++ "org.apache.spark.sql.comet.execution.shuffle.CometShuffleManager")
++ .set("spark.comet.exec.shuffle.enabled", "true")
+ }
+
+ conf

From e38f527912a75bc38f899e558f6ab17fd7235535 Mon Sep 17 00:00:00 2001
From: Parth Chandra
Date: Wed, 6 May 2026 18:18:33 -0700
Subject: [PATCH 2/2] fix: correct hunk header line counts in Spark diffs

Co-Authored-By: Claude Opus 4.6
---
 dev/diffs/3.4.3.diff | 6 +++---
 dev/diffs/3.5.8.diff | 6 +++---
 dev/diffs/4.0.2.diff | 6 +++---
 dev/diffs/4.1.1.diff | 6 +++---
 4 files changed, 12
insertions(+), 12 deletions(-) diff --git a/dev/diffs/3.4.3.diff b/dev/diffs/3.4.3.diff index 97e20bcea1..68db5f8600 100644 --- a/dev/diffs/3.4.3.diff +++ b/dev/diffs/3.4.3.diff @@ -2926,7 +2926,7 @@ index dd55fcfe42c..99bc018008a 100644 if (testTags.exists(_.isInstanceOf[DisableAdaptiveExecution])) { super.test(testName, testTags: _*) { withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "false") { -@@ -242,6 +272,29 @@ private[sql] trait SQLTestUtilsBase +@@ -242,6 +272,11 @@ private[sql] trait SQLTestUtilsBase protected override def _sqlContext: SQLContext = self.spark.sqlContext } @@ -2951,7 +2951,7 @@ diff --git a/sql/core/src/test/scala/org/apache/spark/sql/test/SharedSparkSessio index ed2e309fa07..a5ea58146ad 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/test/SharedSparkSession.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/test/SharedSparkSession.scala -@@ -74,6 +74,31 @@ trait SharedSparkSessionBase +@@ -74,6 +74,20 @@ trait SharedSparkSessionBase // this rule may potentially block testing of other optimization rules such as // ConstantPropagation etc. .set(SQLConf.OPTIMIZER_EXCLUDED_RULES.key, ConvertToLocalRelation.ruleName) @@ -3064,7 +3064,7 @@ diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/test/TestHive.sca index 07361cfdce9..97dab2a3506 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/test/TestHive.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/test/TestHive.scala -@@ -55,25 +55,54 @@ object TestHive +@@ -55,25 +55,41 @@ object TestHive new SparkContext( System.getProperty("spark.sql.test.master", "local[1]"), "TestSQLContext", diff --git a/dev/diffs/3.5.8.diff b/dev/diffs/3.5.8.diff index c5bef7c3fa..e7b6e5fffa 100644 --- a/dev/diffs/3.5.8.diff +++ b/dev/diffs/3.5.8.diff @@ -2878,7 +2878,7 @@ index e937173a590..7d20538bc68 100644 if (testTags.exists(_.isInstanceOf[DisableAdaptiveExecution])) { super.test(testName, testTags: _*) { withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "false") { -@@ -242,6 +272,29 @@ private[sql] trait SQLTestUtilsBase +@@ -242,6 +272,11 @@ private[sql] trait SQLTestUtilsBase protected override def _sqlContext: SQLContext = self.spark.sqlContext } @@ -2903,7 +2903,7 @@ diff --git a/sql/core/src/test/scala/org/apache/spark/sql/test/SharedSparkSessio index ed2e309fa07..a5ea58146ad 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/test/SharedSparkSession.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/test/SharedSparkSession.scala -@@ -74,6 +74,31 @@ trait SharedSparkSessionBase +@@ -74,6 +74,20 @@ trait SharedSparkSessionBase // this rule may potentially block testing of other optimization rules such as // ConstantPropagation etc. 
.set(SQLConf.OPTIMIZER_EXCLUDED_RULES.key, ConvertToLocalRelation.ruleName) @@ -3016,7 +3016,7 @@ diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/test/TestHive.sca index 1d646f40b3e..5babe505301 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/test/TestHive.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/test/TestHive.scala -@@ -53,25 +53,54 @@ object TestHive +@@ -53,25 +53,41 @@ object TestHive new SparkContext( System.getProperty("spark.sql.test.master", "local[1]"), "TestSQLContext", diff --git a/dev/diffs/4.0.2.diff b/dev/diffs/4.0.2.diff index 26d80f1665..a411b56562 100644 --- a/dev/diffs/4.0.2.diff +++ b/dev/diffs/4.0.2.diff @@ -3596,7 +3596,7 @@ index f0f3f94b811..f77b54dcef9 100644 if (testTags.exists(_.isInstanceOf[DisableAdaptiveExecution])) { super.test(testName, testTags: _*) { withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "false") { -@@ -248,8 +278,24 @@ private[sql] trait SQLTestUtilsBase +@@ -248,8 +278,15 @@ private[sql] trait SQLTestUtilsBase override protected def converter: ColumnNodeToExpressionConverter = self.spark.converter } @@ -3625,7 +3625,7 @@ diff --git a/sql/core/src/test/scala/org/apache/spark/sql/test/SharedSparkSessio index 245219c1756..a611836f086 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/test/SharedSparkSession.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/test/SharedSparkSession.scala -@@ -75,6 +75,27 @@ trait SharedSparkSessionBase +@@ -75,6 +75,21 @@ trait SharedSparkSessionBase // this rule may potentially block testing of other optimization rules such as // ConstantPropagation etc. .set(SQLConf.OPTIMIZER_EXCLUDED_RULES.key, ConvertToLocalRelation.ruleName) @@ -3768,7 +3768,7 @@ diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/test/TestHive.sca index a394d0b7393..a4bc3d3fd8e 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/test/TestHive.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/test/TestHive.scala -@@ -53,24 +53,41 @@ object TestHive +@@ -53,24 +53,34 @@ object TestHive new SparkContext( System.getProperty("spark.sql.test.master", "local[1]"), "TestSQLContext", diff --git a/dev/diffs/4.1.1.diff b/dev/diffs/4.1.1.diff index aa151e4302..fc5de15915 100644 --- a/dev/diffs/4.1.1.diff +++ b/dev/diffs/4.1.1.diff @@ -4079,7 +4079,7 @@ index f0f3f94b811..f77b54dcef9 100644 if (testTags.exists(_.isInstanceOf[DisableAdaptiveExecution])) { super.test(testName, testTags: _*) { withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "false") { -@@ -248,8 +278,24 @@ private[sql] trait SQLTestUtilsBase +@@ -248,8 +278,15 @@ private[sql] trait SQLTestUtilsBase override protected def converter: ColumnNodeToExpressionConverter = self.spark.converter } @@ -4108,7 +4108,7 @@ diff --git a/sql/core/src/test/scala/org/apache/spark/sql/test/SharedSparkSessio index 245219c1756..a611836f086 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/test/SharedSparkSession.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/test/SharedSparkSession.scala -@@ -75,6 +75,27 @@ trait SharedSparkSessionBase +@@ -75,6 +75,21 @@ trait SharedSparkSessionBase // this rule may potentially block testing of other optimization rules such as // ConstantPropagation etc. 
.set(SQLConf.OPTIMIZER_EXCLUDED_RULES.key, ConvertToLocalRelation.ruleName) @@ -4251,7 +4251,7 @@ diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/test/TestHive.sca index a394d0b7393..a4bc3d3fd8e 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/test/TestHive.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/test/TestHive.scala -@@ -53,24 +53,41 @@ object TestHive +@@ -53,24 +53,34 @@ object TestHive new SparkContext( System.getProperty("spark.sql.test.master", "local[1]"), "TestSQLContext",