diff --git a/.github/workflows/spark_sql_test.yml b/.github/workflows/spark_sql_test.yml
index 980629174f..146c32c7fe 100644
--- a/.github/workflows/spark_sql_test.yml
+++ b/.github/workflows/spark_sql_test.yml
@@ -142,6 +142,7 @@ jobs:
- {spark-short: '4.0', spark-full: '4.0.2', java: 17, scan-impl: 'auto'}
- {spark-short: '4.0', spark-full: '4.0.2', java: 21, scan-impl: 'auto'}
- {spark-short: '4.1', spark-full: '4.1.1', java: 17, scan-impl: 'auto'}
+ - {spark-short: '4.2', spark-full: '4.2.0-preview4', java: 17, scan-impl: 'auto'}
fail-fast: false
name: spark-sql-${{ matrix.config.scan-impl }}-${{ matrix.module.name }}/spark-${{ matrix.config.spark-full }}
# Hive tests stay on the standard GitHub-hosted runner: HiveSparkSubmitSuite
diff --git a/dev/diffs/4.2.0-preview4.diff b/dev/diffs/4.2.0-preview4.diff
new file mode 100644
index 0000000000..059d914b35
--- /dev/null
+++ b/dev/diffs/4.2.0-preview4.diff
@@ -0,0 +1,4311 @@
+diff --git a/core/src/test/scala/org/apache/spark/storage/FallbackStorageSuite.scala b/core/src/test/scala/org/apache/spark/storage/FallbackStorageSuite.scala
+index 6df8bc85b51..dabb75e2b75 100644
+--- a/core/src/test/scala/org/apache/spark/storage/FallbackStorageSuite.scala
++++ b/core/src/test/scala/org/apache/spark/storage/FallbackStorageSuite.scala
+@@ -268,6 +268,11 @@ class FallbackStorageSuite extends SparkFunSuite with LocalSparkContext {
+ }
+
+ test("Upload from all decommissioned executors") {
++ // Comet replaces Spark's shuffle with its own native shuffle, which is incompatible with
++ // the fallback storage migration path used by BlockManagerDecommissioner.
++ val cometEnv = System.getenv("ENABLE_COMET")
++ assume(cometEnv == null || cometEnv == "0" || cometEnv == "false",
++ "Skipped when Comet is enabled: incompatible with Comet native shuffle storage")
+ sc = new SparkContext(getSparkConf(2, 2))
+ withSpark(sc) { sc =>
+ TestUtils.waitUntilExecutorsUp(sc, 2, 60000)
+@@ -298,6 +303,11 @@ class FallbackStorageSuite extends SparkFunSuite with LocalSparkContext {
+ }
+
+ test("Upload multi stages") {
++ // Comet replaces Spark's shuffle with its own native shuffle, which is incompatible with
++ // the fallback storage migration path used by BlockManagerDecommissioner.
++ val cometEnv = System.getenv("ENABLE_COMET")
++ assume(cometEnv == null || cometEnv == "0" || cometEnv == "false",
++ "Skipped when Comet is enabled: incompatible with Comet native shuffle storage")
+ sc = new SparkContext(getSparkConf())
+ withSpark(sc) { sc =>
+ TestUtils.waitUntilExecutorsUp(sc, 1, 60000)
+@@ -332,6 +342,11 @@ class FallbackStorageSuite extends SparkFunSuite with LocalSparkContext {
+
+ CompressionCodec.shortCompressionCodecNames.keys.foreach { codec =>
+ test(s"$codec - Newly added executors should access old data from remote storage") {
++ // Comet replaces Spark's shuffle with its own native shuffle, which is incompatible with
++ // the fallback storage migration path used by BlockManagerDecommissioner.
++ val cometEnv = System.getenv("ENABLE_COMET")
++ assume(cometEnv == null || cometEnv == "0" || cometEnv == "false",
++ "Skipped when Comet is enabled: incompatible with Comet native shuffle storage")
+ sc = new SparkContext(getSparkConf(2, 0).set(IO_COMPRESSION_CODEC, codec))
+ withSpark(sc) { sc =>
+ TestUtils.waitUntilExecutorsUp(sc, 2, 60000)
+diff --git a/pom.xml b/pom.xml
+index 1e7774b3ae6..6d36b281332 100644
+--- a/pom.xml
++++ b/pom.xml
+@@ -152,6 +152,8 @@
+ 4.0.3
+ 2.5.3
+ 2.0.8
++    <spark.version.short>4.2</spark.version.short>
++    <comet.version>0.16.0-SNAPSHOT</comet.version>
+
+
+ org.apache.datasketches
+diff --git a/sql/core/pom.xml b/sql/core/pom.xml
+index cd7b2fe7805..cbd987e29b8 100644
+--- a/sql/core/pom.xml
++++ b/sql/core/pom.xml
+@@ -97,6 +97,10 @@
+ org.apache.spark
+ spark-tags_${scala.binary.version}
+
++    <dependency>
++      <groupId>org.apache.datafusion</groupId>
++      <artifactId>comet-spark-spark${spark.version.short}_${scala.binary.version}</artifactId>
++    </dependency>
+
+
+
spark-4.2
2.13.18