diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala
index d550683485779..42f4a7245031c 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala
@@ -21,7 +21,6 @@ import java.net.{URI, URISyntaxException}
 
 import scala.collection.JavaConverters._
 import scala.collection.mutable.ArrayBuffer
-import scala.util.Try
 import scala.util.control.NonFatal
 
 import org.apache.hadoop.fs.{FileContext, FsConstants, Path}
@@ -192,18 +191,19 @@ case class AlterTableRenameCommand(
     } else {
       val table = catalog.getTableMetadata(oldName)
       DDLUtils.verifyAlterTableType(catalog, table, isView)
-      // If an exception is thrown here we can just assume the table is uncached;
-      // this can happen with Hive tables when the underlying catalog is in-memory.
-      val wasCached = Try(sparkSession.catalog.isCached(oldName.unquotedString)).getOrElse(false)
-      if (wasCached) {
+      // If `optStorageLevel` is defined, the old table was cached.
+      val optCachedData = sparkSession.sharedState.cacheManager.lookupCachedData(
+        sparkSession.table(oldName.unquotedString))
+      val optStorageLevel = optCachedData.map(_.cachedRepresentation.cacheBuilder.storageLevel)
+      if (optStorageLevel.isDefined) {
         CommandUtils.uncacheTableOrView(sparkSession, oldName.unquotedString)
       }
       // Invalidate the table last, otherwise uncaching the table would load the logical plan
       // back into the hive metastore cache
       catalog.refreshTable(oldName)
       catalog.renameTable(oldName, newName)
-      if (wasCached) {
-        sparkSession.catalog.cacheTable(newName.unquotedString)
+      optStorageLevel.foreach { storageLevel =>
+        sparkSession.catalog.cacheTable(newName.unquotedString, storageLevel)
       }
     }
     Seq.empty[Row]
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala
index 6313370476c93..e14c72eee9646 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala
@@ -1250,4 +1250,24 @@ class CachedTableSuite extends QueryTest with SQLTestUtils
       }
     }
   }
+
+  test("SPARK-33786: Cache's storage level should be respected when a table name is altered.") {
+    withTable("old", "new") {
+      withTempPath { path =>
+        def getStorageLevel(tableName: String): StorageLevel = {
+          val table = spark.table(tableName)
+          val cachedData = spark.sharedState.cacheManager.lookupCachedData(table).get
+          cachedData.cachedRepresentation.cacheBuilder.storageLevel
+        }
+        Seq(1 -> "a").toDF("i", "j").write.parquet(path.getCanonicalPath)
+        sql(s"CREATE TABLE old USING parquet LOCATION '${path.toURI}'")
+        sql("CACHE TABLE old OPTIONS('storageLevel' 'MEMORY_ONLY')")
+        val oldStorageLevel = getStorageLevel("old")
+
+        sql("ALTER TABLE old RENAME TO new")
+        val newStorageLevel = getStorageLevel("new")
+        assert(oldStorageLevel === newStorageLevel)
+      }
+    }
+  }
 }