diff --git a/mllib/src/main/scala/org/apache/spark/ml/source/image/ImageFileFormat.scala b/mllib/src/main/scala/org/apache/spark/ml/source/image/ImageFileFormat.scala
index bf6e6b8eec036..4b8ce8eb85dcf 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/source/image/ImageFileFormat.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/source/image/ImageFileFormat.scala
@@ -32,7 +32,7 @@ import org.apache.spark.sql.sources.{DataSourceRegister, Filter}
 import org.apache.spark.sql.types.StructType
 import org.apache.spark.util.SerializableConfiguration
 
-private[image] class ImageFileFormat extends FileFormat with DataSourceRegister {
+private[image] case class ImageFileFormat() extends FileFormat with DataSourceRegister {
 
   override def inferSchema(
       sparkSession: SparkSession,
diff --git a/mllib/src/main/scala/org/apache/spark/ml/source/libsvm/LibSVMRelation.scala b/mllib/src/main/scala/org/apache/spark/ml/source/libsvm/LibSVMRelation.scala
index 9e641161d9b97..f093dc037528a 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/source/libsvm/LibSVMRelation.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/source/libsvm/LibSVMRelation.scala
@@ -67,9 +67,9 @@ private[libsvm] class LibSVMOutputWriter(
   }
 }
 
-/** @see [[LibSVMDataSource]] for public documentation. */
+// see `LibSVMDataSource` for public documentation.
 // If this is moved or renamed, please update DataSource's backwardCompatibilityMap.
-private[libsvm] class LibSVMFileFormat
+private[libsvm] case class LibSVMFileFormat()
   extends TextBasedFileFormat
   with DataSourceRegister
   with Logging {
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/binaryfile/BinaryFileFormat.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/binaryfile/BinaryFileFormat.scala
index 54c100282e2db..87326615f3266 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/binaryfile/BinaryFileFormat.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/binaryfile/BinaryFileFormat.scala
@@ -55,7 +55,7 @@ import org.apache.spark.util.SerializableConfiguration
 *   .load("/path/to/fileDir");
 * }}}
 */
-class BinaryFileFormat extends FileFormat with DataSourceRegister {
+case class BinaryFileFormat() extends FileFormat with DataSourceRegister {
 
   import BinaryFileFormat._
 
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVFileFormat.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVFileFormat.scala
index 313c803c43061..a65f7bbbeba50 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVFileFormat.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVFileFormat.scala
@@ -35,7 +35,7 @@ import org.apache.spark.util.SerializableConfiguration
 /**
 * Provides access to CSV data from pure SQL statements.
 */
-class CSVFileFormat extends TextBasedFileFormat with DataSourceRegister {
+case class CSVFileFormat() extends TextBasedFileFormat with DataSourceRegister {
 
   override def shortName(): String = "csv"
 
@@ -158,10 +158,6 @@ class CSVFileFormat extends TextBasedFileFormat with DataSourceRegister {
 
   override def toString: String = "CSV"
 
-  override def hashCode(): Int = getClass.hashCode()
-
-  override def equals(other: Any): Boolean = other.isInstanceOf[CSVFileFormat]
-
   /**
   * Allow reading variant from CSV, but don't allow writing variant into CSV. This is because the
   * written data (the string representation of variant) may not be read back as the same variant.
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/JsonFileFormat.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/JsonFileFormat.scala
index ae8cdf98e20c7..e38ca137b162d 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/JsonFileFormat.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/JsonFileFormat.scala
@@ -32,7 +32,7 @@ import org.apache.spark.sql.sources._
 import org.apache.spark.sql.types._
 import org.apache.spark.util.SerializableConfiguration
 
-class JsonFileFormat extends TextBasedFileFormat with DataSourceRegister {
+case class JsonFileFormat() extends TextBasedFileFormat with DataSourceRegister {
   override val shortName: String = "json"
 
   override def isSplitable(
@@ -128,10 +128,6 @@ class JsonFileFormat extends TextBasedFileFormat with DataSourceRegister {
 
   override def toString: String = "JSON"
 
-  override def hashCode(): Int = getClass.hashCode()
-
-  override def equals(other: Any): Boolean = other.isInstanceOf[JsonFileFormat]
-
   override def supportDataType(dataType: DataType): Boolean = dataType match {
     case _: VariantType => true
 
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/text/TextFileFormat.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/text/TextFileFormat.scala
index 3f2024126717d..7af239f99d45e 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/text/TextFileFormat.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/text/TextFileFormat.scala
@@ -37,7 +37,7 @@ import org.apache.spark.util.{SerializableConfiguration, Utils}
 /**
 * A data source for reading text files. The text files must be encoded as UTF-8.
 */
-class TextFileFormat extends TextBasedFileFormat with DataSourceRegister {
+case class TextFileFormat() extends TextBasedFileFormat with DataSourceRegister {
 
   override def shortName(): String = "text"
 
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/xml/XmlFileFormat.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/xml/XmlFileFormat.scala
index eb647c41d0d19..e5004c499a070 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/xml/XmlFileFormat.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/xml/XmlFileFormat.scala
@@ -35,7 +35,7 @@ import org.apache.spark.util.SerializableConfiguration
 /**
 * Provides access to XML data from pure SQL statements.
 */
-class XmlFileFormat extends TextBasedFileFormat with DataSourceRegister {
+case class XmlFileFormat() extends TextBasedFileFormat with DataSourceRegister {
 
   override def shortName(): String = "xml"
 
@@ -132,10 +132,6 @@ class XmlFileFormat extends TextBasedFileFormat with DataSourceRegister {
 
   override def toString: String = "XML"
 
-  override def hashCode(): Int = getClass.hashCode()
-
-  override def equals(other: Any): Boolean = other.isInstanceOf[XmlFileFormat]
-
   override def supportDataType(dataType: DataType): Boolean = dataType match {
     case _: VariantType => true
 
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategySuite.scala
index 9676c42688920..94a0501b74d47 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategySuite.scala
@@ -722,7 +722,7 @@ object LastArguments {
 }
 
 /** A test [[FileFormat]] that records the arguments passed to buildReader, and returns nothing. */
-class TestFileFormat extends TextBasedFileFormat {
+case class TestFileFormat() extends TextBasedFileFormat {
 
   override def toString: String = "TestFileFormat"
 
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveFileFormat.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveFileFormat.scala
index 0d4efd9e77742..d8f9405780d8c 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveFileFormat.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveFileFormat.scala
@@ -48,7 +48,7 @@ import org.apache.spark.util.SerializableJobConf
 *
 * TODO: implement the read logic.
 */
-class HiveFileFormat(fileSinkConf: FileSinkDesc)
+case class HiveFileFormat(fileSinkConf: FileSinkDesc)
   extends FileFormat with DataSourceRegister with Logging {
 
   def this() = this(null)
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFileFormat.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFileFormat.scala
index f3ce637439a13..623ddeb7ac4ab 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFileFormat.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFileFormat.scala
@@ -55,16 +55,12 @@ import org.apache.spark.util.SerializableConfiguration
 * `FileFormat` for reading ORC files. If this is moved or renamed, please update
 * `DataSource`'s backwardCompatibilityMap.
 */
-class OrcFileFormat extends FileFormat with DataSourceRegister with Serializable {
+case class OrcFileFormat() extends FileFormat with DataSourceRegister with Serializable {
 
   override def shortName(): String = "orc"
 
   override def toString: String = "ORC"
 
-  override def hashCode(): Int = getClass.hashCode()
-
-  override def equals(other: Any): Boolean = other.isInstanceOf[OrcFileFormat]
-
   override def inferSchema(
       sparkSession: SparkSession,
       options: Map[String, String],
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/sources/SimpleTextRelation.scala b/sql/hive/src/test/scala/org/apache/spark/sql/sources/SimpleTextRelation.scala
index a89ea2424696e..d19db3eb0ed75 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/sources/SimpleTextRelation.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/sources/SimpleTextRelation.scala
@@ -33,7 +33,7 @@ import org.apache.spark.util.ArrayImplicits._
 import org.apache.spark.util.SerializableConfiguration
 import org.apache.spark.util.Utils
 
-class SimpleTextSource extends TextBasedFileFormat with DataSourceRegister {
+case class SimpleTextSource() extends TextBasedFileFormat with DataSourceRegister {
   override def shortName(): String = "test"
 
   override def inferSchema(
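
The hunks above all apply the same mechanical change: each `FileFormat` implementation becomes a case class, and the hand-written `hashCode`/`equals` overrides are dropped where they existed. A minimal standalone sketch of why that is safe, assuming nothing beyond standard Scala semantics: the compiler synthesizes structural `equals`/`hashCode` for a case class from its constructor parameters, so any two instances of a zero-parameter case class compare equal, matching the behavior of the deleted `other.isInstanceOf[...]` overrides. The `DemoFileFormat` name below is hypothetical, not part of the patch.

// Hypothetical stand-in for the zero-parameter formats converted above.
case class DemoFileFormat()

object DemoFileFormatEqualityCheck {
  def main(args: Array[String]): Unit = {
    val a = DemoFileFormat()
    val b = DemoFileFormat()
    // Compiler-synthesized equals/hashCode are structural, so two distinct
    // instances of a zero-parameter case class always compare equal,
    // giving the same result as the removed manual overrides.
    assert(a == b)
    assert(a.hashCode == b.hashCode)
  }
}

One behavioral note: `HiveFileFormat(fileSinkConf: FileSinkDesc)` keeps its constructor parameter, so its synthesized equality compares `fileSinkConf` values rather than using the object-identity equality the plain class inherited.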