
Commit 2fb214e

Reformat with scalafmt 3.7.17
Executed command: scalafmt --non-interactive
scala-steward authored and nightscape committed Nov 22, 2023
1 parent 3918a56 commit 2fb214e

Showing 5 changed files with 9 additions and 9 deletions. Every change is formatting-only; in the first three hunks below, the removed and added lines differ only in whitespace.
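
The repository's .scalafmt.conf is not part of this diff, but a pinned formatter version is what makes `scalafmt --non-interactive` reproducible for bots like scala-steward. A minimal sketch of such a config; the dialect and column width are assumptions (inferred from the roughly 120-character lines in the hunks below), not the project's actual settings:

    # .scalafmt.conf (hypothetical sketch, not the repository's actual file)
    version = "3.7.17"          # pin the formatter so every run gives identical output
    runner.dialect = scala212   # assumed dialect; the real project may differ
    maxColumn = 120             # assumed from the line widths visible in this diff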

ExcelParserBase.scala (copy 1 of 3)

@@ -24,5 +24,5 @@ trait ExcelParserBase {

   protected def getCurrentInput: UTF8String
   def badRecord(partialResults: Array[InternalRow], baseException: Throwable): BadRecordException =
-    BadRecordException(() => getCurrentInput, () => partialResults.headOption, baseException)
+    BadRecordException(() => getCurrentInput, () => partialResults.headOption, baseException)
 }

ExcelParserBase.scala (copy 2 of 3)

@@ -24,5 +24,5 @@ trait ExcelParserBase {

   protected def getCurrentInput: UTF8String
   def badRecord(partialResults: Array[InternalRow], baseException: Throwable): BadRecordException =
-    BadRecordException(() => getCurrentInput, () => partialResults.headOption, baseException)
+    BadRecordException(() => getCurrentInput, () => partialResults.headOption, baseException)
 }

ExcelParserBase.scala (copy 3 of 3; this copy passes the array through rather than its headOption)

@@ -24,5 +24,5 @@ trait ExcelParserBase {

   protected def getCurrentInput: UTF8String
   def badRecord(partialResults: Array[InternalRow], baseException: Throwable): BadRecordException =
-    BadRecordException(() => getCurrentInput, () => partialResults, baseException)
+    BadRecordException(() => getCurrentInput, () => partialResults, baseException)
 }
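
The first two copies wrap the partial results in `.headOption`, while this third copy passes the array through unchanged. That lines up with a signature change in Spark's BadRecordException, whose partialResults thunk returns Option[InternalRow] in older Spark releases and Array[InternalRow] from Spark 3.4 on, which is presumably why the trait exists in several version-specific copies. A hedged sketch of the two constructions (only one compiles against any given Spark version, hence the commented-out line):

    import org.apache.spark.sql.catalyst.InternalRow
    import org.apache.spark.sql.catalyst.util.BadRecordException
    import org.apache.spark.unsafe.types.UTF8String

    // Sketch, not from this commit: how the two shim variants differ.
    val record = () => UTF8String.fromString("unparseable row")
    val partial = Array.empty[InternalRow]
    val cause = new RuntimeException("cell conversion failed")

    // Older Spark: partialResults is a () => Option[InternalRow]
    val forOlderSpark = BadRecordException(record, () => partial.headOption, cause)
    // Spark 3.4+: partialResults is a () => Array[InternalRow]
    // val forNewerSpark = BadRecordException(record, () => partial, cause)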

ExcelPartitionReaderFactory.scala

@@ -18,7 +18,7 @@ package com.crealytics.spark.excel.v2

 import org.apache.hadoop.conf.Configuration
 import org.apache.spark.broadcast.Broadcast
-import org.apache.spark.sql.catalyst.{InternalRow, FileSourceOptions}
+import org.apache.spark.sql.catalyst.{FileSourceOptions, InternalRow}
 import org.apache.spark.sql.connector.read.PartitionReader
 import org.apache.spark.sql.execution.datasources.PartitionedFile
 import org.apache.spark.sql.execution.datasources.v2._

@@ -54,10 +54,9 @@ case class ExcelPartitionReaderFactory(
   parsedOptions: ExcelOptions,
   filters: Seq[Filter]
 ) extends FilePartitionReaderFactory {
-  protected def options: FileSourceOptions = new FileSourceOptions(Map(
-    FileSourceOptions.IGNORE_CORRUPT_FILES -> "true",
-    FileSourceOptions.IGNORE_MISSING_FILES -> "true"
-  ))
+  protected def options: FileSourceOptions = new FileSourceOptions(
+    Map(FileSourceOptions.IGNORE_CORRUPT_FILES -> "true", FileSourceOptions.IGNORE_MISSING_FILES -> "true")
+  )
   override def buildReader(file: PartitionedFile): PartitionReader[InternalRow] = {
     val conf = broadcastedConf.value.value
     val actualDataSchema =
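
FileSourceOptions wraps a case-insensitive string map, and the two flags set above tell Spark to skip corrupt or missing files during the scan instead of failing it. A small hedged check of that round-trip (assuming a Spark version that ships org.apache.spark.sql.catalyst.FileSourceOptions, which this source tree evidently targets):

    import org.apache.spark.sql.catalyst.FileSourceOptions

    // Sketch, not from this commit: the string flags read back as booleans.
    val opts = new FileSourceOptions(
      Map(FileSourceOptions.IGNORE_CORRUPT_FILES -> "true", FileSourceOptions.IGNORE_MISSING_FILES -> "true")
    )
    assert(opts.ignoreCorruptFiles && opts.ignoreMissingFiles)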

ExcelParser.scala

@@ -42,7 +42,8 @@ import org.apache.poi.ss.usermodel.DateUtil
  *   The pushdown filters that should be applied to converted values.
  */
 class ExcelParser(dataSchema: StructType, requiredSchema: StructType, val options: ExcelOptions, filters: Seq[Filter])
-    extends Logging with ExcelParserBase {
+    extends Logging
+    with ExcelParserBase {
   require(
     requiredSchema.toSet.subsetOf(dataSchema.toSet),
     s"requiredSchema (${requiredSchema.catalogString}) should be the subset of " +
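
The require at the end of this hunk guards column pruning: every field in the requested schema must exist in the file's full schema. Since StructType is a Seq[StructField], the check is plain set containment; a tiny illustration with made-up field names:

    import org.apache.spark.sql.types._

    val dataSchema = StructType(Seq(StructField("name", StringType), StructField("age", IntegerType)))
    val requiredSchema = StructType(Seq(StructField("age", IntegerType)))

    // The same predicate the require above evaluates.
    assert(requiredSchema.toSet.subsetOf(dataSchema.toSet))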
