2
votes

I want to load a specific list of files into Spark. I made a UDF which filters the list of files, so I obtained a Seq<String> with the files I want to load. I'm trying to use public Dataset<Row> csv(scala.collection.Seq<String> paths) link to API but for some reasons, it tells me "overloaded method value csv with alternatives: (paths: String*)org.apache.spark.sql.DataFrame (csvDataset: org.apache.spark.sql.Dataset[String])org.apache.spark.sql.DataFrame (path: String)org.apache.spark.sql.DataFrame cannot be applied to (Seq[String])"

What am I missing here?

package main.scala.task

import org.apache.spark.sql.SparkSession


object Test {

  def date_filter: (java.io.File, Int) => Boolean = (file: java.io.File, threshold: Int) => {
    val pat1 = "(\\d+)".r // Pattern to capture timestamp
    val matcher1 = pat1.findFirstMatchIn(file.getName) // Capture timestamp

    matcher1 match {
      case Some(matched1) => {if (matched1.group(1).toInt > threshold) {true} else {false}}
      case None => false
    }
  }

  def main(args: Array[String]) {

    val spark = SparkSession.builder().getOrCreate()
    import spark.implicits._

    val path = "/my_folder/Test"

    val now: Int = 1486022490
    var files = new java.io.File(path).listFiles.filter(p => date_filter(p,now)).map(_.getName).toSeq

    val df = spark.read.csv(files)

  }
}
1

1 Answers

3
votes

The error is telling you that you need to use varargs instead of passing a sequence of strings.

val files: Seq[String] = Seq("/path/to/file1.txt", "/path/to/file2.txt")
val df = spark.read.csv(files:_*)

Also, you linked to the Java docs instead of the Scala docs which might explain your confusion.