I have a DataFrame with a column called 'description' whose values have the following format:

```
ABC XXXXXXXXXXXX STORE NAME ABC TYPE1
```

I would like to parse it into three different columns, like below:
| mode | type  | store      | description                           |
|------|-------|------------|---------------------------------------|
| ABC  | TYPE1 | STORE NAME | ABC XXXXXXXXXXXX STORE NAME ABC TYPE1 |
I tried the method suggested in the link here. It works for a simple UDF, but not for the function I have written. The challenge is that the value of store can be more than two words, i.e. there is no fixed number of words.
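For reference, here is a minimal way to build the `pos` DataFrame I test against (the sample rows are made up for illustration; the real data is loaded elsewhere):

```scala
import org.apache.spark.sql.SparkSession

val spark = SparkSession.builder().master("local[*]").appName("parse-description").getOrCreate()
import spark.implicits._

// Hypothetical sample rows with the same shape as the real data.
val pos = Seq(
  "ABC XXXXXXXXXXXX STORE NAME ABC TYPE1",
  "ABC XXXXXXXXXXXX LONGER STORE NAME ABC TYPE2"
).toDF("description")
```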
```scala
def myFunc1: (String => (String, String, String)) = { description =>
  // The last space-separated token is the type.
  var descripe = description.split(" ")
  val `type` = descripe(descripe.size - 1)  // `type` is a reserved word in Scala, hence the backticks
  // Re-split the text between the first "ABC " and the last "ABC":
  // the first token becomes the mode, the remainder the store.
  descripe = description.substring(description.indexOf("ABC") + 4, description.lastIndexOf("ABC")).split(" ")
  val mode = descripe(0)
  descripe(0) = ""
  val store = descripe.mkString(" ").trim
  (mode, store, `type`)
}
```
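A quick check outside Spark shows the function itself handles the sample value above without error, so the exception must come from some rows in the real data:

```scala
// Plain Scala sanity check, no Spark involved; this runs without throwing.
println(myFunc1("ABC XXXXXXXXXXXX STORE NAME ABC TYPE1"))
```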
```scala
import org.apache.spark.sql.types.{StringType, StructField, StructType}

val schema = StructType(Array(
  StructField("mode", StringType, true),
  StructField("store", StringType, true),
  StructField("type", StringType, true)
))
```
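(As an aside, if I understand the Spark 2.x API correctly, the typed overload of `udf` would infer a struct with fields `_1`, `_2`, `_3` from the tuple return type on its own, which is what the `getItem("_1")` calls below look up. Mentioning it only as an alternative I considered:)

```scala
import org.apache.spark.sql.functions.udf

// Alternative I considered: let Spark infer struct<_1,_2,_3> from the Tuple3.
val myUDFInferred = udf(myFunc1)
```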
```scala
import org.apache.spark.sql.functions.{col, udf}

val myUDF = udf(myFunc1, schema)

val test = pos.withColumn("test", myUDF(col("description")))
test.printSchema()

val a = test.withColumn("mode", col("test").getItem("_1"))
  .withColumn("store", col("test").getItem("_2"))
  .withColumn("type", col("test").getItem("_3"))
  .drop(col("test"))
a.printSchema()
a.show(5, false)
```
When I execute it, I get the error below:
```
18/10/06 21:38:02 ERROR Executor: Exception in task 0.0 in stage 5.0 (TID 5)
org.apache.spark.SparkException: Failed to execute user defined function($anonfun$myFunc1$1$1: (string) => struct(mode:string,store:string,type:string))
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIterator.processNext(Unknown Source)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$8$$anon$1.hasNext(WholeStageCodegenExec.scala:395)
	at org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:234)
	at org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:228)
	at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$25.apply(RDD.scala:827)
	at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$25.apply(RDD.scala:827)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87)
	at org.apache.spark.scheduler.Task.run(Task.scala:108)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:338)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.StringIndexOutOfBoundsException: String index out of range: -4
	at java.lang.String.substring(String.java:1967)
	at com.hasif.bank.track.trasaction.TransactionParser$$anonfun$myFunc1$1$1.apply(TransactionParser.scala:26)
	at com.hasif.bank.track.trasaction.TransactionParser$$anonfun$myFunc1$1$1.apply(TransactionParser.scala:22)
	... 16 more
```
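If I read the trace correctly, the `-4` in the message is `endIndex - beginIndex` from Java's `String.substring`, which would mean `lastIndexOf("ABC")` falls before `indexOf("ABC") + 4` for some rows, presumably rows where "ABC" occurs only once. A made-up value reproduces the same exception outside Spark:

```scala
// Hypothetical row containing "ABC" only once:
val s = "ABC STORE TYPE1"
// indexOf("ABC") + 4 == 4, lastIndexOf("ABC") == 0, so this is substring(4, 0):
s.substring(s.indexOf("ABC") + 4, s.lastIndexOf("ABC"))
// => java.lang.StringIndexOutOfBoundsException: String index out of range: -4
```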
Any pointers on this will be appreciated.