I am trying to read a local file when running Spark on the YARN framework, but I am not able to access the local file even in client mode.
import os
import pyspark.sql.functions as F
from os import listdir, path
from pyspark import SparkConf, SparkContext
import argparse
from pyspark import SparkFiles
from pyspark.sql import SparkSession


def main():
    spark = SparkSession \
        .builder \
        .appName("Spark File load example") \
        .config("spark.jars", "/u/user/someuser/sqljdbc4.jar") \
        .config("spark.dynamicAllocation.enabled", "true") \
        .config("spark.shuffle.service.enabled", "true") \
        .config("hive.exec.dynamic.partition", "true") \
        .config("hive.exec.dynamic.partition.mode", "nonstrict") \
        .config("spark.sql.shuffle.partitions", "50") \
        .config("hive.metastore.uris", "thrift://******.hpc.****.com:9083") \
        .enableHiveSupport() \
        .getOrCreate()
    spark.sparkContext.addFile("/u/user/vikrant/testdata/EMPFILE1.csv")
    inputfilename = getinputfile(spark)
    print("input file path is:", inputfilename)
    data = processfiledata(spark, inputfilename)
    data.show()
    spark.stop()


def getinputfile(spark):
    spark_files_dir = SparkFiles.getRootDirectory()
    print("spark_files_dir:", spark_files_dir)
    inputfile = [filename
                 for filename in listdir(spark_files_dir)
                 if filename.endswith('EMPFILE1.csv')]
    if len(inputfile) != 0:
        path_to_input_file = path.join(spark_files_dir, inputfile[0])
    else:
        print("file path not found", path_to_input_file)
    print("inputfile name:", inputfile)
    return path_to_input_file


def processfiledata(spark, inputfilename):
    dataframe = spark.read.format("csv").option("header", "false").load(inputfilename)
    return dataframe


if __name__ == "__main__":
    main()
Below is my shell script:
spark-submit --master yarn --deploy-mode client PysparkMainModulenew.py --files /u/user/vikrant/testdata/EMPFILE1.csv
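Note: spark-submit treats everything after the application file as arguments to the program itself, so the --files option placed after PysparkMainModulenew.py is most likely not being processed by spark-submit at all (the addFile call in the code is what actually shipped the file). If --files is intended, it would need to come before the script, e.g.:

spark-submit --master yarn --deploy-mode client --files /u/user/vikrant/testdata/EMPFILE1.csv PysparkMainModulenew.py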
Below is the error message:
('spark_files_dir:', u'/h/tmp/spark-76bdbd48-cbb4-4e8f-971a-383b899f79b0/userFiles-ee6dcdec-b320-433b-8491-311927c75fe2')
('inputfile name:', [u'EMPFILE1.csv'])
('input file path is:', u'/h/tmp/spark-76bdbd48-cbb4-4e8f-971a-383b899f79b0/userFiles-ee6dcdec-b320-433b-8491-311927c75fe2/EMPFILE1.csv')
Traceback (most recent call last):
  File "/u/user/vikrant/testdata/PysparkMainModulenew.py", line 57, in <module>
    main()
  File "/u/user/vikrant/testdata/PysparkMainModulenew.py", line 31, in main
    data = processfiledata(spark,inputfilename)
  File "/u/user/vikrant/testdata/PysparkMainModulenew.py", line 53, in processfiledata
    dataframe = spark.read.format("csv").option("header","false").load(inputfilename)
  File "/usr/hdp/current/spark2-client/python/lib/pyspark.zip/pyspark/sql/readwriter.py", line 166, in load
  File "/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip/py4j/java_gateway.py", line 1160, in __call__
  File "/usr/hdp/current/spark2-client/python/lib/pyspark.zip/pyspark/sql/utils.py", line 69, in deco
pyspark.sql.utils.AnalysisException: u'Path does not exist: hdfs://hdd2cluster/h/tmp/spark-76bdbd48-cbb4-4e8f-971a-383b899f79b0/userFiles-ee6dcdec-b320-433b-8491-311927c75fe2/EMPFILE1.csv;'
Try prefixing the path with file:// – Sudev Ambadi
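A minimal sketch of what the comment above suggests, assuming the file is present at the same path on the node(s) doing the read: prefix the path with file:// so Spark resolves it on the local filesystem instead of against the cluster's default filesystem (hdfs://hdd2cluster), which is where the bare path in the traceback is being looked up. The processfiledata variant below is illustrative, not the original code.

from pyspark import SparkFiles


def processfiledata(spark, inputfilename):
    # "file://" forces the local filesystem; without it, Spark prepends the
    # default FS (hdfs://hdd2cluster), where the SparkFiles path does not exist.
    return spark.read.format("csv") \
        .option("header", "false") \
        .load("file://" + inputfilename)

Note that on YARN the executors do the actual reading, so this only works if the file is available at that path on the executor nodes as well; otherwise the simplest route is to copy the file to HDFS and read it from there.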