
I am new to Spark. I am trying to read a file from my master instance, but I am getting the error below. From my research, it seems I either need to load the data into HDFS or copy it across the cluster's nodes, but I cannot find the commands to do either.

---------------------------------------------------------------------------
Py4JJavaError                             Traceback (most recent call last)
<ipython-input-...> in <module>()
----> 1 ncols = rdd.first().features.size  # number of columns (no class) of the dataset

/home/ec2-user/spark/python/pyspark/rdd.pyc in first(self)
   1359             ValueError: RDD is empty
   1360         """
-> 1361         rs = self.take(1)
   1362         if rs:
   1363             return rs[0]

/home/ec2-user/spark/python/pyspark/rdd.pyc in take(self, num)
   1311         """
   1312         items = []
-> 1313         totalParts = self.getNumPartitions()
   1314         partsScanned = 0
   1315

/home/ec2-user/spark/python/pyspark/rdd.pyc in getNumPartitions(self)
   2438
   2439     def getNumPartitions(self):
-> 2440         return self._prev_jrdd.partitions().size()
   2441
   2442     @property

/home/ec2-user/spark/python/lib/py4j-0.10.4-src.zip/py4j/java_gateway.py in __call__(self, *args)
   1131         answer = self.gateway_client.send_command(command)
   1132         return_value = get_return_value(
-> 1133             answer, self.gateway_client, self.target_id, self.name)
   1134
   1135         for temp_arg in temp_args:

/home/ec2-user/spark/python/pyspark/sql/utils.pyc in deco(*a, **kw)
    61     def deco(*a, **kw):
    62         try:
---> 63             return f(*a, **kw)
    64         except py4j.protocol.Py4JJavaError as e:
    65             s = e.java_exception.toString()

/home/ec2-user/spark/python/lib/py4j-0.10.4-src.zip/py4j/protocol.py in get_return_value(answer, gateway_client, target_id, name)
    317                 raise Py4JJavaError(
    318                     "An error occurred while calling {0}{1}{2}.\n".
--> 319                     format(target_id, ".", name), value)
    320             else:
    321                 raise Py4JError(

Py4JJavaError: An error occurred while calling o122.partitions.
: org.apache.hadoop.mapred.InvalidInputException: Input path does not exist: file:/home/ec2-user/PR_DATA_35.csv
	at org.apache.hadoop.mapred.FileInputFormat.singleThreadedListStatus(FileInputFormat.java:285)
	at org.apache.hadoop.mapred.FileInputFormat.listStatus(FileInputFormat.java:228)
	at org.apache.hadoop.mapred.FileInputFormat.getSplits(FileInputFormat.java:313)
	at org.apache.spark.rdd.HadoopRDD.getPartitions(HadoopRDD.scala:194)
	at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:252)
	at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:250)
	at scala.Option.getOrElse(Option.scala:121)
	at org.apache.spark.rdd.RDD.partitions(RDD.scala:250)
	at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
	at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:252)
	at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:250)
	at scala.Option.getOrElse(Option.scala:121)
	at org.apache.spark.rdd.RDD.partitions(RDD.scala:250)
	at org.apache.spark.api.java.JavaRDDLike$class.partitions(JavaRDDLike.scala:61)
	at org.apache.spark.api.java.AbstractJavaRDDLike.partitions(JavaRDDLike.scala:45)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:498)
	at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
	at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)
	at py4j.Gateway.invoke(Gateway.java:280)
	at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
	at py4j.commands.CallCommand.execute(CallCommand.java:79)
	at py4j.GatewayConnection.run(GatewayConnection.java:214)
	at java.lang.Thread.run(Thread.java:748)


1 Answer


Since you are already on AWS, it may be easier to store your data files in S3 and read them directly from there.
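
For example, here is a minimal sketch of that approach, assuming a hypothetical bucket name my-bucket, that you have uploaded the file first (e.g. with aws s3 cp /home/ec2-user/PR_DATA_35.csv s3://my-bucket/ on the master), and that your cluster has the hadoop-aws connector and AWS credentials (such as an EC2 instance profile) configured:

from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("read-from-s3").getOrCreate()

# "my-bucket" is a placeholder -- replace it with your own bucket name.
# The s3a:// scheme needs the hadoop-aws jar on the classpath and valid
# AWS credentials; an instance profile on the EC2 nodes works well.
rdd = spark.sparkContext.textFile("s3a://my-bucket/PR_DATA_35.csv")
print(rdd.first())

If you would rather keep the file in HDFS instead, the command you were looking for is hadoop fs -put /home/ec2-user/PR_DATA_35.csv /PR_DATA_35.csv (run on the master), after which you can read it back with an hdfs:// path. The original error happens because a file:/ path is resolved on every worker node, and your CSV exists only on the master's local disk.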