
This is actually related to the question "How can I add row numbers for rows in PIG or HIVE?"

The third answer there, provided by srini, works fine, but I have trouble accessing the data after applying the UDF.

The UDF provided by srini is the following:

import java.io.IOException;
import java.util.Iterator;
import org.apache.pig.EvalFunc;
import org.apache.pig.backend.executionengine.ExecException;
import org.apache.pig.data.BagFactory;
import org.apache.pig.data.DataBag;
import org.apache.pig.data.Tuple;
import org.apache.pig.data.TupleFactory;
import org.apache.pig.impl.logicalLayer.schema.Schema;
import org.apache.pig.data.DataType;

public class RowCounter extends EvalFunc<DataBag> {
    TupleFactory mTupleFactory = TupleFactory.getInstance();
    BagFactory mBagFactory = BagFactory.getInstance();

    public DataBag exec(Tuple input) throws IOException {
        try {
            DataBag output = mBagFactory.newDefaultBag();
            DataBag bg = (DataBag) input.get(0);
            Iterator<Tuple> it = bg.iterator();
            // append a 1-based row counter to every tuple of the input bag
            int count = 1;
            while (it.hasNext()) {
                Tuple t = it.next();
                t.append(count);
                output.add(t);
                count++;
            }
            return output;
        } catch (ExecException ee) {
            // error handling goes here
            throw ee;
        }
    }

    public Schema outputSchema(Schema input) {
        try {
            // only declares an inner bag named "RowCounter" with no field schema,
            // so the tuples inside the output bag carry no schema downstream
            Schema bagSchema = new Schema();
            bagSchema.add(new Schema.FieldSchema("RowCounter", DataType.BAG));
            return new Schema(new Schema.FieldSchema(
                    getSchemaName(this.getClass().getName().toLowerCase(), input),
                    bagSchema, DataType.BAG));
        } catch (Exception e) {
            return null;
        }
    }
}
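
For completeness, the test script below assumes the UDF class has been compiled, packaged into a jar and registered in Pig first; a minimal sketch (the jar name myudfs.jar is an assumption):

REGISTER myudfs.jar;   -- jar containing the myudfs.RowCounter class (name assumed)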

I wrote a simple test Pig script as follows:

A = load 'input.txt' using PigStorage(' ') as (name:chararray, age:int);
/*
--A: {name: chararray,age: int}
(amy,56)
(bob,1)
(bob,9)
(amy,34)
(bob,20)
(amy,78)
*/
B = group A by name;
C = foreach B {
   orderedGroup = order A by age;
   generate myudfs.RowCounter(orderedGroup) as t;
}
/*
--C: {t: {(RowCounter: {})}}
({(amy,34,1),(amy,56,2),(amy,78,3)})
({(bob,1,1),(bob,9,2),(bob,20,3)})
*/
D = foreach C generate FLATTEN(t);
/*
D: {t::RowCounter: {}}
(amy,34,1)
(amy,56,2)
(amy,78,3)
(bob,1,1)
(bob,9,2)
(bob,20,3)
*/

The problem is how to use D in later operations. I tried multiple ways, but always got the following error:

java.lang.ClassCastException: java.lang.String cannot be cast to org.apache.pig.data.DataBag
    at org.apache.pig.backend.hadoop.executionengine.physicalLayer.expressionOperators.POProject.processInputBag(POProject.java:575)
    at org.apache.pig.backend.hadoop.executionengine.physicalLayer.expressionOperators.POProject.getNext(POProject.java:248)
    at org.apache.pig.backend.hadoop.executionengine.physicalLayer.PhysicalOperator.getNext(PhysicalOperator.java:316)
    at org.apache.pig.backend.hadoop.executionengine.physicalLayer.relationalOperators.POForEach.processPlan(POForEach.java:332)
    at org.apache.pig.backend.hadoop.executionengine.physicalLayer.relationalOperators.POForEach.getNext(POForEach.java:284)
    at org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigGenericMapReduce$Reduce.runPipeline(PigGenericMapReduce.java:459)
    at org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigGenericMapReduce$Reduce.processOnePackageOutput(PigGenericMapReduce.java:427)
    at org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigGenericMapReduce$Reduce.reduce(PigGenericMapReduce.java:407)
    at org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigGenericMapReduce$Reduce.reduce(PigGenericMapReduce.java:261)
    at org.apache.hadoop.mapreduce.Reducer.run(Reducer.java:176)
    at org.apache.hadoop.mapred.ReduceTask.runNewReducer(ReduceTask.java:572)
    at org.apache.hadoop.mapred.ReduceTask.run(ReduceTask.java:414)
    at org.apache.hadoop.mapred.LocalJobRunner$Job.run(LocalJobRunner.java:256)

My guess is that this is because we don't have a schema for the tuples inside the bag. If this is the reason, how should I modify the UDF?
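
For reference, the kind of later operation I mean looks roughly like this (a sketch only; the exact statements I tried varied, and the field references are assumptions, since D exposes no usable field names at this point):

E = foreach D generate $0, $2;   -- e.g. project name and counter; fails with the cast error above
dump E;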


1 Answer


OK, I found the solution by replacing the outputSchema method with the following:

public Schema outputSchema(Schema input) {
    try {
        // the extra field appended by exec()
        Schema.FieldSchema counter = new Schema.FieldSchema("counter", DataType.INTEGER);

        // copy the schema of the tuples inside the input bag and add the counter field
        Schema tupleSchema = new Schema(input.getField(0).schema.getField(0).schema.getFields());
        tupleSchema.add(counter);

        Schema.FieldSchema tupleFs;
        tupleFs = new Schema.FieldSchema("with_counter", tupleSchema, DataType.TUPLE);

        Schema bagSchema = new Schema(tupleFs);
        return new Schema(new Schema.FieldSchema("row_counter",
                                                 bagSchema, DataType.BAG));
    } catch (Exception e) {
        return null;
    }
}
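
With this schema, the tuples in the returned bag keep the input field names plus the added counter field, so the flattened relation can be referenced by name downstream. A rough usage sketch (the aliases after FLATTEN may be prefixed, e.g. t::counter; describe D; shows the names actually produced):

describe D;                        -- now shows a proper tuple schema instead of an empty one
E = filter D by t::counter <= 2;   -- e.g. keep the first two rows of each group (aliases assumed)
dump E;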

Thanks.