COMPLETE CODE
The code below scans your HBase table for row keys that share a common substring and deletes them in batches once the list holds more than 1000 entries (so the list does not blow up the heap). It also writes the matching row keys to HDFS.
Driver
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.oclc.wcsync.hadoop.mapper.HbaseBulkDeleteMapper;
import org.oclc.wcsync.hadoop.util.JobName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Properties;
public class HbaseBulkDelete extends Configured implements Tool {

    private static final Logger LOG = LoggerFactory.getLogger(HbaseBulkDelete.class);

    public static void main(String[] args) throws Exception {
        int res = ToolRunner.run(HBaseConfiguration.create(), new HbaseBulkDelete(), args);
        System.exit(res);
    }

    @Override
    public int run(String[] strings) throws Exception {
        JobName jobName = JobName.HBASE_DELETE;
        LOG.info("Got into class driver");
        Configuration conf = HBaseConfiguration.create();

        String env = "prod";
        Properties hadoopProps = new Properties();
        hadoopProps.load(HbaseBulkDelete.class.getResourceAsStream("/hadoop.config." + env + ".properties"));

        conf.set("jobName", jobName.name());
        conf.set("hbase.master.catalog.timeout", "600000");
        conf.set("hbase.client.scanner.timeout.period", "600000");
        conf.set("hbase.rpc.timeout", "6000000");
        conf.set("mapred.task.timeout", "6000000");
        conf.set("mapreduce.map.memory.mb", "4096");

        Job job = new Job(conf);
        job.setJobName(jobName.format("HbaseBulkDelete"));
        job.setJarByClass(HbaseBulkDelete.class);

        Scan s = new Scan();
        s.addFamily(Bytes.toBytes("data"));
        // Only useful if the substring is actually a row-key prefix;
        // otherwise the contains() check in the mapper does the filtering.
        s.setStartRow(Bytes.toBytes("Your_Substring"));

        // The mapper emits Text keys and Text values, so those are the map output classes.
        TableMapReduceUtil.initTableMapperJob("Ingest", s, HbaseBulkDeleteMapper.class,
            Text.class, Text.class, job);

        job.setNumReduceTasks(0);
        job.setOutputFormatClass(TextOutputFormat.class);
        FileOutputFormat.setOutputPath(job, new Path("/user/neethu/HbaseBulkDelete"));

        return job.waitForCompletion(true) ? 0 : -1;
    }
}
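If your HBase version ships with RowFilter and SubstringComparator (standard classes in org.apache.hadoop.hbase.filter), you can also push the substring match into the Scan itself so only matching rows ever reach the mapper. A minimal sketch, reusing the same "data" family and placeholder substring as above:

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.CompareFilter;
import org.apache.hadoop.hbase.filter.RowFilter;
import org.apache.hadoop.hbase.filter.SubstringComparator;
import org.apache.hadoop.hbase.util.Bytes;

// Server-side row-key filtering: only rows whose key contains the substring are returned.
Scan s = new Scan();
s.addFamily(Bytes.toBytes("data"));
s.setFilter(new RowFilter(CompareFilter.CompareOp.EQUAL, new SubstringComparator("Your_substring")));

With the filter in place, the contains() check inside the mapper becomes a safety net rather than the main filter.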
Mapper
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.Text;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
public class HbaseBulkDeleteMapper extends TableMapper<Text, Text> {

    private static final Logger LOG = LoggerFactory.getLogger(HbaseBulkDeleteMapper.class);

    private Configuration conf;
    private HTable table;
    private final List<Delete> listOfBatchDelete = new ArrayList<Delete>();

    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
        conf = context.getConfiguration();
        // Open the table once per mapper rather than once per map() call.
        table = new HTable(conf, "Ingest");
    }

    @Override
    public void map(ImmutableBytesWritable row, Result values, Context context)
            throws IOException, InterruptedException {
        String key = Bytes.toString(values.getRow());
        if (key.contains("Your_substring")) {
            LOG.info("RowKey: " + key);
            listOfBatchDelete.add(new Delete(values.getRow()));
            // Record the row key in the HDFS output.
            context.write(new Text("RowKey"), new Text(key));
        }
        // Flush the batch once it passes 1000 entries so the list does not
        // blow up the heap.
        if (listOfBatchDelete.size() > 1000) {
            table.delete(listOfBatchDelete);
            listOfBatchDelete.clear();
            LOG.info("Deleted a batch of records");
        }
    }

    @Override
    protected void cleanup(Context context) throws IOException, InterruptedException {
        // Flush whatever is left in the batch and release the table.
        if (!listOfBatchDelete.isEmpty()) {
            table.delete(listOfBatchDelete);
            listOfBatchDelete.clear();
        }
        table.close();
    }
}
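For smaller tables, the same scan, collect, and batch-delete pattern can be run with the plain client API and no MapReduce job at all. A rough sketch using the same placeholder table, family, substring, and 1000-entry batch size (no HDFS output here):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;
import java.util.ArrayList;
import java.util.List;

public class SimpleSubstringDelete {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        HTable table = new HTable(conf, "Ingest");
        Scan scan = new Scan();
        scan.addFamily(Bytes.toBytes("data"));
        ResultScanner scanner = table.getScanner(scan);
        List<Delete> batch = new ArrayList<Delete>();
        try {
            for (Result r : scanner) {
                if (Bytes.toString(r.getRow()).contains("Your_substring")) {
                    batch.add(new Delete(r.getRow()));
                }
                if (batch.size() >= 1000) {      // same batch size as the mapper
                    table.delete(batch);
                    batch.clear();
                }
            }
            if (!batch.isEmpty()) {
                table.delete(batch);             // flush the last partial batch
            }
        } finally {
            scanner.close();
            table.close();
        }
    }
}

The MapReduce version is still the better fit for large tables, since the scan is split across region servers instead of running through a single client.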