I get the following exception when running my Spark job:

    Exception in thread "main" java.nio.channels.ClosedChannelException
        at kafka.network.BlockingChannel.send(BlockingChannel.scala:100)
        at kafka.consumer.SimpleConsumer.liftedTree1$1(SimpleConsumer.scala:78)
        at kafka.consumer.SimpleConsumer.kafka$consumer$SimpleConsumer$$sendRequest(SimpleConsumer.scala:68)
        at kafka.consumer.SimpleConsumer.send(SimpleConsumer.scala:91)
        at kafka.javaapi.consumer.SimpleConsumer.send(SimpleConsumer.scala:68)
        at cmb.SparkStream.kafka.kafkaOffsetTool.getTopicOffsets(kafkaOffsetTool.java:47)
        at cmb.SparkStream.LogClassify.main(LogClassify.java:95)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:606)
        at org.apache.spark.deploy.SparkSubmit$.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:729)
        at org.apache.spark.deploy.SparkSubmit$.doRunMain$1(SparkSubmit.scala:185)
        at org.apache.spark.deploy.SparkSubmit$.submit(SparkSubmit.scala:210)
        at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:124)
        at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)

My code is:

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.Map;

    import com.google.common.collect.ImmutableMap;

    import kafka.api.PartitionOffsetRequestInfo;
    import kafka.cluster.Broker;
    import kafka.common.TopicAndPartition;
    import kafka.javaapi.OffsetRequest;
    import kafka.javaapi.OffsetResponse;
    import kafka.javaapi.PartitionMetadata;
    import kafka.javaapi.TopicMetadata;
    import kafka.javaapi.TopicMetadataRequest;
    import kafka.javaapi.TopicMetadataResponse;
    import kafka.javaapi.consumer.SimpleConsumer;

    public static Map<TopicAndPartition, Long> getTopicOffsets(String zkServers, String topic) {
        Map<TopicAndPartition, Long> retVals = new HashMap<TopicAndPartition, Long>();
        for (String zkServer : zkServers.split(",")) {
            // open a SimpleConsumer against each host:port in the list
            String[] hostAndPort = zkServer.split(":");
            SimpleConsumer simpleConsumer = new SimpleConsumer(hostAndPort[0],
                    Integer.valueOf(hostAndPort[1]), Consts.getKafkaConfigBean().getDuration(), 1024,
                    "consumer");
            // fetch the topic metadata to find each partition's leader
            TopicMetadataRequest topicMetadataRequest = new TopicMetadataRequest(Arrays.asList(topic));
            TopicMetadataResponse topicMetadataResponse = simpleConsumer.send(topicMetadataRequest);

            for (TopicMetadata metadata : topicMetadataResponse.topicsMetadata()) {
                for (PartitionMetadata part : metadata.partitionsMetadata()) {
                    Broker leader = part.leader();
                    if (leader != null) {
                        TopicAndPartition topicAndPartition = new TopicAndPartition(topic, part.partitionId());
                        // ask for the latest offset of this partition
                        PartitionOffsetRequestInfo partitionOffsetRequestInfo = new PartitionOffsetRequestInfo(
                                kafka.api.OffsetRequest.LatestTime(), 10000);
                        OffsetRequest offsetRequest = new OffsetRequest(
                                ImmutableMap.of(topicAndPartition, partitionOffsetRequestInfo),
                                kafka.api.OffsetRequest.CurrentVersion(), simpleConsumer.clientId());
                        OffsetResponse offsetResponse = simpleConsumer.getOffsetsBefore(offsetRequest);
                        if (!offsetResponse.hasError()) {
                            long[] offsets = offsetResponse.offsets(topic, part.partitionId());
                            retVals.put(topicAndPartition, offsets[0]);
                        }
                    }
                }
            }
            simpleConsumer.close();
        }
        return retVals;
    }

1 Answer


I think you might be overcomplicating things. Use org.apache.kafka.clients.consumer.KafkaConsumer (referred to as consumer below) and do something like:

    List<TopicPartition> partitions = consumer.partitionsFor(topic).stream()
            .map(info -> new TopicPartition(topic, info.partition()))
            .collect(Collectors.toList());
    consumer.assign(partitions);
    consumer.seekToEnd(partitions);             // marks each partition for a seek to its end
    Map<TopicPartition, Long> offsets = partitions.stream()
            .collect(Collectors.toMap(p -> p, consumer::position)); // position() performs the seek
    System.out.println(offsets);

and you will get results like

    [topicname-8->1917258, topicname-2->1876810, topicname-5->1857012, topicname-4->3844, topicname-7->4043972, topicname-1->1811078, topicname-9->12217819, topicname-3->3844, topicname-6->1430021, topicname-0->2808969]
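
For completeness, here is a minimal self-contained sketch of the same idea (the class name, the localhost:9092 address, and the topic name are placeholders, not anything from the question). On client versions 0.10.1 and later you can also use consumer.endOffsets, which collapses the assign/seekToEnd/position steps into a single call:

    import java.util.List;
    import java.util.Map;
    import java.util.Properties;
    import java.util.stream.Collectors;

    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.TopicPartition;
    import org.apache.kafka.common.serialization.StringDeserializer;

    public class LatestOffsets {

        // Fetch the latest (end) offset of every partition of the given topic.
        public static Map<TopicPartition, Long> getLatestOffsets(String bootstrapServers, String topic) {
            Properties props = new Properties();
            props.put("bootstrap.servers", bootstrapServers); // Kafka brokers, not ZooKeeper
            props.put("key.deserializer", StringDeserializer.class.getName());
            props.put("value.deserializer", StringDeserializer.class.getName());

            try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
                List<TopicPartition> partitions = consumer.partitionsFor(topic).stream()
                        .map(info -> new TopicPartition(topic, info.partition()))
                        .collect(Collectors.toList());
                // endOffsets replaces the explicit assign/seekToEnd/position sequence
                return consumer.endOffsets(partitions);
            }
        }

        public static void main(String[] args) {
            System.out.println(getLatestOffsets("localhost:9092", "topicname"));
        }
    }

Note that KafkaConsumer talks to the brokers directly, so bootstrap.servers must list Kafka brokers rather than the ZooKeeper quorum that the SimpleConsumer-based code was iterating over.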