Note: this article targets the old consumer. Older versions (before 0.9) store offset information in ZooKeeper, while newer versions (0.9 and later) store it in an internal Kafka topic. If you are not sure which version you are using, see this article:
New vs. old consumer code
Kafka consumption is usually monitored with off-the-shelf tools, but they will not tell you promptly when a large lag builds up. So the question becomes: how do we monitor Kafka with the Java API?
Anyone who has used Kafka should know that lag = logSize (the number of records in the topic) - offset (the consumer group's consumption progress).
So all we need to fetch are logSize and offset.
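For example, with the output at the end of this article:

lag = logSize - offset = 2620569947 - 2620567473 = 2474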
Since there is very little material about this online, I am posting the code here.
The jar I use:
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka_2.11</artifactId>
    <version>0.10.1.1</version>
</dependency>
package com.fengjr.elk.web.write;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.Map.Entry;
import kafka.api.PartitionOffsetRequestInfo;
import kafka.common.ErrorMapping;
import kafka.common.OffsetMetadataAndError;
import kafka.common.TopicAndPartition;
import kafka.javaapi.*;
import kafka.javaapi.consumer.SimpleConsumer;
import kafka.network.BlockingChannel;
public class KafkaOffsetTools {

    public static void main(String[] args) {
        String topic = "app-log-all-beta";
        String broker = "10.255.73.160";
        int port = 9092;
        String group = "fengjr-elk-group-es";
        String clientId = "Client_app-log-all-beta_1";
        int correlationId = 0;

        // Blocking channel to the broker, used to send the OffsetFetchRequest.
        BlockingChannel channel = new BlockingChannel(broker, port,
                BlockingChannel.UseDefaultBufferSize(),
                BlockingChannel.UseDefaultBufferSize(),
                5000);
        channel.connect();

        List<String> seeds = new ArrayList<String>();
        seeds.add(broker);

        KafkaOffsetTools kot = new KafkaOffsetTools();
        // Partition id -> metadata (carries the leader broker of each partition).
        TreeMap<Integer, PartitionMetadata> metadatas = kot.findLeader(seeds, port, topic);

        long sum = 0L;       // total logSize over all partitions
        long sumOffset = 0L; // total committed offset over all partitions
        long lag = 0L;

        List<TopicAndPartition> partitions = new ArrayList<TopicAndPartition>();
        for (Entry<Integer, PartitionMetadata> entry : metadatas.entrySet()) {
            partitions.add(new TopicAndPartition(topic, entry.getKey()));
        }

        // Version (short) 0 reads the offsets the old consumer committed to ZooKeeper.
        OffsetFetchRequest fetchRequest = new OffsetFetchRequest(
                group,
                partitions,
                (short) 0,
                correlationId,
                clientId);

        for (Entry<Integer, PartitionMetadata> entry : metadatas.entrySet()) {
            int partition = entry.getKey();
            try {
                // The request covers all partitions, so sending it once outside
                // the loop would also work; re-sending it per partition keeps
                // the original structure.
                channel.send(fetchRequest.underlying());
                OffsetFetchResponse fetchResponse =
                        OffsetFetchResponse.readFrom(channel.receive().payload());
                TopicAndPartition testPartition = new TopicAndPartition(topic, partition);
                OffsetMetadataAndError result = fetchResponse.offsets().get(testPartition);
                short offsetFetchErrorCode = result.error();
                if (offsetFetchErrorCode == ErrorMapping.NotCoordinatorForConsumerCode()) {
                    // The offset manager has moved; a full implementation would
                    // rediscover the coordinator and retry.
                } else {
                    sumOffset += result.offset();
                }

                // Ask the partition leader for its latest offset (the logSize).
                String leadBroker = entry.getValue().leader().host();
                String clientName = "Client_" + topic + "_" + partition;
                SimpleConsumer consumer = new SimpleConsumer(leadBroker, port, 100000,
                        64 * 1024, clientName);
                long readOffset = getLastOffset(consumer, topic, partition,
                        kafka.api.OffsetRequest.LatestTime(), clientName);
                sum += readOffset;
                System.out.println(partition + ":" + readOffset);
                consumer.close();
            } catch (Exception e) {
                e.printStackTrace();
                channel.disconnect();
            }
        }
        channel.disconnect();

        System.out.println("logSize:" + sum);
        System.out.println("offset:" + sumOffset);
        lag = sum - sumOffset;
        System.out.println("lag:" + lag);
    }

    public KafkaOffsetTools() {
    }

    /**
     * Fetches the latest offset (logSize) of one partition by sending an
     * OffsetRequest to the partition leader.
     */
    public static long getLastOffset(SimpleConsumer consumer, String topic,
            int partition, long whichTime, String clientName) {
        TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
        Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
                new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
        requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
        kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(
                requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
        OffsetResponse response = consumer.getOffsetsBefore(request);
        if (response.hasError()) {
            System.out.println("Error fetching offset data from the broker. Reason: "
                    + response.errorCode(topic, partition));
            return 0;
        }
        long[] offsets = response.offsets(topic, partition);
        return offsets[0];
    }

    /**
     * Queries the seed brokers for topic metadata and collects the
     * PartitionMetadata (including the leader) of every partition.
     */
    private TreeMap<Integer, PartitionMetadata> findLeader(List<String> a_seedBrokers,
            int a_port, String a_topic) {
        TreeMap<Integer, PartitionMetadata> map = new TreeMap<Integer, PartitionMetadata>();
        for (String seed : a_seedBrokers) {
            SimpleConsumer consumer = null;
            try {
                consumer = new SimpleConsumer(seed, a_port, 100000, 64 * 1024,
                        "leaderLookup" + new Date().getTime());
                List<String> topics = Collections.singletonList(a_topic);
                TopicMetadataRequest req = new TopicMetadataRequest(topics);
                kafka.javaapi.TopicMetadataResponse resp = consumer.send(req);
                for (TopicMetadata item : resp.topicsMetadata()) {
                    for (PartitionMetadata part : item.partitionsMetadata()) {
                        map.put(part.partitionId(), part);
                    }
                }
            } catch (Exception e) {
                System.out.println("Error communicating with broker [" + seed
                        + "] to find leader for [" + a_topic + "]. Reason: " + e);
            } finally {
                if (consumer != null)
                    consumer.close();
            }
        }
        return map;
    }
}
Output:
logSize:2620569947
offset:2620567473
lag:2474
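As noted at the top, the new consumer (0.9 and later) commits offsets to an internal Kafka topic instead of ZooKeeper. For comparison, here is a minimal sketch of the same logSize / offset / lag calculation with the new KafkaConsumer API, which ships in the same 0.10.1.1 jar used above. The broker, topic, and group values are reused from the example; the class name NewConsumerLagCheck is just for illustration:

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;

public class NewConsumerLagCheck {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "10.255.73.160:9092");
        props.put("group.id", "fengjr-elk-group-es");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        String topic = "app-log-all-beta";
        KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
        try {
            List<TopicPartition> partitions = new ArrayList<TopicPartition>();
            for (PartitionInfo info : consumer.partitionsFor(topic)) {
                partitions.add(new TopicPartition(topic, info.partition()));
            }
            // endOffsets() returns the logSize of every partition in one call.
            Map<TopicPartition, Long> endOffsets = consumer.endOffsets(partitions);
            long logSize = 0L;
            long committed = 0L;
            for (TopicPartition tp : partitions) {
                logSize += endOffsets.get(tp);
                // committed() returns null if the group has no offset for this partition.
                OffsetAndMetadata om = consumer.committed(tp);
                if (om != null) {
                    committed += om.offset();
                }
            }
            System.out.println("logSize:" + logSize);
            System.out.println("offset:" + committed);
            System.out.println("lag:" + (logSize - committed));
        } finally {
            consumer.close();
        }
    }
}

Note that endOffsets() (added in 0.10.1) hands back the latest offset of all partitions at once, so no manual leader lookup per partition is needed.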