Requirement: for every phone number, compute the total upstream traffic, the total downstream traffic, and the overall total traffic it consumed.
Input record fields: timestamp, phone number, base-station MAC address, IP of the visited site, website domain, packet count, received-packet count, upstream (upload) traffic, downstream (download) traffic, response code.
Expected output fields: phone number, upstream traffic, downstream traffic, total traffic.
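For example, here is one input record (the same sample that appears in the Mapper comments below) and the output line it should produce, assuming it is the only record for that phone number:

1363157985066	13726230503	00-FD-07-A4-72-B8:CMCC	120.196.100.82	i02.c.aliimg.com	24	27	2481	24681	200

13726230503	2481	24681	27162	(total = 2481 + 24681 = 27162)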
Map stage:
(1) Read one line of input and convert it to a String.
(2) Split the line into fields.
(3) Extract the phone number, upstream traffic and downstream traffic.
(4) Wrap the data with the phone number as the key and a bean object (upstream traffic, downstream traffic, total traffic) as the value.
(5) Write the pair out, i.e. context.write(phone number, bean) (see the worked example right after this list).
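For the sample record above, the map step would therefore emit the key-value pair: key = 13726230503, value = a FlowBean with upFlow = 2481, downFlow = 24681, sumFlow = 27162.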
Reduce stage:
(1) Iterate over the values for each key and sum the upstream and downstream traffic to obtain the total traffic.
(2) Implement a custom bean to encapsulate the traffic information, and use that bean as the map output key so it is what gets transmitted (a sketch of such a bean follows the FlowBean class below).
(3) While processing data, the MR framework sorts the data (the key-value pairs output by map are sorted before they reach reduce), and the sort is based on the map output key.
package com.jike.hdfs;

import org.apache.hadoop.io.Writable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

/*
 * 1. Implement the Writable interface
 * 2. Override the serialization and deserialization methods
 * 3. Provide a no-arg constructor
 * 4. Override toString()
 * */
public class FlowBean implements Writable {
    private long upFlow;
    private long downFlow;
    private long sumFlow;

    // no-arg constructor, required so the framework can create the bean by reflection
    public FlowBean() {
    }

    public long getUpFlow() {
        return upFlow;
    }

    public void setUpFlow(long upFlow) {
        this.upFlow = upFlow;
    }

    public long getDownFlow() {
        return downFlow;
    }

    public void setDownFlow(long downFlow) {
        this.downFlow = downFlow;
    }

    public long getSumFlow() {
        return sumFlow;
    }

    public void setSumFlow() {
        this.sumFlow = this.upFlow + this.downFlow;
    }

    // serialization
    @Override
    public void write(DataOutput dataOutput) throws IOException {
        dataOutput.writeLong(upFlow);
        dataOutput.writeLong(downFlow);
        dataOutput.writeLong(sumFlow);
    }

    // deserialization: read the fields in the same order they were written
    @Override
    public void readFields(DataInput dataInput) throws IOException {
        this.upFlow = dataInput.readLong();
        this.downFlow = dataInput.readLong();
        this.sumFlow = dataInput.readLong();
    }

    @Override
    public String toString() {
        return upFlow + "\t" + downFlow + "\t" + sumFlow;
    }
}
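Points (2) and (3) of the reduce-stage notes above hint at a follow-up requirement: to order results by traffic, the bean itself has to travel as the map output key, which means implementing WritableComparable rather than plain Writable. The following is only a minimal sketch under that assumption; the class name SortableFlowBean and the descending order by sumFlow are illustrative choices, not part of the job in this article.

package com.jike.hdfs;

import org.apache.hadoop.io.WritableComparable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

// Hypothetical variant of FlowBean for the sorting case mentioned in the analysis:
// the bean becomes the map output key, so it must implement WritableComparable.
public class SortableFlowBean implements WritableComparable<SortableFlowBean> {
    private long upFlow;
    private long downFlow;
    private long sumFlow;

    public SortableFlowBean() {
    }

    public void set(long upFlow, long downFlow) {
        this.upFlow = upFlow;
        this.downFlow = downFlow;
        this.sumFlow = upFlow + downFlow;
    }

    public long getUpFlow() { return upFlow; }
    public long getDownFlow() { return downFlow; }
    public long getSumFlow() { return sumFlow; }

    @Override
    public void write(DataOutput out) throws IOException {
        out.writeLong(upFlow);
        out.writeLong(downFlow);
        out.writeLong(sumFlow);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        // must read the fields in the same order they were written
        this.upFlow = in.readLong();
        this.downFlow = in.readLong();
        this.sumFlow = in.readLong();
    }

    @Override
    public int compareTo(SortableFlowBean o) {
        // descending order by total traffic (assumed requirement)
        return Long.compare(o.sumFlow, this.sumFlow);
    }

    @Override
    public String toString() {
        return upFlow + "\t" + downFlow + "\t" + sumFlow;
    }
}

With such a key, the map would emit (bean, phone number) pairs, and the shuffle would hand the reducer its records already sorted by total traffic.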
package com.jike.hdfs;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class FlowBeanMapper extends Mapper<LongWritable, Text, Text, FlowBean> {
    private Text outK = new Text();
    private FlowBean outV = new FlowBean();

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // 1. read one line
        String line = value.toString();

        // 2. split the line into fields:
        // timestamp, phone number, base-station MAC, visited IP, website domain, packets, received packets, upstream traffic, downstream traffic, response code
        // e.g. 1363157985066 13726230503 00-FD-07-A4-72-B8:CMCC 120.196.100.82 i02.c.aliimg.com 24 27 2481 24681 200
        String[] split = line.split("\t");

        // 3. extract the fields we need; the traffic fields are indexed from the end of the array
        String phoneno = split[1];
        String upflow = split[split.length - 3];
        String downflow = split[split.length - 2];

        // 4. wrap them into the output key and value
        outK.set(phoneno);
        outV.setUpFlow(Long.parseLong(upflow));
        outV.setDownFlow(Long.parseLong(downflow));
        outV.setSumFlow();

        // 5. write out (phone number, bean)
        context.write(outK, outV);
    }
}
package com.jike.hdfs;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class FlowBeanReducer extends Reducer<Text, FlowBean, Text, FlowBean> {
    private FlowBean outV = new FlowBean();

    @Override
    protected void reduce(Text key, Iterable<FlowBean> values, Context context) throws IOException, InterruptedException {
        // 1. iterate over the values for this phone number and accumulate the traffic
        long totalup = 0;
        long totaldown = 0;
        for (FlowBean value : values) {
            totalup += value.getUpFlow();
            totaldown += value.getDownFlow();
        }

        // 2. wrap the totals into the output bean
        outV.setUpFlow(totalup);
        outV.setDownFlow(totaldown);
        outV.setSumFlow();

        // 3. write out (phone number, bean)
        context.write(key, outV);
    }
}
package com.jike.hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
/* org.apache.hadoop.mapred.FileInputFormat and FileOutputFormat belong to the old,
   deprecated API; the classes under org.apache.hadoop.mapreduce.lib must be used instead. */

import java.io.IOException;

public class FlowBeanDriver {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        // 1. get the job
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);

        // 2. set the jar
        job.setJarByClass(FlowBeanDriver.class);

        // 3. wire up the Mapper and Reducer
        job.setMapperClass(FlowBeanMapper.class);
        job.setReducerClass(FlowBeanReducer.class);

        // 4. set the map output key and value types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(FlowBean.class);

        // 5. set the final output key and value types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(FlowBean.class);

        // 6. set the input and output paths
        FileInputFormat.setInputPaths(job, new Path("D:\\BigData\\input"));
        FileOutputFormat.setOutputPath(job, new Path("D:\\BigData\\output1"));

        // 7. submit the job
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
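To run this locally, the input files go under D:\BigData\input, and the output directory D:\BigData\output1 must not exist yet (the job fails if the output directory already exists). When the job finishes, the result is in part-r-00000 inside the output directory, one line per phone number in the form phone\tupFlow\tdownFlow\tsumFlow. For the sample record shown in the Mapper, and assuming it is the only line for that number, the file would contain:

13726230503	2481	24681	27162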
Reference article: https://blog.csdn.net/zhao2chen3/article/details/110201664