
Custom InputFormat and Custom OutputFormat

1. Merging small files with a custom InputFormat

Requirement

Both HDFS and MapReduce lose efficiency when dealing with small files, yet in practice it is common to have to process large numbers of them, so a solution is needed: merge the small files into a single SequenceFile. The SequenceFile holds one record per original file, with the file name as the key and the file contents as the value.
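
To make the target layout concrete, here is a minimal standalone sketch (not part of the MapReduce job built below) that writes such a SequenceFile directly. The class name and the local paths are placeholders chosen for illustration.

    import org.apache.commons.io.FileUtils;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.BytesWritable;
    import org.apache.hadoop.io.SequenceFile;
    import org.apache.hadoop.io.Text;

    import java.io.File;

    public class SequenceFileLayoutDemo {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // Placeholder paths; adjust to your environment.
            File inputDir = new File("input");
            Path target = new Path("merged.seq");
            try (SequenceFile.Writer writer = SequenceFile.createWriter(conf,
                    SequenceFile.Writer.file(target),
                    SequenceFile.Writer.keyClass(Text.class),
                    SequenceFile.Writer.valueClass(BytesWritable.class))) {
                for (File f : inputDir.listFiles()) {
                    byte[] bytes = FileUtils.readFileToByteArray(f);
                    // One record per small file: key = file name, value = raw file contents.
                    writer.append(new Text(f.getName()), new BytesWritable(bytes));
                }
            }
        }
    }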

Analysis

Small-file optimization generally comes down to the following approaches:

  1. Merge small files or small batches of data into larger files at collection time, before uploading them to HDFS.
  2. Before business processing, run a MapReduce program on HDFS to merge the small files.
  3. During MapReduce processing, use CombineTextInputFormat to improve efficiency (see the sketch after this list).
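
For approach 3, a minimal driver sketch using Hadoop's built-in CombineTextInputFormat looks like the following; the split-size threshold and the argument-based paths are illustrative assumptions.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.lib.input.CombineTextInputFormat;
    import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
    import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

    public class CombineInputDriver {
        public static void main(String[] args) throws Exception {
            Job job = Job.getInstance(new Configuration());
            // Pack many small files into a few splits instead of one split per file.
            job.setInputFormatClass(CombineTextInputFormat.class);
            // Illustrative threshold: cap each combined split at 4 MB.
            CombineTextInputFormat.setMaxInputSplitSize(job, 4 * 1024 * 1024);
            FileInputFormat.addInputPath(job, new Path(args[0]));
            FileOutputFormat.setOutputPath(job, new Path(args[1]));
            // ... set mapper/reducer classes and output types as usual ...
            System.exit(job.waitForCompletion(true) ? 0 : 1);
        }
    }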

Implementation

This section implements the second approach above.

The core mechanism of the program:

Define a custom InputFormat.

Override its RecordReader so that each call reads one complete file and wraps it as a single key-value pair.

On the output side, use SequenceFileOutputFormat to write the merged file.

Step 1: Define the custom InputFormat class

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.BytesWritable;
    import org.apache.hadoop.io.NullWritable;
    import org.apache.hadoop.mapreduce.InputSplit;
    import org.apache.hadoop.mapreduce.JobContext;
    import org.apache.hadoop.mapreduce.RecordReader;
    import org.apache.hadoop.mapreduce.TaskAttemptContext;
    import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;

    import java.io.IOException;

    public class Custom_FileInputFormat extends FileInputFormat<NullWritable, BytesWritable> {

        // Return false so that each small file stays whole in a single split.
        @Override
        protected boolean isSplitable(JobContext context, Path filename) {
            return false;
        }

        @Override
        public RecordReader<NullWritable, BytesWritable> createRecordReader(InputSplit inputSplit,
                TaskAttemptContext taskAttemptContext) throws IOException, InterruptedException {
            // Hand each split to the custom RecordReader defined in step 2.
            Custom_RecordReader reader = new Custom_RecordReader();
            reader.initialize(inputSplit, taskAttemptContext);
            return reader;
        }
    }

Step 2: Define the custom RecordReader class

    import org.apache.commons.io.IOUtils;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.BytesWritable;
    import org.apache.hadoop.io.NullWritable;
    import org.apache.hadoop.mapreduce.InputSplit;
    import org.apache.hadoop.mapreduce.RecordReader;
    import org.apache.hadoop.mapreduce.TaskAttemptContext;
    import org.apache.hadoop.mapreduce.lib.input.FileSplit;

    import java.io.IOException;

    public class Custom_RecordReader extends RecordReader<NullWritable, BytesWritable> {

        private FileSplit fileSplit;
        private Configuration conf;
        private BytesWritable bytesWritable = new BytesWritable();
        // Becomes true once the single file behind this split has been read.
        private boolean processed = false;

        @Override
        public void initialize(InputSplit inputSplit, TaskAttemptContext taskAttemptContext)
                throws IOException, InterruptedException {
            fileSplit = (FileSplit) inputSplit;
            conf = taskAttemptContext.getConfiguration();
        }

        @Override
        public boolean nextKeyValue() throws IOException, InterruptedException {
            if (processed) {
                return false;
            }
            Path path = fileSplit.getPath();
            FileSystem fileSystem = FileSystem.get(conf);
            FSDataInputStream inputStream = null;
            try {
                // Open the file backing this split.
                inputStream = fileSystem.open(path);
                // The split covers the whole file, so size the buffer to its length.
                byte[] bytes = new byte[(int) fileSplit.getLength()];
                // Read the entire file into the buffer.
                IOUtils.readFully(inputStream, bytes, 0, bytes.length);
                // Wrap the bytes as the current value.
                bytesWritable.set(bytes, 0, bytes.length);
            } finally {
                // Close only the stream; the FileSystem instance is cached and shared by Hadoop.
                if (inputStream != null) {
                    inputStream.close();
                }
            }
            processed = true;
            return true;
        }

        @Override
        public NullWritable getCurrentKey() throws IOException, InterruptedException {
            return NullWritable.get();
        }

        @Override
        public BytesWritable getCurrentValue() throws IOException, InterruptedException {
            return bytesWritable;
        }

        @Override
        public float getProgress() throws IOException, InterruptedException {
            return processed ? 1.0F : 0.0F;
        }

        @Override
        public void close() throws IOException {
        }
    }

Step 3: Write the Mapper class

    import org.apache.hadoop.io.BytesWritable;
    import org.apache.hadoop.io.NullWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Mapper;
    import org.apache.hadoop.mapreduce.lib.input.FileSplit;

    import java.io.IOException;

    public class Custom_Mapper extends Mapper<NullWritable, BytesWritable, Text, BytesWritable> {

        @Override
        protected void map(NullWritable key, BytesWritable value, Context context)
                throws IOException, InterruptedException {
            // Each split holds exactly one file, so its name becomes the output key.
            FileSplit fileSplit = (FileSplit) context.getInputSplit();
            String name = fileSplit.getPath().getName();
            context.write(new Text(name), value);
        }
    }

Step 4: Write the Driver class

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.conf.Configured;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.BytesWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
    import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
    import org.apache.hadoop.util.Tool;
    import org.apache.hadoop.util.ToolRunner;

    public class Custom_Driver extends Configured implements Tool {

        @Override
        public int run(String[] args) throws Exception {
            Job job = Job.getInstance(new Configuration());
            // Plug in the custom InputFormat and set the input path.
            job.setInputFormatClass(Custom_FileInputFormat.class);
            FileInputFormat.addInputPath(job, new Path("E:\\自定义InputFormat\\input\\"));
            job.setMapperClass(Custom_Mapper.class);
            job.setMapOutputKeyClass(Text.class);
            job.setMapOutputValueClass(BytesWritable.class);
            job.setOutputKeyClass(Text.class);
            job.setOutputValueClass(BytesWritable.class);
            // Write the merged (file name, contents) records as a SequenceFile.
            job.setOutputFormatClass(SequenceFileOutputFormat.class);
            SequenceFileOutputFormat.setOutputPath(job, new Path("E:\\自定义InputFormat\\output\\"));
            return job.waitForCompletion(true) ? 0 : 1;
        }

        public static void main(String[] args) throws Exception {
            System.exit(ToolRunner.run(new Custom_Driver(), args));
        }
    }
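
Once the job finishes, the merged output can be spot-checked. A minimal sketch along the following lines iterates over the (file name, contents) records; the class name is hypothetical, and the part file name assumes the job's default single reducer.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.BytesWritable;
    import org.apache.hadoop.io.SequenceFile;
    import org.apache.hadoop.io.Text;

    public class MergedFileInspector {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // One output part file produced by the driver above.
            Path part = new Path("E:\\自定义InputFormat\\output\\part-r-00000");
            try (SequenceFile.Reader reader = new SequenceFile.Reader(conf, SequenceFile.Reader.file(part))) {
                Text key = new Text();
                BytesWritable value = new BytesWritable();
                while (reader.next(key, value)) {
                    // Print each original file name and its size in bytes.
                    System.out.println(key + " -> " + value.getLength() + " bytes");
                }
            }
        }
    }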

 

2. Custom OutputFormat

Requirement

Given a set of order review data, separate the good reviews from the rest and write the two groups to different directories (the sample data is in the accompanying resources folder). The ninth field of each record indicates the rating: 0 for a good review, 1 for a neutral review, 2 for a bad review.

Analysis

The key point is that a single MapReduce program must write two classes of results to different directories depending on the data. This kind of flexible output requirement can be met by defining a custom OutputFormat.

Implementation

Key implementation points:

  1. Access external resources (here, HDFS output streams) from within the MapReduce program.
  2. Define a custom OutputFormat, provide its RecordWriter, and override the write() method that actually emits the data.
Step 1: Define the custom OutputFormat class

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.NullWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.RecordWriter;
    import org.apache.hadoop.mapreduce.TaskAttemptContext;
    import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

    import java.io.IOException;

    public class Custom_OutputFormat extends FileOutputFormat<Text, NullWritable> {

        @Override
        public RecordWriter<Text, NullWritable> getRecordWriter(TaskAttemptContext taskAttemptContext)
                throws IOException, InterruptedException {
            Configuration conf = taskAttemptContext.getConfiguration();
            // Get the FileSystem and open one output stream per category.
            FileSystem fileSystem = FileSystem.get(conf);
            FSDataOutputStream goodStream = fileSystem.create(
                    new Path("M:\\自定义outputformat\\output\\good_cmment\\good_cmment.txt"));
            FSDataOutputStream badStream = fileSystem.create(
                    new Path("M:\\自定义outputformat\\output\\bad_cmment\\bad_cmment.txt"));
            // Pass both streams to the custom RecordWriter via its two-argument constructor.
            return new Custom_RecordWriter(goodStream, badStream);
        }
    }

Step 2: Define the custom RecordWriter class

    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.io.NullWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.RecordWriter;
    import org.apache.hadoop.mapreduce.TaskAttemptContext;

    import java.io.IOException;

    public class Custom_RecordWriter extends RecordWriter<Text, NullWritable> {

        // out1 receives good reviews, out2 receives neutral and bad reviews.
        private FSDataOutputStream out1;
        private FSDataOutputStream out2;

        public Custom_RecordWriter() {
        }

        public Custom_RecordWriter(FSDataOutputStream out1, FSDataOutputStream out2) {
            this.out1 = out1;
            this.out2 = out2;
        }

        public FSDataOutputStream getOut1() {
            return out1;
        }

        public void setOut1(FSDataOutputStream out1) {
            this.out1 = out1;
        }

        public FSDataOutputStream getOut2() {
            return out2;
        }

        public void setOut2(FSDataOutputStream out2) {
            this.out2 = out2;
        }

        @Override
        public void write(Text key, NullWritable value) throws IOException, InterruptedException {
            // The field at index 9 holds the rating; "0" marks a good review.
            if (key.toString().split("\t")[9].equals("0")) {
                out1.write(key.toString().getBytes());
                out1.write("\r\n".getBytes());
            } else {
                // Neutral and bad reviews go to the second stream.
                out2.write(key.toString().getBytes());
                out2.write("\r\n".getBytes());
            }
        }

        @Override
        public void close(TaskAttemptContext taskAttemptContext) throws IOException, InterruptedException {
            if (null != out1) {
                out1.close();
            }
            if (null != out2) {
                out2.close();
            }
        }
    }

Step 3: Write the Mapper class

    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.NullWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Mapper;

    import java.io.IOException;

    public class Custom_Mapper extends Mapper<LongWritable, Text, Text, NullWritable> {

        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // Pass each line through unchanged; the custom RecordWriter decides where it goes.
            context.write(value, NullWritable.get());
        }
    }

Step 4: Write the Driver class

    import org.apache.hadoop.conf.Configured;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.NullWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
    import org.apache.hadoop.util.Tool;
    import org.apache.hadoop.util.ToolRunner;

    public class Custom_OutputFormatDriver extends Configured implements Tool {

        public static void main(String[] args) throws Exception {
            System.exit(ToolRunner.run(new Custom_OutputFormatDriver(), args));
        }

        @Override
        public int run(String[] args) throws Exception {
            Job job = Job.getInstance(super.getConf());
            // Set the mapper class.
            job.setMapperClass(Custom_Mapper.class);
            // Set the map output types.
            job.setMapOutputKeyClass(Text.class);
            job.setMapOutputValueClass(NullWritable.class);
            // Set the InputFormat class.
            job.setInputFormatClass(TextInputFormat.class);
            // Set the custom OutputFormat class.
            job.setOutputFormatClass(Custom_OutputFormat.class);
            // Set the input path and the output path. The real data files are written by
            // the custom RecordWriter; this output path mainly receives the framework's
            // job markers such as _SUCCESS.
            TextInputFormat.addInputPath(job, new Path("M:\\自定义outputformat\\input\\ordercomment.csv"));
            Custom_OutputFormat.setOutputPath(job, new Path("M:\\自定义outputformat\\output\\tmp"));
            return job.waitForCompletion(true) ? 0 : 1;
        }
    }
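
For comparison, the same good/bad split can also be achieved without a custom OutputFormat by using Hadoop's built-in MultipleOutputs. The sketch below assumes the same tab-separated comment data; the class name and the "good"/"bad" output names are chosen for illustration.

    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.NullWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Mapper;
    import org.apache.hadoop.mapreduce.lib.output.MultipleOutputs;

    import java.io.IOException;

    public class CommentSplitMapper extends Mapper<LongWritable, Text, Text, NullWritable> {

        private MultipleOutputs<Text, NullWritable> multipleOutputs;

        @Override
        protected void setup(Context context) {
            multipleOutputs = new MultipleOutputs<>(context);
        }

        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // Route by the rating field: "0" is a good review, everything else is not.
            String rating = value.toString().split("\t")[9];
            String baseOutputPath = rating.equals("0") ? "good/part" : "bad/part";
            multipleOutputs.write(value, NullWritable.get(), baseOutputPath);
        }

        @Override
        protected void cleanup(Context context) throws IOException, InterruptedException {
            multipleOutputs.close();
        }
    }

In the driver, the output path is still set with FileOutputFormat.setOutputPath, and the "good" and "bad" subdirectories are created under it automatically.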

 
