import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class Sort {

    // map: parse each input line as an integer and emit it as the key, so the
    // shuffle phase sorts the numbers for us; the value 1 is only a placeholder
    public static class SortMapper extends Mapper<Object, Text, IntWritable, IntWritable> {
        @Override
        public void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {
            String line = value.toString().trim();
            if (line.isEmpty()) {
                return; // skip blank lines instead of crashing on parseInt
            }
            int t = Integer.parseInt(line);
            context.write(new IntWritable(t), new IntWritable(1));
        }
    }

    // reduce: keys arrive in ascending order; emit a running rank as the new key.
    // Duplicate numbers arrive as one key with several values, so the loop writes
    // one output line, each with its own rank, per occurrence
    public static class SortReducer extends Reducer<IntWritable, IntWritable, IntWritable, IntWritable> {
        private int row = 1;

        @Override
        public void reduce(IntWritable key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            for (IntWritable value : values) {
                context.write(new IntWritable(row), key);
                row = row + 1;
            }
        }
    }

    // main: standard job setup; the rank counter in the reducer only yields a
    // global ordering if a single reduce task is used, so set that explicitly
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "sort");
        job.setJarByClass(Sort.class);
        job.setMapperClass(SortMapper.class);
        job.setReducerClass(SortReducer.class);
        job.setNumReduceTasks(1);
        job.setOutputKeyClass(IntWritable.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.addInputPath(job, new Path("input"));
        FileOutputFormat.setOutputPath(job, new Path("output"));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
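To make the rank logic concrete, here is a hypothetical run (the numbers are illustrative, not from the original): if A.txt contains 33, 37, 12, 40 and B.txt contains 4, 16, 39, 5, one number per line, the job produces:

1	4
2	5
3	12
4	16
5	33
6	37
7	39
8	40

Each output line pairs a 1-based rank with a value, in ascending order; the tab between them is the default MapReduce key/value separator.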
Once the code is complete, package it into a jar file; the detailed packaging steps are covered in the previous article, MapReduce文件合并与去重 (MapReduce file merging and deduplication).
As required, create two text files, A.txt and B.txt, under the /usr/local/hadoop directory, and then carry out the following operations:
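A minimal sketch of those operations, assuming a pseudo-distributed Hadoop installation in /usr/local/hadoop and that the program was packaged as ./myapp/Sort.jar (the jar name and path are assumptions, not from the original):

cd /usr/local/hadoop
./sbin/start-dfs.sh                              # start HDFS if it is not already running
./bin/hdfs dfs -mkdir -p input                   # create the job's input directory in HDFS
./bin/hdfs dfs -put ./A.txt ./B.txt input        # upload the two text files
./bin/hdfs dfs -rm -r -f output                  # clear any stale output directory first
./bin/hadoop jar ./myapp/Sort.jar Sort           # run the job; input/output paths are hardcoded in main
./bin/hdfs dfs -cat output/*                     # print the ranked, sorted numbers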