1. Introduction
Count the word that appears most frequently across a set of input files and write the result to a file on HDFS.

2. Example
(1) Example description

Given three files, each containing words separated by whitespace, find the word with the highest total number of occurrences.
Sample input:

(1) file1:
MapReduce is simple
(2) file2:
MapReduce is powerful is simple
(3) file3:
Hello MapReduce bye MapReduce
Expected output:
MapReduce 4

(2) Problem analysis

To find the most frequent word, the only information we need to track is each word and its frequency.

(3) Implementation steps
1) Map phase
First, the default TextInputFormat class processes the input files, producing each line of text together with its byte offset. The map method must then parse each input <key, value> pair to extract the two pieces of information we need, the word and its count, emitting a <word, 1> pair for every token in the line.
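For example, the single line of file2 is emitted by the Map phase as the following pairs:

<MapReduce, 1>  <is, 1>  <powerful, 1>  <is, 1>  <simple, 1>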
2) Combine phase

After the map method runs, the Combine phase sums the values that share the same key, yielding each word's frequency within one map task's output; this output then becomes the input of the Reduce phase.
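Continuing the example, the combiner collapses file2's map output into one pair per word:

<MapReduce, 1>  <is, 2>  <powerful, 1>  <simple, 1>

After the shuffle, the reducer therefore receives <MapReduce, {1, 1, 2}> from the three files, which the Reduce phase below totals to 4.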
3) Reduce phase

After the two phases above, the Reduce phase only needs to sum the values for each key, keep track of the word with the highest total, and write that single word out once all keys have been processed (in cleanup).

(4) Code implementation
package com.mk.mapreduce;

import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;
import java.net.URI;
import java.util.StringTokenizer;

public class MaxWord {

    // Map phase: split each line into words and emit a <word, 1> pair per token.
    public static class MaxWordMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

        private final Text newKey = new Text();
        private final IntWritable newValue = new IntWritable(1);

        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            if (StringUtils.isBlank(value.toString())) {
                System.out.println("blank line");
                return;
            }
            // StringTokenizer splits on whitespace by default.
            StringTokenizer tokenizer = new StringTokenizer(value.toString());
            while (tokenizer.hasMoreTokens()) {
                String word = tokenizer.nextToken();
                newKey.set(word);
                context.write(newKey, newValue);
            }
        }
    }

    // Combine phase: sum the counts for each word locally before the shuffle.
    public static class MaxWordCombiner extends Reducer<Text, IntWritable, Text, IntWritable> {

        private final IntWritable newValue = new IntWritable();

        @Override
        protected void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            int count = 0;
            for (IntWritable v : values) {
                count += v.get();
            }
            newValue.set(count);
            context.write(key, newValue);
        }
    }

    // Reduce phase: total each word's count, remember the running maximum, and
    // emit only the most frequent word in cleanup(). This relies on the default
    // single reduce task; with several reducers each would emit its own local maximum.
    public static class MaxWordReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

        private String word = null;
        private int count = 0;

        @Override
        protected void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            int c = 0;
            for (IntWritable v : values) {
                c += v.get();
            }
            if (word == null || count < c) {
                word = key.toString();
                count = c;
            }
        }

        @Override
        protected void cleanup(Context context) throws IOException, InterruptedException {
            if (word != null) {
                context.write(new Text(word), new IntWritable(count));
            }
        }
    }

    public static void main(String[] args)
            throws IOException, ClassNotFoundException, InterruptedException {
        String uri = "hdfs://192.168.150.128:9000";
        String input = "/maxWord/input";
        String output = "/maxWord/output";

        Configuration conf = new Configuration();
        // Allow job submission from a Windows development machine.
        if (System.getProperty("os.name").toLowerCase().contains("win"))
            conf.set("mapreduce.app-submission.cross-platform", "true");

        // Delete any previous output directory, otherwise the job fails to start.
        FileSystem fileSystem = FileSystem.get(URI.create(uri), conf);
        Path path = new Path(output);
        fileSystem.delete(path, true);

        Job job = new Job(conf, "MaxWord");
        job.setJar("./out/artifacts/hadoop_test_jar/hadoop-test.jar");
        job.setJarByClass(MaxWord.class);

        job.setMapperClass(MaxWordMapper.class);
        job.setCombinerClass(MaxWordCombiner.class);
        job.setReducerClass(MaxWordReducer.class);

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        FileInputFormat.addInputPaths(job, uri + input);
        FileOutputFormat.setOutputPath(job, new Path(uri + output));

        boolean ret = job.waitForCompletion(true);
        System.out.println(job.getJobName() + "-----" + ret);
    }
}
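With the three sample files placed under /maxWord/input, the console prints the job name followed by true on success, and the result can be read back from HDFS. Assuming the default single reduce task, the output lands in part-r-00000:

hdfs dfs -cat hdfs://192.168.150.128:9000/maxWord/output/part-r-00000
MapReduce	4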