Hadoop Counters
0 A counter is like a gauge on a car's dashboard: it is there to surface information as the job runs. Counting can be done in the map phase or in the reduce phase.
1 Example code:
package counter;

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

/**
 * Word count, extended with a custom counter.
 * Test file "hello" contains:
 *   hello you
 *   hello me me me
 * @author zm
 *
 * A counter is like a gauge on a car's dashboard: it is there to surface information.
 */
public class MyWordCounter {

    static String FILE_ROOT = "hdfs://master:9000/";
    static String FILE_INPUT = "hdfs://master:9000/hello";
    static String FILE_OUTPUT = "hdfs://master:9000/out";

    public static void main(String[] args) throws IOException, URISyntaxException,
            InterruptedException, ClassNotFoundException {
        Configuration conf = new Configuration();
        FileSystem fileSystem = FileSystem.get(new URI(FILE_ROOT), conf);
        Path outpath = new Path(FILE_OUTPUT);
        if (fileSystem.exists(outpath)) {
            fileSystem.delete(outpath, true);
        }

        // 0 Define the job
        Job job = new Job(conf);

        // 1.1 Tell the job where the input is. Each line of the HDFS file is parsed
        //     into one <k,v> pair, and map() is called once per pair.
        FileInputFormat.setInputPaths(job, FILE_INPUT);
        // Specify how the input files are parsed into key/value pairs (one pair per line)
        job.setInputFormatClass(TextInputFormat.class);

        // 1.2 Specify the custom map class
        job.setMapperClass(MyMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(LongWritable.class);

        // 1.3 Partitioning
        job.setNumReduceTasks(1);

        // 1.4 TODO sorting and grouping: defaults are used for now
        // 1.5 TODO combiner

        // 2.2 Specify the custom reduce class
        job.setReducerClass(MyReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);

        // 2.3 Specify where to write the output
        FileOutputFormat.setOutputPath(job, outpath);
        job.setOutputFormatClass(TextOutputFormat.class);

        // Run the job
        job.waitForCompletion(true);
    }
}

/**
 * Extends Mapper and overrides map(); Hadoop uses its own writable parameter types.
 * Each line of the HDFS file is parsed into one <k,v> pair, and map() is called once
 * per pair. For the file "hello", map() produces:
 *   <hello,1>, <you,1>, <hello,1>, <me,1>, <me,1>, <me,1>
 *
 * KEYIN    byte offset of the current line
 * VALUEIN  text of the current line
 * KEYOUT   a word occurring in the line
 * VALUEOUT the count for that occurrence, fixed at 1 here
 */
class MyMapper extends Mapper<LongWritable, Text, Text, LongWritable> {

    @Override
    protected void map(LongWritable k1, Text v1, Context context)
            throws IOException, InterruptedException {
        // Define the counter (group "Sensitive Words", name "hello")
        Counter helloCounter = context.getCounter("Sensitive Words", "hello");
        String line = v1.toString();
        if (line.contains("hello")) {
            helloCounter.increment(1);
        }
        // Split the line into words (the input is assumed to be tab-delimited)
        String[] v1s = v1.toString().split("\t");
        for (String word : v1s) {
            context.write(new Text(word), new LongWritable(1));
        }
    }
}

/**
 * reduce() is called once per group:
 *   <hello,{1,1}>, <me,{1,1,1}>, <you,{1}>
 *
 * KEYIN    a word
 * VALUEIN  the per-occurrence counts for that word
 * KEYOUT   each distinct word in the file
 * VALUEOUT the total count for that word across the file
 */
class MyReducer extends Reducer<Text, LongWritable, Text, LongWritable> {

    @Override
    protected void reduce(Text k2, Iterable<LongWritable> v2s, Context ctx)
            throws IOException, InterruptedException {
        long times = 0L;
        for (LongWritable l : v2s) {
            times += l.get();
        }
        ctx.write(k2, new LongWritable(times));
    }
}
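Point 0 noted that counting can also happen in the reduce phase, and it works the same way there: the reducer's Context exposes the same getCounter methods. A minimal sketch (the class name CountingReducer and the "Word Groups"/"groups" counter names are made up for illustration):

import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

class CountingReducer extends Reducer<Text, LongWritable, Text, LongWritable> {
    @Override
    protected void reduce(Text k2, Iterable<LongWritable> v2s, Context ctx)
            throws IOException, InterruptedException {
        // Hypothetical counter: how many distinct keys (groups) reached reduce
        ctx.getCounter("Word Groups", "groups").increment(1);
        long times = 0L;
        for (LongWritable l : v2s) {
            times += l.get();
        }
        ctx.write(k2, new LongWritable(times));
    }
}

Counters can also be keyed by an enum instead of two strings (context.getCounter(SomeEnum.VALUE)), which avoids typos in group and counter names.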
2 After the job runs, the counter values are not written to HDFS; they are only displayed in the job's console output, like this:
............
14/12/08 19:05:20 INFO mapred.JobClient: Counters: 20
14/12/08 19:05:20 INFO mapred.JobClient:   Sensitive Words
14/12/08 19:05:20 INFO mapred.JobClient:     hello=2
......
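The same values can also be read programmatically in the driver once the job finishes, via Job#getCounters(). A minimal sketch, reusing the "Sensitive Words"/"hello" names from the example above; it would replace the plain job.waitForCompletion(true) call in main():

import org.apache.hadoop.mapreduce.Counters;

boolean ok = job.waitForCompletion(true);
if (ok) {
    Counters counters = job.getCounters();
    long helloLines = counters.findCounter("Sensitive Words", "hello").getValue();
    // For the test file above this prints hello=2: two lines contain "hello"
    System.out.println("hello=" + helloLines);
}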