Applying MapReduce (1)
Writing a data-deduplication MapReduce application
一. Prepare the Data
File 1
200001-3-1 a
200001-3-2 b
200001-3-3 c
200001-3-4 d
200001-3-5 a
200001-3-6 b
200001-3-7 c
200001-3-3 c
File 2
200002-3-1 a
200002-3-2 b
200002-3-3 c
200001-3-4 d
200001-3-5 a
200002-3-6 b
200002-3-7 c
200001-3-3 c
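If you want to reproduce the input locally, a small helper along these lines can write the two sample files. This is a sketch: the file names file1.txt and file2.txt are assumptions, and the directory matches the local input path used by the main method in the next section.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;

// Writes the two sample input files used by this example.
public class PrepareInput {
    public static void main(String[] args) throws IOException {
        // Input directory taken from the main method below (assumption).
        String dir = "/zrjapp/hadoop-2.8.1/file";
        Files.createDirectories(Paths.get(dir));
        Files.write(Paths.get(dir, "file1.txt"), String.join("\n",
                "200001-3-1 a", "200001-3-2 b", "200001-3-3 c", "200001-3-4 d",
                "200001-3-5 a", "200001-3-6 b", "200001-3-7 c", "200001-3-3 c").getBytes());
        Files.write(Paths.get(dir, "file2.txt"), String.join("\n",
                "200002-3-1 a", "200002-3-2 b", "200002-3-3 c", "200001-3-4 d",
                "200001-3-5 a", "200002-3-6 b", "200002-3-7 c", "200001-3-3 c").getBytes());
    }
}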
二. Code Implementation
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class Dedup {

    // map copies the input value to the output key and emits it directly
    public static class Map extends Mapper<Object, Text, Text, Text> {

        @Override
        public void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {
            // the whole input line becomes the key; duplicates collapse in the shuffle
            context.write(value, new Text(""));
        }
    }

    // reduce copies the input key to the output key and emits it directly
    public static class Reduce extends Reducer<Text, Text, Text, Text> {

        @Override
        public void reduce(Text key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            context.write(key, new Text(""));
        }
    }

    public static void main(String[] args) throws Exception {
        System.setProperty("hadoop.home.dir", "/zrjapp/hadoop-2.8.1");
        //System.setProperty("hadoop.home.dir", "D:\\hadoop-2.8.1");
        Configuration conf = new Configuration();
        // this setting is essential
        conf.set("mapred.job.tracker", "localhost:9001");
        //conf.set("mapred.job.tracker", "192.168.1.177:9001");
        String[] ioArgs = new String[] {"/zrjapp/hadoop-2.8.1/file", "/zrjapp/hadoop-2.8.1/output"};
        //String[] ioArgs = new String[] {"D:\\hadoop-2.8.1\\file", "D:\\hadoop-2.8.1\\output"};
        String[] otherArgs = new GenericOptionsParser(conf, ioArgs).getRemainingArgs();
        if (otherArgs.length != 2) {
            System.err.println("Usage: Data Deduplication <in> <out>");
            System.exit(2);
        }
        Job job = Job.getInstance(conf, "Data Deduplication");
        job.setJarByClass(Dedup.class);
        // set the Mapper, Combiner and Reducer classes
        job.setMapperClass(Map.class);
        job.setCombinerClass(Reduce.class);
        job.setReducerClass(Reduce.class);
        // set the output types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        // set the input and output directories
        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
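The reducer can safely double as the combiner here because its input and output types are identical and emitting a key once is idempotent, so applying it to map-side partial output changes nothing. A common refinement, shown below as a sketch rather than part of the original article (the class name is hypothetical), is to emit NullWritable instead of an empty Text value, which avoids serializing empty strings; the job setup would then also need job.setOutputValueClass(NullWritable.class).

import java.io.IOException;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;

// Variant of the Map/Reduce pair above using NullWritable as the value type.
public class DedupNullWritable {

    public static class Map extends Mapper<Object, Text, Text, NullWritable> {
        @Override
        public void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {
            // The whole input line becomes the key; duplicates collapse in the shuffle.
            context.write(value, NullWritable.get());
        }
    }

    public static class Reduce extends Reducer<Text, NullWritable, Text, NullWritable> {
        @Override
        public void reduce(Text key, Iterable<NullWritable> values, Context context)
                throws IOException, InterruptedException {
            // Emit each distinct line exactly once.
            context.write(key, NullWritable.get());
        }
    }
}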
If you develop on Windows, the following additional setup is required before the code above can be debugged from an IDE.
1. First, create a Maven project
pom.xml
<dependencies>
    <dependency>
        <groupId>junit</groupId>
        <artifactId>junit</artifactId>
        <version>3.8.1</version>
        <scope>test</scope>
    </dependency>
    <dependency>
        <groupId>org.apache.logging.log4j</groupId>
        <artifactId>log4j-api</artifactId>
        <version>2.2</version>
    </dependency>
    <dependency>
        <groupId>org.apache.logging.log4j</groupId>
        <artifactId>log4j-core</artifactId>
        <version>2.2</version>
    </dependency>
    <dependency>
        <groupId>org.apache.logging.log4j</groupId>
        <artifactId>log4j-1.2-api</artifactId>
        <version>2.2</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-hdfs</artifactId>
        <version>2.8.1</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
        <version>2.8.1</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-common</artifactId>
        <version>2.8.1</version>
    </dependency>
</dependencies>
<build>
    <plugins>
        <plugin>
            <groupId>org.apache.maven.plugins</groupId>
            <artifactId>maven-jar-plugin</artifactId>
            <configuration>
                <archive>
                    <manifest>
                        <mainClass>com.lsw.hadoop.Dedup</mainClass>
                        <addClasspath>true</addClasspath>
                        <classpathPrefix>lib/</classpathPrefix>
                    </manifest>
                </archive>
                <classesDirectory>
                </classesDirectory>
            </configuration>
        </plugin>
        <plugin>
            <artifactId>maven-dependency-plugin</artifactId>
            <executions>
                <execution>
                    <id>copy-dependencies</id>
                    <phase>package</phase>
                    <goals>
                        <goal>copy-dependencies</goal>
                    </goals>
                    <configuration>
                        <outputDirectory>${project.build.directory}/lib</outputDirectory>
                        <overWriteReleases>false</overWriteReleases>
                        <overWriteSnapshots>false</overWriteSnapshots>
                        <overWriteIfNewer>true</overWriteIfNewer>
                        <!-- <excludeTransitive>true</excludeTransitive> exclude transitively referenced jars -->
                    </configuration>
                </execution>
            </executions>
        </plugin>
        <plugin>
            <groupId>org.apache.maven.plugins</groupId>
            <artifactId>maven-assembly-plugin</artifactId>
            <configuration>
                <appendAssemblyId>false</appendAssemblyId>
                <descriptors>
                    <descriptor>${basedir}/assembly.xml</descriptor>
                </descriptors>
            </configuration>
            <executions>
                <execution>
                    <id>make-assembly</id>
                    <phase>package</phase>
                    <goals>
                        <goal>single</goal>
                    </goals>
                </execution>
            </executions>
        </plugin>
    </plugins>
    <pluginManagement>
        <plugins>
            <!-- Ignore/Execute plugin execution -->
            <plugin>
                <groupId>org.eclipse.m2e</groupId>
                <artifactId>lifecycle-mapping</artifactId>
                <version>1.0.0</version>
                <configuration>
                    <lifecycleMappingMetadata>
                        <pluginExecutions>
                            <pluginExecution>
                                <pluginExecutionFilter>
                                    <groupId>org.apache.maven.plugins</groupId>
                                    <artifactId>maven-dependency-plugin</artifactId>
                                    <versionRange>[1.0.0,)</versionRange>
                                    <goals>
                                        <goal>copy-dependencies</goal>
                                        <goal>unpack</goal>
                                    </goals>
                                </pluginExecutionFilter>
                                <action>
                                    <ignore />
                                </action>
                            </pluginExecution>
                        </pluginExecutions>
                    </lifecycleMappingMetadata>
                </configuration>
            </plugin>
        </plugins>
    </pluginManagement>
</build>
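Note that the maven-assembly-plugin configuration above references ${basedir}/assembly.xml, a descriptor file the article does not show. A minimal sketch of what it might contain (the id, format, and fileSet layout are assumptions, chosen to bundle the application jar together with the lib/ directory that maven-dependency-plugin populates):

<assembly>
    <id>bin</id>
    <formats>
        <format>zip</format>
    </formats>
    <includeBaseDirectory>false</includeBaseDirectory>
    <fileSets>
        <!-- the application jar built by maven-jar-plugin -->
        <fileSet>
            <directory>${project.build.directory}</directory>
            <outputDirectory>/</outputDirectory>
            <includes>
                <include>*.jar</include>
            </includes>
        </fileSet>
        <!-- the dependency jars copied by maven-dependency-plugin -->
        <fileSet>
            <directory>${project.build.directory}/lib</directory>
            <outputDirectory>/lib</outputDirectory>
        </fileSet>
    </fileSets>
</assembly>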
2. Configure the local Hadoop environment
Creating the project and writing the MapReduce code is not enough to debug against a remote Hadoop cluster from the local machine; a local Hadoop environment must be configured as well. Otherwise, running the main method throws the exception: "HADOOP_HOME and hadoop.home.dir are unset".
Solution:
(1) Unpack a copy of the Hadoop binary distribution used on the remote side into the local Windows environment, and set the HADOOP_HOME environment variable accordingly.
Add %HADOOP_HOME%\bin and %HADOOP_HOME%\sbin to Path.
(2) Download hadoop.dll and winutils.exe matching your Hadoop version (2.8.1 in this example) and place them, together with their companion files if errors persist (hadoop.exp, hadoop.lib, hadoop.pdb, libwinutils.lib, winutils.pdb), into hadoop/bin; also copy hadoop.dll into C:\Windows\System32. A small sanity-check sketch follows these steps.
(3) Run the main method from the local IDE; the job computes the result locally and creates the output directory.
Inspect the generated result file:
200001-3-1 a
200001-3-2 b
200001-3-3 c
200001-3-4 d
200001-3-5 a
200001-3-6 b
200001-3-7 c
200002-3-1 a
200002-3-2 b
200002-3-3 c
200002-3-6 b
200002-3-7 c
The results have been deduplicated.
(4) Alternatively, build the ZIP package and run it on a Linux machine with Hadoop installed; all dependency jars must be packaged along with it:
java -jar ***.jar
Or run it in the style of the following example:
bin/hadoop jar hadoop-0.0.1-SNAPSHOT.jar com.lcore.hadoop.EventCount /test/input /test/input/out
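As mentioned in step (2), it can save time to verify that the JVM actually sees the local Hadoop installation before launching the job. A minimal sanity-check sketch (the class name is hypothetical):

import java.io.File;

// Quick check that HADOOP_HOME / hadoop.home.dir and winutils.exe are visible
// to the JVM before running the MapReduce job locally on Windows.
public class CheckHadoopHome {
    public static void main(String[] args) {
        String env = System.getenv("HADOOP_HOME");
        String prop = System.getProperty("hadoop.home.dir");
        String home = prop != null ? prop : env;
        System.out.println("HADOOP_HOME=" + env + ", hadoop.home.dir=" + prop);
        if (home == null) {
            System.err.println("Neither HADOOP_HOME nor hadoop.home.dir is set");
            return;
        }
        File winutils = new File(home, "bin" + File.separator + "winutils.exe");
        System.out.println("winutils.exe present: " + winutils.exists());
    }
}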