1.下载hadoop插件(hadoop下载包里好像有这个插件)
hadoop-eclipse-plugin-2.7.1分享链接 https://pan.baidu.com/s/1sldBu9n
放到eclipse/plugins文件夹下,重启eclipse
2.window -> preferences 点击确定 找到 hadoop map/reduce 在右窗口填上hadoop安装地址
3.出现一个和控制台同样位置的map/reduce location ,右击空白处 选择new hadoop location
location name填上名字,Map/Reduce (V2) Master的端口填mapred-site.xml中配置的端口。DFS Master填core-site.xml中配置的端口。host都是填localhost。
4.File-->New-->Other-->Map/Reduce Project 创建工程 取名 新建java文件,代码如下
5.源代码如下
package com.filex; import java.io.IOException; import java.util.StringTokenizer; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.Mapper; import org.apache.hadoop.mapreduce.Mapper.Context; import org.apache.hadoop.mapreduce.Reducer; import org.apache.hadoop.mapreduce.Reducer.Context; import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; import org.apache.hadoop.mapreduce.lib.input.TextInputFormat; import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat; public class WordCount { public static void main(String[] args) throws Exception { Configuration conf = new Configuration(); Job job = new Job(conf); job.setJarByClass(WordCount.class); job.setJobName("wordcount"); job.setOutputKeyClass(Text.class); job.setOutputValueClass(IntWritable.class); job.setMapperClass(WordCountMap.class); job.setReducerClass(WordCountReduce.class); job.setInputFormatClass(TextInputFormat.class); job.setOutputFormatClass(TextOutputFormat.class); ///////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////// //下面的两句代码,其中参数意义 //hdfs://localhost:9000/in 表示须要计数的文件夹 计算命令行下:hadoop fs -ls /in 出现的文件 //hdfs://localhost:9000/output 表示储存结果的文件夹(不要建立,同时以前不要存在这个文件夹) //new Path(arg[0]) new Path(arg[1])也能够使用命令行传参的方式传入两个文件夹(不能够直接运行) // FileInputFormat.addInputPath(job, new Path("hdfs://localhost:9000/in")); FileOutputFormat.setOutputPath(job, new Path("hdfs://localhost:9000/output")); job.waitForCompletion(true); } public static class WordCountMap extends Mapper<LongWritable, Text, Text, IntWritable> { private final IntWritable one = new IntWritable(1); private Text word = new Text(); public void map(LongWritable key, Text value, 
Mapper<LongWritable, Text, Text, IntWritable>.Context context) throws IOException, InterruptedException { String line = value.toString(); StringTokenizer token = new StringTokenizer(line); while (token.hasMoreTokens()) { this.word.set(token.nextToken()); context.write(this.word, this.one); } } } public static class WordCountReduce extends Reducer<Text, IntWritable, Text, IntWritable> { public void reduce(Text key, Iterable<IntWritable> values, Reducer<Text, IntWritable, Text, IntWritable>.Context context) throws IOException, InterruptedException { int sum = 0; for (IntWritable val : values) { sum += val.get(); } context.write(key, new IntWritable(sum)); } } }
ps:注意一下注释部分,需要确认你需要计算的文件
6.直接运行 或者导出
7.如果导出,运行命令:
hadoop jar .jar路径 运行的类(含包路径) 类的参数
hadoop jar /home/user/xxx.jar com.filex.WordCount (输入输出文件已经设置好)
hadoop jar /home/user/xxx.jar com.filex.WordCount /in /output ( 输入输出文件未设置好,通过命令行参数传入)。