In the previous post I walked through Hadoop's command-line operations and ran a first MapReduce example. In this post I will continue by explaining how to set up Hadoop on Windows 10 and how to use the Java API to operate on the HDFS file system.
Part 1: Setting up the Hadoop environment on Windows 10
1. Download hadoop-2.9.1.tar.gz from the official site and unzip it to E:\hadoop-2.9.1 (avoid a path containing spaces, such as E:\Program Files; see step 5).
2. Configure the environment variables:
HADOOP_HOME=E:\hadoop-2.9.1
PATH=%PATH%;%HADOOP_HOME%\bin
3. Place the Windows-compiled binaries hadoop.dll and winutils.exe into %HADOOP_HOME%\bin.
4. Copy hadoop.dll into C:\Windows\System32 as well.
5. In E:\hadoop-2.9.1\etc\hadoop\hadoop-env.cmd, set JAVA_HOME to your actual JDK path (the path must not contain spaces, or Hadoop will report an error).
6. Test whether Hadoop is configured correctly by running hadoop version at the command line (a quick programmatic check is sketched below).
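If hadoop version fails at this point, the cause is almost always a wrong HADOOP_HOME/PATH or a missing winutils.exe. Below is a small optional Java sketch (my own hypothetical helper, not part of Hadoop) that checks these preconditions before you run any HDFS client code:

    import java.io.File;

    /** Sanity check for the Windows Hadoop setup described above (hypothetical helper). */
    public class HadoopEnvCheck {
        public static void main(String[] args) {
            // HADOOP_HOME must point at the unpacked distribution, e.g. E:\hadoop-2.9.1
            String home = System.getenv("HADOOP_HOME");
            if (home == null) {
                System.err.println("HADOOP_HOME is not set");
                return;
            }
            System.out.println("HADOOP_HOME = " + home);

            // winutils.exe and hadoop.dll must sit in %HADOOP_HOME%\bin on Windows (steps 3-4)
            System.out.println("winutils.exe present: " + new File(home, "bin/winutils.exe").exists());
            System.out.println("hadoop.dll   present: " + new File(home, "bin/hadoop.dll").exists());

            // Paths with spaces are a common source of startup errors (step 5)
            if (home.contains(" ")) {
                System.err.println("Warning: HADOOP_HOME contains a space; hadoop-env.cmd may fail");
            }
        }
    }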
Part 2: Creating a Maven project hadoop-demo in Eclipse
pom.xml dependency:
    <!-- https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-client -->
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-client</artifactId>
        <version>2.9.1</version>
    </dependency>
Test code: HdfsClientDemo.java
    package com.empire.hadoop.hadoop_demo;

    import java.net.URI;
    import java.util.Iterator;
    import java.util.Map.Entry;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.BlockLocation;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.LocatedFileStatus;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.RemoteIterator;
    import org.junit.Before;
    import org.junit.Test;

    /**
     * HDFS client test
     */
    public class HdfsClientDemo {

        FileSystem fs = null;
        Configuration conf = null;

        /**
         * Initialize a remote client for the Hadoop HDFS file system
         */
        @Before
        public void init() throws Exception {
            conf = new Configuration();
            conf.set("fs.defaultFS", "hdfs://master:9000");
            // When the client talks to HDFS, it acts under a user identity.
            // By default the HDFS client API reads that identity from a JVM
            // parameter: -DHADOOP_USER_NAME=hadoop
            // Obtain a client instance for file system operations:
            /* fs = FileSystem.get(conf); */
            // Without this property the client fails on Windows
            System.setProperty("hadoop.home.dir", "E:\\hadoop-2.9.1");
            // Alternatively, pass the URI and user identity directly.
            // centos-aaron-h1 is the hostname (or domain name) of the namenode;
            // the last argument is the user name.
            fs = FileSystem.get(new URI("hdfs://centos-aaron-h1:9000"), conf, "hadoop");
        }

        /**
         * Upload a file
         */
        @Test
        public void testUpload() throws Exception {
            Thread.sleep(2000);
            fs.copyFromLocalFile(new Path("F:/access.log"), new Path("/access.log.copy"));
            fs.close();
        }

        /**
         * Download a file
         */
        @Test
        public void testDownload() throws Exception {
            fs.copyToLocalFile(new Path("/access.log.copy"), new Path("d:/"));
            fs.close();
        }

        /**
         * Print the configuration entries loaded into conf
         */
        @Test
        public void testConf() {
            Iterator<Entry<String, String>> iterator = conf.iterator();
            while (iterator.hasNext()) {
                Entry<String, String> entry = iterator.next();
                System.out.println(entry.getKey() + "--" + entry.getValue());
            }
        }

        /**
         * Create a directory
         */
        @Test
        public void mkdirTest() throws Exception {
            boolean mkdirs = fs.mkdirs(new Path("/aaa/bbb"));
            System.out.println(mkdirs);
        }

        /**
         * Delete a directory
         */
        @Test
        public void deleteTest() throws Exception {
            boolean delete = fs.delete(new Path("/aaa"), true); // true = delete recursively
            System.out.println(delete);
        }

        /**
         * List files, recursively
         */
        @Test
        public void listTest() throws Exception {
            FileStatus[] listStatus = fs.listStatus(new Path("/"));
            for (FileStatus fileStatus : listStatus) {
                System.out.println(fileStatus.getPath() + "=================" + fileStatus.toString());
                System.out.println(fileStatus.isFile() ? "file" : "directory");
            }
            // listFiles recursively finds all files
            RemoteIterator<LocatedFileStatus> listFiles = fs.listFiles(new Path("/"), true);
            while (listFiles.hasNext()) {
                LocatedFileStatus next = listFiles.next();
                String name = next.getPath().getName();
                Path path = next.getPath();
                System.out.println(name + "---" + path.toString());
                System.out.println(next.isFile() ? "file" : "directory");
                System.out.println("blocksize: " + next.getBlockSize());
                System.out.println("owner: " + next.getOwner());
                System.out.println("Replication: " + next.getReplication());
                System.out.println("Permission: " + next.getPermission());
                System.out.println("Name: " + next.getPath().getName());
                System.out.println("------------------");
                BlockLocation[] blockLocations = next.getBlockLocations();
                for (BlockLocation b : blockLocations) {
                    System.out.println("block offset: " + b.getOffset());
                    System.out.println("block length: " + b.getLength());
                    // the datanode hosts holding this block
                    String[] datanodes = b.getHosts();
                    for (String dn : datanodes) {
                        System.out.println("datanode: " + dn);
                    }
                }
            }
        }

        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", "hdfs://master:9000");
            // Obtain a client instance for file system operations
            FileSystem fs = FileSystem.get(conf);
            fs.copyFromLocalFile(new Path("E:/access.log"), new Path("/access.log.copy"));
            fs.close();
        }
    }
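The demo above uses the high-level copyFromLocalFile/copyToLocalFile helpers. For finer control you can also read an HDFS file as a stream. The following is a minimal sketch of mine (not part of the original demo), assuming the same namenode URI and user as in init(); fs.open() and IOUtils.copyBytes() are standard Hadoop APIs:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.IOUtils;

    /** Stream a file from HDFS to stdout (sketch; assumes the same cluster as above). */
    public class HdfsStreamReadDemo {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            FileSystem fs = FileSystem.get(new URI("hdfs://centos-aaron-h1:9000"), conf, "hadoop");
            // open() returns an FSDataInputStream, which also supports seek() for random access
            try (FSDataInputStream in = fs.open(new Path("/access.log.copy"))) {
                IOUtils.copyBytes(in, System.out, 4096, false); // 4 KB buffer; do not close System.out
            }
            fs.close();
        }
    }

Streaming also gives you seek() for random reads, which the copy helpers do not expose.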
Test results: with the configuration above, everything runs successfully. If the configured paths contain spaces or similar characters, you may see errors saying the hadoop command path or the JDK path cannot be found.
Part 3: Hadoop file systems (FileSystem implementation classes)
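FileSystem is an abstract class: the concrete implementation is chosen from the URI scheme, e.g. hdfs:// maps to DistributedFileSystem and file:// to LocalFileSystem. Here is a small sketch of mine (assuming a reachable namenode at the hostname used earlier) that prints which implementation class FileSystem.get hands back:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    /** Show which FileSystem implementation backs each URI scheme (sketch). */
    public class FsImplDemo {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();

            // file:// resolves to LocalFileSystem (a checksummed wrapper over the local disk)
            FileSystem local = FileSystem.get(new URI("file:///"), conf);
            System.out.println("file://  -> " + local.getClass().getName());

            // hdfs:// resolves to DistributedFileSystem (requires a reachable namenode)
            FileSystem hdfs = FileSystem.get(new URI("hdfs://centos-aaron-h1:9000"), conf, "hadoop");
            System.out.println("hdfs://  -> " + hdfs.getClass().getName());

            hdfs.close();
            local.close();
        }
    }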
In closing, that is all for this post. If you found it useful, please give it a like; and if you are interested in my other articles on servers and big data, or in me, please follow this blog, and feel free to get in touch at any time.