Running a Hadoop example program from Java fails with "org.apache.hadoop.fs.LocalFileSystem cannot be cast to org.apache.hadoop.hdfs.DistributedFileSystem". The code is as follows:
```java
package com.pcitc.hadoop;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

/**
 * Gets the host names of all nodes in the HDFS cluster.
 *
 * @author lenovo
 */
public class GetList {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        conf.set("dfs.default.name", "hdfs://hadoopmaster:9000");
        FileSystem fs = FileSystem.get(conf);
        DistributedFileSystem hdfs = (DistributedFileSystem) fs;
        DatanodeInfo[] dataNodeStats = hdfs.getDataNodeStats();
        String[] names = new String[dataNodeStats.length];
        for (int i = 0; i < dataNodeStats.length; i++) {
            names[i] = dataNodeStats[i].getHostName();
            System.out.println("node" + i + "name" + names[i]);
        }
    }
}
```
Running it throws the following error:
```
Exception in thread "main" java.lang.ClassCastException: org.apache.hadoop.fs.LocalFileSystem cannot be cast to org.apache.hadoop.hdfs.DistributedFileSystem
	at org.apache.hadoop.examples.FindFileOnHDFS.getHDFSNodes(FindFileOnHDFS.java:43)
	at org.apache.hadoop.examples.FindFileOnHDFS.main(FindFileOnHDFS.java:16)
```
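Before looking at the fix, it helps to see which FileSystem implementation the Configuration actually resolves to. A minimal diagnostic sketch (the class name FsCheck is hypothetical; it reuses the same configuration as the code above):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class FsCheck {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("dfs.default.name", "hdfs://hadoopmaster:9000"); // same key as above
        FileSystem fs = FileSystem.get(conf);
        // With no effective default-filesystem setting, both lines show the local FS:
        System.out.println(fs.getClass().getName()); // org.apache.hadoop.fs.LocalFileSystem
        System.out.println(fs.getUri());             // file:///
    }
}
```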
The cause: DistributedFileSystem and LocalFileSystem are both subclasses of FileSystem, and FileSystem.get(conf) here returns a LocalFileSystem instance, the default implementation. To get a DistributedFileSystem, the conf object must point at HDFS. I thought my code already configured conf, yet the error persisted. The real problem is the property key: Hadoop reads the default filesystem URI from fs.default.name (fs.defaultFS in Hadoop 2.x+), not dfs.default.name, so my setting was silently ignored and the default file:/// filesystem was returned. Following a suggestion found online, I passed the HDFS URI to FileSystem.get directly, which works regardless of the configuration key. The fixed code is below (note the changed lines):
```java
package com.pcitc.hadoop;

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

/**
 * Gets the host names of all nodes in the HDFS cluster.
 *
 * @author lenovo
 */
public class GetList {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // conf.set("dfs.default.name", "hdfs://hadoopmaster:9000");
        String uri = "hdfs://hadoopmaster:9000";
        FileSystem fs = FileSystem.get(URI.create(uri), conf);
        DistributedFileSystem hdfs = (DistributedFileSystem) fs;
        DatanodeInfo[] dataNodeStats = hdfs.getDataNodeStats();
        String[] names = new String[dataNodeStats.length];
        for (int i = 0; i < dataNodeStats.length; i++) {
            names[i] = dataNodeStats[i].getHostName();
            System.out.println("node:" + i + ",name:" + names[i]);
        }
    }
}
```
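Alternatively, setting the correct configuration key achieves the same result without passing a URI to FileSystem.get. A minimal sketch, assuming Hadoop 2.x (where fs.defaultFS replaces the deprecated fs.default.name) and the same hadoopmaster:9000 address as above:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class GetListAlt {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // "fs.defaultFS" (Hadoop 2.x+) or "fs.default.name" (Hadoop 1.x) is the key
        // Hadoop actually reads for the default filesystem; "dfs.default.name" is
        // not a recognized property.
        conf.set("fs.defaultFS", "hdfs://hadoopmaster:9000");
        FileSystem fs = FileSystem.get(conf);
        DistributedFileSystem hdfs = (DistributedFileSystem) fs; // cast now succeeds
        System.out.println(hdfs.getUri()); // hdfs://hadoopmaster:9000
    }
}
```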