一、HDFS集群API所需要jar包的maven配置信息 二、從HDFS下載數據文件/上傳文件到HDFS文件系統 思路:1.獲取配置信息 2.設置配置信息(塊大小、副本數) 3.構造客戶端 4.下載數據文件/上傳數據文件 5.關閉資源 (1)下載文件 (2)上傳文件 三、對HDFS系統進行操作的API ...
一、HDFS集群API所需要jar包的maven配置信息
<dependency> <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-common</artifactId> <version>2.8.4</version> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-hdfs</artifactId> <version>2.8.4</version> </dependency>
二、從HDFS下載數據文件/上傳文件到HDFS文件系統
思路:1.獲取配置信息
2.設置配置信息(塊大小、副本數)
3.構造客戶端
4.下載數據文件/上傳數據文件
5.關閉資源
(1)下載文件
/** * @author: PrincessHug * @date: 2019/3/18, 16:10 * @Blog: https://www.cnblogs.com/HelloBigTable/ */ public class HdfsClientDemo02 { public static void main(String[] args) throws URISyntaxException, IOException, InterruptedException { //配置信息對象 Configuration conf = new Configuration(); //設置具體配置信息 conf.set("dfs.replication","2"); //構造客戶端 FileSystem fs = FileSystem.get(new URI("hdfs://192.168.126.128:9000/"), conf, "root"); //下載數據到本地 fs.copyToLocalFile(new Path("/words1.txt"),new Path("f://words1.txt")); //關閉資源 fs.close(); System.out.println("下載完成"); } }
(2)上傳文件
/** * @author: PrincessHug * @date: 2019/3/18, 11:53 * @Blog: https://www.cnblogs.com/HelloBigTable/ */ public class HdfsClientDemo01 { public static void main(String[] args) throws URISyntaxException, IOException, InterruptedException { //配置信息 Configuration conf = new Configuration(); //配置塊大小和副本數 conf.set("dfs.blocksize","64m"); conf.set("dfs.replication","2"); //構造客戶端 FileSystem fs = FileSystem.get(new URI("hdfs://192.168.126.128:9000/"), conf, "root"); //上傳文件到hdfs客戶端 fs.copyFromLocalFile(new Path("/root/love.tsv"),new Path("/love1.tsv")); //關閉資源 fs.close(); System.out.println("上傳成功!"); } }
三、對HDFS系統進行操作的API
/** * @author: PrincessHug * @date: 2019/3/18, 16:16 * @Blog: https://www.cnblogs.com/HelloBigTable/ */ public class HdfsClientDemo { private static FileSystem fs = null; static { Configuration conf = new Configuration(); conf.set("dfs.blocksize","64m"); conf.set("dfs.replication","3"); try { fs = FileSystem.get(new URI("hdfs://192.168.126.128:9000/"),conf,"root"); } catch (IOException e) { e.printStackTrace(); } catch (InterruptedException e) { e.printStackTrace(); } catch (URISyntaxException e) { e.printStackTrace(); } } /** * 創建文件夾方法 * @throws IOException */ public void mkDir(String path) throws IOException { fs.mkdirs(new Path(path)); fs.close(); } /** * 重命名或移動文件 * @param path1 * @param path2 * @throws IOException */ public void hdfsRename(String path1,String path2) throws IOException { fs.rename(new Path(path1),new Path(path2)); fs.close(); } /** * 刪除文件或文件夾 * @param path 路徑 * @throws IOException */ public void delete(String path) throws IOException { fs.delete(new Path(path),true); fs.close(); } /** * 列出hdfs指定的目錄信息 * @param path * @throws IOException */ public void list(String path) throws IOException { RemoteIterator<LocatedFileStatus> iterator = fs.listFiles(new Path(path), true); while (iterator.hasNext()){ //拿數據 LocatedFileStatus status = iterator.next(); System.out.println("文件的路徑為:" + status.getPath()); System.out.println("文件的塊大小為:" + status.getBlockSize()); System.out.println("文件的塊信息為:" + Arrays.toString(status.getBlockLocations())); System.out.println("文件的長度為:" + status.getLen()); System.out.println("文件的副本數為:" + status.getReplication()); System.out.println("====================================================="); } fs.close(); } /** * 判斷時文件還是文件夾 * @param parh * @throws IOException */ public void judgeFileOrDir(String parh) throws IOException { //展示狀態信息 FileStatus[] fileStatuses = fs.listStatus(new Path(parh)); //遍歷所有文件 for (FileStatus fs:fileStatuses){ if (fs.isFile()){ System.out.println("文件-----f------" + fs.getPath().getName()); }else { 
System.out.println("文件-----d------" + fs.getPath().getName()); } } } } public class HdfsDriver { public static void main(String[] args) { HdfsClientDemo hcd = new HdfsClientDemo(); try { //hcd.mkDir("/wyh"); hcd.judgeFileOrDir("/"); hcd.list("/"); } catch (IOException e) { e.printStackTrace(); } } }
四、