赞
踩
前提:服务器中已经配置好了hadoop
本人亲测,以下代码已经跑通,基础功能都可以完成!!!希望对大家有用!!!
cn.et
<!-- hdfs依赖 -->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
<version>3.1.3</version>
</dependency>
创建一个HdfsApiUtils 类,用于实现hdfs的增删改查:
hdfs://地址:9000/新目录
package com.example.springbootonline.utils;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.stereotype.Component;

import java.io.IOException;
import java.net.URI;
import java.util.List;

/**
 * Thin CRUD wrapper around the Hadoop {@link FileSystem} client.
 *
 * <p>Connects once (static initializer) to the NameNode at {@code hdfsUrl}
 * as user {@code hdfsUsername}; all public methods delegate to the shared
 * {@link FileSystem} handle and rethrow {@link IOException} as
 * {@link RuntimeException} so callers are not forced to handle checked
 * exceptions.
 *
 * <p>NOTE(review): the shared {@code hdfs} handle is never closed; acceptable
 * for an application-lifetime singleton, but worth confirming for this app.
 */
@Component
public class HdfsApiUtils {

    /** NameNode RPC address, e.g. hdfs://host:9000. */
    private static String hdfsUrl = "hdfs://地址:9000";

    /** User the client authenticates as (simple auth). */
    private static String hdfsUsername = "root";

    /** Shared client handle, created once for the JVM lifetime. */
    private static FileSystem hdfs;

    static {
        Configuration conf = new Configuration();
        // Required when the client runs outside the cluster network (e.g. a
        // cloud server): resolve DataNodes by hostname rather than the
        // internal IPs the NameNode reports.
        conf.set("dfs.client.use.datanode.hostname", "true");
        try {
            hdfs = FileSystem.get(URI.create(hdfsUrl), conf, hdfsUsername);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Creates a directory, including any missing parents.
     *
     * @param path absolute HDFS path to create
     * @return the result reported by HDFS
     *         (fix: the original discarded {@code mkdirs}' return value and
     *         always reported {@code true})
     */
    public boolean mkdir(String path) {
        try {
            return hdfs.mkdirs(new Path(path));
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Deletes a file or directory (recursively).
     *
     * @param path absolute HDFS path to delete
     * @return {@code true} if the delete succeeded
     */
    public boolean delete(String path) {
        try {
            return hdfs.delete(new Path(path), true);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Renames or moves a file or directory.
     *
     * @param oldFile existing HDFS path
     * @param newFile target HDFS path
     * @return {@code true} if the rename succeeded
     */
    public boolean rename(String oldFile, String newFile) {
        try {
            return hdfs.rename(new Path(oldFile), new Path(newFile));
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Lists the direct children of a path (non-recursive).
     *
     * @param path HDFS directory to list
     * @return status entries for the immediate children
     */
    public FileStatus[] findCurrent(String path) {
        try {
            return hdfs.listStatus(new Path(path));
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Lists all files under a path, recursing into subdirectories.
     *
     * @param path HDFS directory to walk
     * @return lazy iterator over every file below {@code path}
     */
    public RemoteIterator<LocatedFileStatus> findAll(String path) {
        try {
            return hdfs.listFiles(new Path(path), true);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Copies a local file into HDFS.
     *
     * @param localPath source path on the local filesystem
     * @param path      destination path in HDFS
     * @return {@code true} on success (failure always throws, so the original
     *         {@code res = false} before {@code throw} was dead code)
     */
    public boolean upload(String localPath, String path) {
        try {
            hdfs.copyFromLocalFile(new Path(localPath), new Path(path));
            return true;
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Copies a file from HDFS to the local filesystem.
     *
     * @param hdfsPath  source path in HDFS
     * @param localPath destination path on the local filesystem
     * @return {@code true} on success (failure always throws)
     */
    public boolean download(String hdfsPath, String localPath) {
        try {
            hdfs.copyToLocalFile(new Path(hdfsPath), new Path(localPath));
            return true;
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
}
import com.example.springbootonline.utils.HdfsApiUtils; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.RemoteIterator; import org.junit.jupiter.api.Test; import org.springframework.stereotype.Component; import java.io.IOException; /** * Created with IntelliJ IDEA. * * @Author: Print * @Date: 2023/07/17/10:59 * @Description: */ @Component public class HdfsApiUtilsTest { HdfsApiUtils hdfsApiUtils = new HdfsApiUtils(); @Test public void mkdir(){ String newFile = "/file"; System.out.println(hdfsApiUtils.mkdir(newFile)); } @Test public void delete(){ String path = "/aaa"; System.out.println(hdfsApiUtils.delete(path)); } @Test public void rename(){ String oldFile = "/aaa",newFile = "/newAAA"; System.out.println(hdfsApiUtils.rename(oldFile,newFile)); } @Test public void upload(){ String localPath = "F:\\Users\\HP\\Videos\\Captures\\demo.mp4",path = "/abc/aaa"; System.out.println(hdfsApiUtils.upload(localPath,path)); } @Test public void findCurrent(){ String path = "/file"; FileStatus[] fss = hdfsApiUtils.findCurrent(path); for (FileStatus fs:fss) { System.out.println(fs.toString()+"\n"); } System.out.println(); } @Test public void findAll() throws IOException { String path = "/file"; RemoteIterator<LocatedFileStatus> iterator = hdfsApiUtils.findAll(path); while (iterator.hasNext()){ System.out.println(iterator.next().toString()); } } }
好像应该再写一个服务器如何配置hadoop,后面再看有没有时间吧
Copyright © 2003-2013 www.wpsshop.cn 版权所有,并保留所有权利。