当前位置:   article > 正文

记录一次AmazonS3大文件切片上传_aws s3 sdk uploadpart

aws s3 sdk uploadpart

## 大文件上传无非以下几个步骤

1、文件上传初始化

2、上传切片(需要前端对文件进行切片操作,并发请求切片上传)

3、文件合并

## 不多废话直接上代码,直接复制使用即可

依赖(我这里用的阿里云存储可以根据自己需要引入依赖)

```xml
<dependency>
    <groupId>com.amazonaws</groupId>
    <artifactId>aws-java-sdk-s3</artifactId>
    <version>1.12.261</version>
</dependency>
<!-- 阿里云存储 -->
<dependency>
    <groupId>com.aliyun.oss</groupId>
    <artifactId>aliyun-sdk-oss</artifactId>
    <version>3.10.2</version>
</dependency>
```

初始化AmazonS3

/**
 * Auto-configuration that exposes a single {@link AmazonS3} client built from {@link OssProperties}.
 * Works against any S3-compatible store (AWS S3, Aliyun OSS, MinIO, ...) via a custom endpoint.
 */
@Configuration
@RequiredArgsConstructor
@EnableConfigurationProperties(OssProperties.class)
public class OssAutoConfiguration {

    /**
     * Builds the S3 client from the configured endpoint, region, credentials and
     * connection limit. Only created when no other {@link AmazonS3} bean exists.
     *
     * @param ossProperties externalized "oss.*" configuration
     * @return a configured S3-compatible client
     */
    @Bean
    @ConditionalOnMissingBean
    public AmazonS3 ossClient(OssProperties ossProperties) {
        ClientConfiguration clientConfiguration = new ClientConfiguration();
        clientConfiguration.setMaxConnections(ossProperties.getMaxConnections());
        // Explicit endpoint + region so the client can target non-AWS S3-compatible stores.
        AwsClientBuilder.EndpointConfiguration endpointConfiguration = new AwsClientBuilder.EndpointConfiguration(
                ossProperties.getEndpoint(), ossProperties.getRegion());
        AWSCredentials awsCredentials = new BasicAWSCredentials(ossProperties.getAccessKey(),
                ossProperties.getSecretKey());
        AWSCredentialsProvider awsCredentialsProvider = new AWSStaticCredentialsProvider(awsCredentials);
        // disableChunkedEncoding(): NOTE(review) presumably because some non-AWS providers
        // reject aws-chunked payloads — confirm for your target store.
        // Path-style vs virtual-hosted-style is driven by configuration (see OssProperties).
        return AmazonS3Client.builder().withEndpointConfiguration(endpointConfiguration)
                .withClientConfiguration(clientConfiguration).withCredentials(awsCredentialsProvider)
                .disableChunkedEncoding().withPathStyleAccessEnabled(ossProperties.getPathStyleAccess()).build();
    }
}

ossProperties

/**
 * OSS configuration properties, bound from the "oss.*" prefix.
 */
@Data
@Configuration
@ConfigurationProperties(prefix = "oss")
public class OssProperties {
    /**
     * Object-storage service endpoint.
     */
    private String endpoint;
    /**
     * Region.
     */
    private String region;
    /**
     * true  -> path-style access ({http://endpoint/bucketname}); nginx reverse proxies and
     *          plain S3 support this mode by default.
     * false -> virtual-hosted-style ({http://bucketname.endpoint}); Aliyun OSS and similar
     *          providers must be configured in this mode.
     * Only the URL layout differs.
     */
    private Boolean pathStyleAccess = true;
    /**
     * Access key.
     */
    private String accessKey;
    /**
     * Secret key.
     */
    private String secretKey;
    /**
     * Maximum number of open HTTP connections for the S3 client.
     * NOTE(review): the original comment claimed "default: 100", but the actual
     * field default below is 1000 — documented value corrected to match the code.
     */
    private Integer maxConnections = 1000;
    /**
     * Bucket name.
     */
    private String bucketName;
    /**
     * Public base domain used when building object URLs.
     */
    private String ossUrl;
}

请求参数FileChunkInfo

/**
 * Request payload describing one chunk of a large-file multipart upload.
 */
@Data
public class FileChunkInfo implements Serializable {
    private static final long serialVersionUID = 2353726406791217168L;
    // Upload id assigned when the multipart upload was initialized.
    private String fileUploadId;
    // Index of the current chunk (used as the S3 part number).
    private Integer currentChunkNumber;
    // Nominal chunk size in bytes.
    private Integer chunkSize;
    // MD5 of the whole file (used for instant-upload / dedup checks).
    private String fileMD5;
    // The chunk payload itself.
    private MultipartFile file;
    // Total number of chunks for the file.
    private Integer totalChunks;
    // Original file name.
    private String originalFileName;
    // Object key (path) the file is being uploaded to.
    private String fileUploadPath;
}

ResultEntity

  1. @Data
  2. @AllArgsConstructor
  3. @NoArgsConstructor
  4. public class ResultEntity<T> {
  5. private Integer code;
  6. private String message;
  7. private T data;
  8. public static <T> ResultEntity<T> faill(Integer code, String msg, T t) {
  9. return new ResultEntity<T>(code, msg, t);
  10. }
  11. public static <T> ResultEntity<T> faill(T t) {
  12. return ResultEntity.faill(501, "failed", t);
  13. }
  14. public static <T> ResultEntity<T> success(Integer code, String msg, T t) {
  15. return new ResultEntity<T>(code, msg, t);
  16. }
  17. public static <T> ResultEntity<T> success(T t) {
  18. return ResultEntity.success(200, "success!", t);
  19. }
  20. public static <T> ResultEntity<T> success() {
  21. return ResultEntity.success(200, "success!", null);
  22. }
  23. }

响应参数BigFileResp

/**
 * Response payload for the chunked-upload endpoints (init / part / merge).
 */
@Data
public class BigFileResp implements Serializable {
    private static final long serialVersionUID = 3679861506816410985L;
    // Upload id assigned when the multipart upload was initialized.
    private String fileUploadId;
    // Index of the chunk this response refers to.
    private Integer currentChunkNumber;
    // ETag returned by the store for the uploaded part.
    private PartETag partETag;
    // Part numbers already uploaded (for resumed uploads).
    private List<Integer> finishChunks;
    // Object key (path) the file is being uploaded to.
    private String fileUploadPath;
    // MD5 of the whole file.
    private String fileMD5;
    // Final public URL of the merged file.
    private String url;
    // File name.
    private String fileName;
    // Database id of the stored file record.
    private String ossId;
}

controller

  1. /**
  2. * 文件上传
  3. */
  4. @RestController
  5. @RequestMapping("file")
  6. @RequiredArgsConstructor
  7. public class UploadController {
  8. private final AmazonS3 amazonS3;
  9. private final OssProperties ossProperties;
  10. private final RedisTemplate<String, String> redisTemplate;
  11. /**
  12. * 初始化分片上传
  13. *
  14. * @param fileInfo
  15. * @return
  16. */
  17. @PostMapping("/init")
  18. public ResultEntity<BigFileResp> uploadInit(FileChunkInfo fileInfo) {
  19. // 自己生成文件路径
  20. String path = getPath(fileInfo);
  21. // 初始化
  22. InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(ossProperties.getBucketName(), path);
  23. InitiateMultipartUploadResult multipartUploadResult = amazonS3.initiateMultipartUpload(initRequest);
  24. String uploadId = multipartUploadResult.getUploadId();
  25. BigFileResp response = new BigFileResp();
  26. response.setFileUploadId(uploadId);
  27. response.setFileUploadPath(path);
  28. return ResultEntity.success(response);
  29. }
  30. /**
  31. * 上传分片
  32. *
  33. * @param fileInfo
  34. * @return
  35. * @throws Exception
  36. */
  37. @PostMapping("/part")
  38. public ResultEntity<BigFileResp> uploadPart(FileChunkInfo fileInfo) throws Exception {
  39. UploadPartRequest request = new UploadPartRequest()
  40. .withBucketName(ossProperties.getBucketName())
  41. .withKey(fileInfo.getFileUploadPath())
  42. .withUploadId(fileInfo.getFileUploadId())
  43. .withPartNumber(fileInfo.getCurrentChunkNumber())
  44. .withInputStream(fileInfo.getFile().getInputStream())
  45. .withPartSize(fileInfo.getChunkSize());
  46. // 上传切片
  47. UploadPartResult uploadPartResult = amazonS3.uploadPart(request);
  48. PartETag partETag = uploadPartResult.getPartETag();
  49. String fileUploadId = fileInfo.getFileUploadId();
  50. String etagString = JSONObject.toJSONString(partETag);
  51. // 将已上传文件分片信息存入redis
  52. redisTemplate.opsForHash().put(fileUploadId, fileInfo.getCurrentChunkNumber(), etagString);
  53. BigFileResp response = new BigFileResp();
  54. response.setFileUploadId(fileInfo.getFileUploadId());
  55. response.setPartETag(partETag);
  56. return ResultEntity.success(response);
  57. }
  58. /**
  59. * 文件合并
  60. *
  61. * @param fileInfo
  62. * @return
  63. */
  64. @PostMapping("/merge")
  65. public ResultEntity<BigFileResp> merge(FileChunkInfo fileInfo) {
  66. // 获取上传的etag集合
  67. Map<Object, Object> map = redisTemplate.opsForHash().entries(fileInfo.getFileUploadId());
  68. List<PartETag> etagList = new ArrayList<>();
  69. for (Map.Entry<Object, Object> entry : map.entrySet()) {
  70. String value = (String) entry.getValue();
  71. PartETag partETag = JSONObject.parseObject(value, PartETag.class);
  72. etagList.add(partETag);
  73. }
  74. // 合并分片
  75. CompleteMultipartUploadRequest request = new CompleteMultipartUploadRequest(
  76. ossProperties.getBucketName(),
  77. fileInfo.getFileUploadPath(),
  78. fileInfo.getFileUploadId(),
  79. etagList);
  80. amazonS3.completeMultipartUpload(request);
  81. // 删除缓存
  82. redisTemplate.delete(fileInfo.getFileUploadId());
  83. StringBuilder url = new StringBuilder("");
  84. url.append("https://").append(ossProperties.getOssUrl()).append("/").append(fileInfo.getFileUploadPath());
  85. BigFileResp response = new BigFileResp();
  86. response.setUrl(url.toString());
  87. return ResultEntity.success(response);
  88. }
  89. private String getPath(FileChunkInfo fileInfo) {
  90. String uuid = IdUtil.fastSimpleUUID();
  91. String suffix = fileInfo.getOriginalFileName();
  92. Date now = new Date();
  93. String format = DateFormatUtils.format(now, "yyyy/MM/dd/");
  94. String path = format + uuid + suffix;
  95. return path + suffix;
  96. }
  97. }

## 写在最后

关于文件断点续传,因为文件分片上传时已经将已上传分片信息存入redis,出现网络问题续传时只需查出未上传的分片上传,然后合并文件即可。

关于秒传,只需将文件的MD5存入数据库,当文件上传时查询存在直接返回即可;

断点续传与秒传这两个功能按上述思路自行实现即可!希望对大家有所帮助!

声明:本文内容由网友自发贡献,不代表【wpsshop博客】立场,版权归原作者所有,本站不承担相应法律责任。如您发现有侵权的内容,请联系我们。转载请注明出处:https://www.wpsshop.cn/w/Cpp五条/article/detail/706989
推荐阅读
相关标签
  

闽ICP备14008679号