ChunkFileInfoBO
package org.dromara.common.oss.chunkfile.entity;
import jakarta.validation.constraints.NotEmpty;
import jakarta.validation.constraints.NotNull;
import lombok.Data;
import lombok.NoArgsConstructor;
import java.io.Serializable;
/**
* Chunk file information
*
* @since 2024/2/6 10:24
*/
@Data
@NoArgsConstructor
public class ChunkFileInfoBO implements Serializable {
/**
* Unique identifier of the file content, an MD5 hash (of each chunk and of the whole file)
*/
@NotEmpty(message = "The file content identifier must not be empty")
private String identifier;
/**
* File name
*/
@NotEmpty(message = "The file name must not be empty")
private String filename;
/**
* Chunk size; together with totalSize it determines the total number of chunks
*/
private Long chunkSize;
/**
* Total file size
*/
private Long totalSize;
/**
* Total number of chunks
*/
@NotNull(message = "The total number of chunks must not be empty")
private Integer totalChunks;
/**
* Chunk number; numbering starts from 1, not from 0
*/
@NotNull(message = "The current chunk number must not be empty")
private Integer chunkNumber;
/**
* Size of the current chunk
*/
@NotNull(message = "The current chunk size must not be empty")
private Long currentChunkSize;
/**
* Unique identifier of the upload request generated by the frontend
*/
@NotEmpty(message = "The upload request identifier must not be empty")
private String requestId;
/**
* Unique identifier of the upload on the backend (multipart uploadId)
*/
private String uploadId;
/**
* File type suffix
*/
private String suffix;
/**
* Original file name
*/
private String newFilename;
/**
* Relative path
*/
private String relativePath;
}
FileMergeBO
package org.dromara.common.oss.chunkfile.entity;
import jakarta.validation.constraints.NotEmpty;
import jakarta.validation.constraints.NotNull;
import lombok.Data;
import lombok.NoArgsConstructor;
import java.io.Serializable;
/**
* Parameters for merging the uploaded chunks
*
* @since 2024/2/6 14:24
*/
@Data
@NoArgsConstructor
public class FileMergeBO implements Serializable {
/**
* File name
*/
@NotEmpty(message = "The file name must not be empty")
private String filename;
/**
* Unique identifier of the file content
*/
@NotEmpty(message = "The file content identifier must not be empty")
private String identifier;
/**
* Unique identifier of the upload request generated by the frontend
*/
@NotEmpty(message = "The upload request identifier must not be empty")
private String requestId;
/**
* Total number of chunks
*/
@NotNull(message = "The total number of chunks must not be empty")
private Integer totalChunks;
}
FileUploadResult
package org.dromara.common.oss.chunkfile.entity;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
import java.util.Set;
/**
* Result returned for a chunk file upload
*
* @since 2024/2/6 10:25
*/
@Data
@NoArgsConstructor
@AllArgsConstructor
@Builder
public class FileUploadResult {
/**
* Whether to skip the upload (already uploaded content is skipped)
*/
private Boolean skipUpload;
/**
* Part numbers of the chunks that have already been uploaded
*/
private Set<String> uploaded;
/**
* Whether a merge step is required
*/
private Boolean needMerge;
/**
* Current chunk number
*/
private Integer currentChunkNum;
/**
* Unique identifier of the file content, an MD5 hash (of each chunk and of the whole file)
*/
private String identifier;
/**
* File name
*/
private String fileName;
/**
* Unique identifier of the upload request generated by the frontend
*/
private String requestId;
/**
* Primary key of the sys_oss record
*/
private Long ossFileId;
/**
* URL of the file after upload
*/
private String url;
}
SysOssRedisDTO
package org.dromara.common.oss.chunkfile.entity;
import lombok.Data;
import lombok.NoArgsConstructor;
import java.io.Serializable;
/**
* Object stored in the Redis cache
*
* @since 2024/2/6 14:08
*/
@Data
@NoArgsConstructor
public class SysOssRedisDTO implements Serializable {
/**
* Unique identifier of the upload on the backend (multipart uploadId)
*/
private String uploadId;
/**
* URL of the file after upload
*/
private String url;
/**
* Full file path name in OSS
*/
private String filePathName;
/**
* Unique identifier of the file content, an MD5 hash (of each chunk and of the whole file)
*/
private String identifier;
/**
* Storage service provider
*/
private String ossService;
}
FileUploadStatus
package org.dromara.common.oss.chunkfile.enumd;
import lombok.AllArgsConstructor;
import lombok.Getter;
/**
* Upload status enum for chunk files
*
* @since 2024/2/6 11:05
*/
@Getter
@AllArgsConstructor
public enum FileUploadStatus {
/**
* Uploading
*/
UPLOADING("0", "Upload in progress"),
/**
* Upload completed
*/
UPLOADED("1", "Upload completed");
/**
* Status code
*/
private final String code;
/**
* Status description
*/
private final String desc;
}
OssConstant
Add the following chunk-upload cache constants to the OssConstant constants class:
/**
* Chunk file: cache key prefix for uploaded parts
*/
String CHUNK_FILE_PART_PREFIX = GlobalConstants.GLOBAL_REDIS_KEY + "chunk_file_part:";
/**
* Chunk file: cache key prefix for file information
*/
String CHUNK_FILE_INFO_PREFIX = GlobalConstants.GLOBAL_REDIS_KEY + "chunk_file_info:";
OssClient
Extend the OssClient class with the methods needed to upload chunks and merge them on the OSS side.
package org.dromara.common.oss.core;
public class OssClient {
// ------ extension methods ---------
public String getPath(String suffix) {
return getPath(properties.getPrefix(), suffix);
}
/**
* Initiate a multipart upload and get its unified uploadId
*
* @param path        full file path (object key)
* @param contentType content type of the file, may be blank
* @return the uploadId of the multipart upload
*/
public String getChunkUploadUnionId(String path, String contentType) {
String uploadId = null;
try {
InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(properties.getBucketName(), path);
if (StringUtils.isNotBlank(contentType)) {
ObjectMetadata metadata = new ObjectMetadata();
metadata.setContentType(contentType);
initRequest.setObjectMetadata(metadata);
}
initRequest.setCannedACL(getAccessPolicy().getAcl());
InitiateMultipartUploadResult initResult = client.initiateMultipartUpload(initRequest);
uploadId = initResult.getUploadId();
} catch (Exception e) {
throw new OssException("Failed to initiate the multipart upload, please check the configuration: [" + e.getMessage() + "]");
}
return uploadId;
}
/**
* Upload a single chunk (multipart upload part)
*
* @param inputStream chunk data stream
* @param path        full file path (object key)
* @param uploadId    multipart upload id
* @param partSize    size of this part in bytes
* @param partNum     part number, starting from 1
* @return the PartETag of the uploaded part
*/
public PartETag chunkUpload(InputStream inputStream, String path,
String uploadId, Long partSize, int partNum) {
if (!(inputStream instanceof ByteArrayInputStream)) {
inputStream = new ByteArrayInputStream(IoUtil.readBytes(inputStream));
}
PartETag partETag = null;
try {
UploadPartRequest uploadPartRequest = new UploadPartRequest()
.withBucketName(properties.getBucketName())
.withKey(path)
.withUploadId(uploadId)
.withPartNumber(partNum)
.withInputStream(inputStream)
.withPartSize(partSize);
UploadPartResult uploadPartResult = client.uploadPart(uploadPartRequest);
partETag = uploadPartResult.getPartETag();
} catch (Exception e) {
throw new OssException("Failed to upload the chunk, please check the configuration: [" + e.getMessage() + "]");
}
return partETag;
}
/**
* Complete the multipart upload and merge all parts
*
* @param path      full file path (object key)
* @param uploadId  multipart upload id
* @param partETags ETags of all uploaded parts
* @return upload result containing the final URL and file name
*/
public UploadResult chunkUploadComplete(String path, String uploadId, List<PartETag> partETags) {
try {
CompleteMultipartUploadRequest completeMultipartUploadRequest =
new CompleteMultipartUploadRequest(properties.getBucketName(), path, uploadId, partETags);
client.completeMultipartUpload(completeMultipartUploadRequest);
} catch (Exception e) {
throw new OssException("Failed to complete the multipart upload, please check the configuration: [" + e.getMessage() + "]");
}
return UploadResult.builder().url(getUrl() + "/" + path).filename(path).build();
}
/**
* List the parts that have already been uploaded for a multipart upload
*
* @param path     full file path (object key)
* @param uploadId multipart upload id
* @return summaries of the uploaded parts
*/
public List<PartSummary> getUploadedParts(String path, String uploadId) {
ListPartsRequest listPartsRequest = new ListPartsRequest(properties.getBucketName(), path, uploadId);
PartListing partListing = client.listParts(listPartsRequest);
return partListing.getParts();
}
/**
* Check whether the object already exists in the bucket
*
* @param path full file path (object key)
* @return true if the object exists, false otherwise
*/
public Boolean checkFileExist(String path) {
return client.doesObjectExist(properties.getBucketName(), path);
}
}
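Taken together, these methods follow the standard S3 multipart-upload lifecycle: initiate, upload each part, then complete. The sketch below is only a minimal illustration of how a caller could drive them end to end; the OssFactory import path, the demo payload and the ".mp4" suffix are assumptions for the example, not part of the article's code.
import com.amazonaws.services.s3.model.PartETag;
import org.dromara.common.oss.core.OssClient;
import org.dromara.common.oss.factory.OssFactory; // assumed package, matching its use later in this article

import java.io.ByteArrayInputStream;
import java.util.ArrayList;
import java.util.List;

public class MultipartUploadSketch {
    public static void main(String[] args) {
        OssClient storage = OssFactory.instance();
        // random object key with the given suffix (extension method added above)
        String path = storage.getPath(".mp4");
        // 1. initiate the multipart upload and remember its uploadId
        String uploadId = storage.getChunkUploadUnionId(path, "video/mp4");

        byte[] data = new byte[10 * 1024 * 1024]; // demo payload; real chunks come from the client
        int partSize = 5 * 1024 * 1024;           // non-final parts must be at least 5 MB
        List<PartETag> partETags = new ArrayList<>();
        int partNum = 1;
        for (int offset = 0; offset < data.length; offset += partSize, partNum++) {
            int len = Math.min(partSize, data.length - offset);
            // 2. upload each part; part numbers start from 1
            partETags.add(storage.chunkUpload(
                new ByteArrayInputStream(data, offset, len), path, uploadId, (long) len, partNum));
        }
        // 3. complete the upload, which merges the parts on the OSS side
        storage.chunkUploadComplete(path, uploadId, partETags);
    }
}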
RedisUtils
Add the following two methods to the RedisUtils class:
public static <V> V setCacheMapValueIfAbsent(final String key, final String mapKey, V mapValue) {
RMap<String, V> rMap = CLIENT.getMap(key);
// putIfAbsent returns the previously cached value, or null if the entry was absent
V oldValue = rMap.putIfAbsent(mapKey, mapValue);
return oldValue != null ? oldValue : mapValue;
}
public static <V> V setCacheMapValueIfExist(final String key, final String mapKey, V mapValue) {
RMap<String, V> rMap = CLIENT.getMap(key);
rMap.putIfExists(mapKey, mapValue);
return mapValue;
}
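A quick illustration of how these two helpers behave, matching the way the chunk-upload service below uses them. The key and the values here are purely illustrative, and the return behaviour assumes the corrected setCacheMapValueIfAbsent above, which hands back the existing entry when one is already cached:
// "abc123" is an illustrative file identifier
String key = OssConstant.CHUNK_FILE_PART_PREFIX + "abc123";
// the entry for part "1" is absent, so an empty placeholder is stored: map = {1=""}
RedisUtils.setCacheMapValueIfAbsent(key, "1", "");
// putIfExists only overwrites entries that already exist: map = {1="etag-1"}
RedisUtils.setCacheMapValueIfExist(key, "1", "etag-1");
// part "2" has no entry yet, so nothing is stored
RedisUtils.setCacheMapValueIfExist(key, "2", "etag-2");
// calling the IfAbsent helper again now returns the cached "etag-1",
// which is how the service later detects that part "1" was already uploaded
String existing = RedisUtils.setCacheMapValueIfAbsent(key, "1", "");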
system module
SysOss
package org.dromara.system.domain;
import com.baomidou.mybatisplus.annotation.TableId;
import com.baomidou.mybatisplus.annotation.TableName;
import lombok.Data;
import lombok.EqualsAndHashCode;
import org.dromara.common.tenant.core.TenantEntity;
/**
* OSS object storage entity
*
* @author Lion Li
*/
@Data
@EqualsAndHashCode(callSuper = true)
@TableName("sys_oss")
public class SysOss extends TenantEntity {
/**
* Object storage primary key
*/
@TableId(value = "oss_id")
private Long ossId;
/**
* File name
*/
private String fileName;
/**
* Original name
*/
private String originalName;
/**
* File suffix
*/
private String fileSuffix;
/**
* URL
*/
private String url;
/**
* Storage service provider
*/
private String service;
// =================== extended fields for chunk upload ===================
/**
* Unique identifier of the file content, an MD5 hash (of each chunk and of the whole file)
*/
private String identifier;
/**
* Unique identifier of the upload request generated by the frontend
*/
private String requestId;
/**
* File upload status
*
* @see org.dromara.common.oss.chunkfile.enumd.FileUploadStatus
*/
private String uploadStatus;
/**
* Unique identifier of the upload on the backend (multipart uploadId)
*/
private String uploadId;
}
SysOssVo: add an extended field to the class
package org.dromara.system.domain.vo;
import org.dromara.common.translation.annotation.Translation;
import org.dromara.common.translation.constant.TransConstant;
import org.dromara.system.domain.SysOss;
import io.github.linpeilie.annotations.AutoMapper;
import lombok.Data;
import java.io.Serial;
import java.io.Serializable;
import java.util.Date;
/**
* OSS object storage view object for sys_oss
*
* @author Lion Li
*/
@Data
@AutoMapper(target = SysOss.class)
public class SysOssVo implements Serializable {
@Serial
private static final long serialVersionUID = 1L;
/**
* Object storage primary key
*/
private Long ossId;
/**
* File name
*/
private String fileName;
/**
* Original name
*/
private String originalName;
/**
* File suffix
*/
private String fileSuffix;
/**
* URL
*/
private String url;
/**
* Creation time
*/
private Date createTime;
/**
* Uploader
*/
private Long createBy;
/**
* Uploader name
*/
@Translation(type = TransConstant.USER_ID_TO_NAME, mapper = "createBy")
private String createByName;
/**
* Storage service provider
*/
private String service;
/**
* Unique identifier of the upload request generated by the frontend
*/
private String requestId;
}
ISysOssService
// ============================== chunk upload ============================
/**
* Chunk upload to OSS: pre-upload check for instant upload
*
* @param chunkFileInfoBO chunk file information
* @return chunk upload result
*/
FileUploadResult getChunkUpload(ChunkFileInfoBO chunkFileInfoBO);
/**
* Upload a chunk file to OSS
*
* @param file            chunk file
* @param chunkFileInfoBO chunk file information
* @return upload result
*/
FileUploadResult chunkUpload(MultipartFile file, ChunkFileInfoBO chunkFileInfoBO);
/**
* Merge the chunks after all of them have been uploaded
*
* @param fileMergeBO merge parameters
* @return vo
*/
SysOssVo chunkUploadMerge(FileMergeBO fileMergeBO);
SysOssServiceImpl
// ============================== chunk upload ============================
/**
* Chunk upload to OSS: pre-upload check for instant upload
*
* @param chunkFileInfoBO chunk file information
* @return chunk upload result
*/
@Override
public FileUploadResult getChunkUpload(ChunkFileInfoBO chunkFileInfoBO) {
OssClient storage = OssFactory.instance();
SysOss ossFromDb = baseMapper.selectOne(new LambdaQueryWrapper<SysOss>()
.eq(SysOss::getIdentifier, chunkFileInfoBO.getIdentifier())
.eq(SysOss::getService, storage.getConfigKey())
.last("limit 1"));
// save the file upload record
SysOss newOssFile = saveFileUploadLog(ossFromDb, storage, chunkFileInfoBO.getFilename(),
chunkFileInfoBO.getRequestId(), chunkFileInfoBO.getIdentifier(),
chunkFileInfoBO.getTotalChunks() > 1);
FileUploadResult fileUploadResult = getObjectFromSysOss(newOssFile);
//if the file is already fully uploaded, finish here as an instant upload
if (FileUploadStatus.UPLOADED.getCode().equals(newOssFile.getUploadStatus())) {
fileUploadResult.setSkipUpload(true);
fileUploadResult.setNeedMerge(false);
return fileUploadResult;
} else {
String filePartKey = OssConstant.CHUNK_FILE_PART_PREFIX + chunkFileInfoBO.getIdentifier();
// cache the shared file information if it is not cached yet
putInCache(newOssFile, chunkFileInfoBO.getTotalChunks());
//chunked upload is required
if (chunkFileInfoBO.getTotalChunks() > 1) {
//get the already uploaded part information Map<partNumber, eTag>
Map<String, String> partNumMap = null;
try {
partNumMap = getUploadedPartNums(newOssFile.getFileName(), newOssFile.getUploadId());
} catch (Exception e) {
//if the object already exists in OSS, mark the record as completed directly
if (storage.checkFileExist(newOssFile.getFileName())) {
baseMapper.update(null, new LambdaUpdateWrapper<SysOss>()
.set(SysOss::getUploadStatus, FileUploadStatus.UPLOADED.getCode())
.eq(SysOss::getIdentifier, newOssFile.getIdentifier())
.eq(SysOss::getUploadStatus, FileUploadStatus.UPLOADING.getCode())
.eq(SysOss::getService, newOssFile.getService()));
fileUploadResult.setSkipUpload(true);
fileUploadResult.setNeedMerge(false);
return fileUploadResult;
}
throw new ServiceException(e.getMessage());
}
RedisUtils.setCacheMap(filePartKey, partNumMap);
fileUploadResult.setUploaded(partNumMap.keySet());
}
fileUploadResult.setSkipUpload(false);
fileUploadResult.setNeedMerge(chunkFileInfoBO.getTotalChunks() > 1);
return fileUploadResult;
}
}
/**
* Build the chunk upload result from the entity saved in sys_oss
*
* @param sysOss database entity instance
* @return FileUploadResult
*/
private FileUploadResult getObjectFromSysOss(SysOss sysOss) {
FileUploadResult fileUploadResult = new FileUploadResult();
fileUploadResult.setFileName(sysOss.getOriginalName());
fileUploadResult.setIdentifier(sysOss.getIdentifier());
fileUploadResult.setRequestId(sysOss.getRequestId());
fileUploadResult.setOssFileId(sysOss.getOssId());
fileUploadResult.setUrl(sysOss.getUrl());
return fileUploadResult;
}
/**
* Cache the shared upload information of the file in Redis
*
* @param newOssFile  the sys_oss record created for this upload
* @param totalChunks total number of chunks, used to size the cache expiration
*/
private void putInCache(SysOss newOssFile, int totalChunks) {
String fileInfoKey = OssConstant.CHUNK_FILE_INFO_PREFIX + newOssFile.getIdentifier();
SysOssRedisDTO ossRedisDTO = new SysOssRedisDTO();
ossRedisDTO.setUploadId(newOssFile.getUploadId());
ossRedisDTO.setUrl(newOssFile.getUrl());
ossRedisDTO.setFilePathName(newOssFile.getFileName());
ossRedisDTO.setIdentifier(newOssFile.getIdentifier());
ossRedisDTO.setOssService(newOssFile.getService());
RedisUtils.setObjectIfAbsent(fileInfoKey, ossRedisDTO, Duration.ofMinutes(totalChunks * 2L));
}
/**
* Save the file upload record to sys_oss
*
* @param ossFromDb        record queried from the database by the MD5 identifier, may be null
* @param storage          OSS client instance
* @param originalFileName original file name
* @param requestId        unique identifier of the upload request generated by the frontend
* @param identifier       unique identifier of the file content (MD5)
* @param ifChunkUpload    whether this is a chunked upload (true when totalChunks > 1)
* @return the newly inserted sys_oss entity
*/
private SysOss saveFileUploadLog(SysOss ossFromDb, OssClient storage,
String originalFileName, String requestId,
String identifier, boolean ifChunkUpload) {
String suffix = StringUtils.substring(originalFileName, originalFileName.lastIndexOf("."), originalFileName.length());
//generate a random file path in OSS
String ossFilePath = storage.getPath(suffix);
SysOss newEntity = new SysOss();
newEntity.setRequestId(requestId);
//full path name of the file
newEntity.setFileName(ossFilePath);
newEntity.setOriginalName(originalFileName);
newEntity.setIdentifier(identifier);
newEntity.setFileSuffix(suffix);
newEntity.setUploadStatus(FileUploadStatus.UPLOADING.getCode());
// a null record means the file has never been uploaded before
if (ObjectUtils.isEmpty(ossFromDb)) {
if (ifChunkUpload) {
// obtain the unified multipart upload id (uploadId)
String uploadId = storage.getChunkUploadUnionId(ossFilePath, "");
newEntity.setUploadId(uploadId);
}
newEntity.setService(storage.getConfigKey());
newEntity.setUrl(storage.getUrl() + "/" + ossFilePath);
} else { // a record exists, so the file has been uploaded before
newEntity.setService(ossFromDb.getService());
newEntity.setUrl(ossFromDb.getUrl());
newEntity.setUploadId(ossFromDb.getUploadId());
if (FileUploadStatus.UPLOADED.getCode().equals(ossFromDb.getUploadStatus())) {
newEntity.setUploadStatus(FileUploadStatus.UPLOADED.getCode());
}
//if the same file already exists, reuse its cloud storage file name
newEntity.setFileName(ossFromDb.getFileName());
}
baseMapper.insert(newEntity);
return newEntity;
}
/**
* Get the numbers of the parts that have already been uploaded
*
* @param path     full file path (object key)
* @param uploadId unified multipart upload id
* @return Map<partNumber, eTag>
**/
private Map<String, String> getUploadedPartNums(String path, String uploadId) {
OssClient storage = OssFactory.instance();
List<PartSummary> partSummaryList = storage.getUploadedParts(path, uploadId);
return partSummaryList.stream().collect(Collectors.toMap(ele -> String.valueOf(ele.getPartNumber()), PartSummary::getETag));
}
/**
* Upload a chunk file to OSS
*
* @param file            chunk file
* @param chunkFileInfoBO chunk file information
* @return upload result
*/
@Override
public FileUploadResult chunkUpload(MultipartFile file, ChunkFileInfoBO chunkFileInfoBO) {
FileUploadResult fileUploadResult = getObjectFromChunkFileInfo(chunkFileInfoBO);
OssClient storage = OssFactory.instance();
String fileInfoKey = OssConstant.CHUNK_FILE_INFO_PREFIX + chunkFileInfoBO.getIdentifier();
SysOssRedisDTO ossRedisDTO = RedisUtils.getCacheObject(fileInfoKey);
if (ObjectUtils.isEmpty(ossRedisDTO)) {
throw new ServiceException("未查询到要上传的文件信息");
}
fileUploadResult.setUrl(ossRedisDTO.getUrl());
// cache key for the uploaded parts of this file
String filePartKey = OssConstant.CHUNK_FILE_PART_PREFIX + chunkFileInfoBO.getIdentifier();
// chunk (part) number
Integer partNum = chunkFileInfoBO.getChunkNumber();
String value = RedisUtils.setCacheMapValueIfAbsent(filePartKey, String.valueOf(partNum), "");
if (StringUtils.isEmpty(value)) {
//if there is only one chunk in total, upload it directly with a simple put
if (chunkFileInfoBO.getTotalChunks() <= 1) {
try {
storage.upload(file.getBytes(), ossRedisDTO.getFilePathName(), file.getContentType());
} catch (Exception e) {
throw new ServiceException(e.getMessage());
}
baseMapper.update(null, new LambdaUpdateWrapper<SysOss>()
.set(SysOss::getUploadStatus, FileUploadStatus.UPLOADED.getCode())
.eq(SysOss::getIdentifier, chunkFileInfoBO.getIdentifier())
.eq(SysOss::getService, ossRedisDTO.getOssService()));
RedisUtils.deleteObject(filePartKey);
return fileUploadResult;
}
//otherwise upload this chunk as a multipart part
// size of the current chunk
Long partSize = chunkFileInfoBO.getCurrentChunkSize();
try {
PartETag partETag = storage.chunkUpload(file.getInputStream(), ossRedisDTO.getFilePathName(), ossRedisDTO.getUploadId(), partSize, partNum);
RedisUtils.setCacheMapValueIfExist(filePartKey, String.valueOf(partETag.getPartNumber()), partETag.getETag());
} catch (IOException e) {
throw new ServiceException(e.getMessage());
}
} else {
fileUploadResult.setSkipUpload(true);
// an ETag is already cached for this chunk number, so it was uploaded before and can be skipped
}
return fileUploadResult;
}
private FileUploadResult getObjectFromChunkFileInfo(ChunkFileInfoBO chunkFileInfoBO) {
FileUploadResult fileUploadResult = new FileUploadResult();
fileUploadResult.setSkipUpload(false);
fileUploadResult.setCurrentChunkNum(chunkFileInfoBO.getChunkNumber());
fileUploadResult.setFileName(chunkFileInfoBO.getFilename());
fileUploadResult.setIdentifier(chunkFileInfoBO.getIdentifier());
fileUploadResult.setRequestId(chunkFileInfoBO.getRequestId());
fileUploadResult.setNeedMerge(chunkFileInfoBO.getTotalChunks() > 1);
return fileUploadResult;
}
/**
* Merge the chunks after all of them have been uploaded
*
* @param fileMergeBO merge parameters
* @return vo
*/
@Override
public SysOssVo chunkUploadMerge(FileMergeBO fileMergeBO) {
String identifier = fileMergeBO.getIdentifier();
OssClient storage = OssFactory.instance();
String fileInfoKey = OssConstant.CHUNK_FILE_INFO_PREFIX + identifier;
if (!RedisUtils.isExistsObject(fileInfoKey)) {
throw new ServiceException("未找到该文件上传标识:" + identifier + "对应的文件上传信息");
}
SysOssRedisDTO ossRedisDTO = RedisUtils.getCacheObject(fileInfoKey);
String filePartKey = OssConstant.CHUNK_FILE_PART_PREFIX + identifier;
if (!RedisUtils.isExistsObject(filePartKey)) {
throw new ServiceException("未找到该文件上传标识:" + identifier + "上传成功的分片信息");
}
//get the information of all uploaded parts
List<PartETag> partETagList = getUploadedPartETagList(filePartKey, fileMergeBO.getTotalChunks(), identifier);
try {
storage.chunkUploadComplete(ossRedisDTO.getFilePathName(), ossRedisDTO.getUploadId(), partETagList);
} catch (Exception e) {
SysOss oss = baseMapper.selectOne(new LambdaQueryWrapper<SysOss>().eq(SysOss::getRequestId, fileMergeBO.getRequestId()));
if (FileUploadStatus.UPLOADED.getCode().equals(oss.getUploadStatus())) {
// update the file record and return the result
return this.updateResultEntity(ossRedisDTO, storage, fileMergeBO.getFilename(), fileMergeBO.getRequestId(), filePartKey);
}
throw new ServiceException("[文件合并失败]:" + e.getMessage());
}
return this.updateResultEntity(ossRedisDTO, storage, fileMergeBO.getFilename(), fileMergeBO.getRequestId(), filePartKey);
}
private SysOssVo updateResultEntity(SysOssRedisDTO ossRedisDTO, OssClient storage,
String originalName, String requestId, String filePartKey) {
SysOss oss = new SysOss();
oss.setUploadStatus(FileUploadStatus.UPLOADED.getCode());
baseMapper.update(oss, new LambdaUpdateWrapper<SysOss>()
.eq(SysOss::getIdentifier, ossRedisDTO.getIdentifier())
.eq(SysOss::getUploadStatus, FileUploadStatus.UPLOADING.getCode())
.eq(SysOss::getService, storage.getConfigKey()));
RedisUtils.expire(filePartKey, Duration.ofMinutes(10));
SysOssVo sysOssVo = new SysOssVo();
//sysOssVo.setOssId(ossFileId);
sysOssVo.setRequestId(requestId);
sysOssVo.setService(ossRedisDTO.getOssService());
sysOssVo.setOriginalName(originalName);
sysOssVo.setUrl(ossRedisDTO.getUrl());
return this.matchingUrl(sysOssVo);
}
/**
* Get the information of the uploaded parts from the cache
*
* @param filePartKey Redis key that stores the uploaded parts
* @param totalChunks total number of chunks
* @param identifier  unique identifier of the file content (MD5)
* @return java.util.List<com.amazonaws.services.s3.model.PartETag>
* @author 11298
* @date 20:29 2023/8/22
**/
private List<PartETag> getUploadedPartETagList(String filePartKey, int totalChunks, String identifier) {
Map<String, String> partETagMap = RedisUtils.getCacheMap(filePartKey);
if (partETagMap.size() != totalChunks) {
throw new ServiceException("该文件标识:" + identifier + "对应的分片数据未完全上传成功");
}
Set<String> set = new HashSet<>();
partETagMap.forEach((key, value) -> {
if ("".equals(value)) {
set.add(key);
}
});
if (!set.isEmpty()) {
throw new ServiceException("File upload failed, chunks " + set + " were not uploaded successfully");
}
List<PartETag> partETagList = new ArrayList<>();
partETagMap.entrySet().forEach(ele -> {
partETagList.add(new PartETag(Integer.parseInt(ele.getKey()), ele.getValue()));
});
return partETagList;
}
SysOssController
// ============================== chunk upload ============================
/**
* Chunk upload to OSS: pre-upload check for instant upload
*
* @param chunkFileInfoBO chunk file information
* @return check result
*/
@SaCheckPermission("system:oss:upload")
//@Log(title = "OSS object storage", businessType = BusinessType.INSERT)
@GetMapping(value = "/chunkUpload")
public R<FileUploadResult> chunkUpload(ChunkFileInfoBO chunkFileInfoBO) {
if (ObjectUtil.isNull(chunkFileInfoBO)) {
return R.fail("分片查询条件不能为空");
}
FileUploadResult fileUploadResult = ossService.getChunkUpload(chunkFileInfoBO);
return R.ok(fileUploadResult);
}
/**
* Upload a chunk file to OSS
*
* @param file            chunk file
* @param chunkFileInfoBO chunk file information
* @return R upload result
*/
@SaCheckPermission("system:oss:upload")
//@Log(title = "OSS object storage", businessType = BusinessType.INSERT)
@PostMapping(value = "/chunkUpload", consumes = MediaType.MULTIPART_FORM_DATA_VALUE)
public R<FileUploadResult> chunkUpload(@RequestPart("file") MultipartFile file, ChunkFileInfoBO chunkFileInfoBO) {
if (ObjectUtil.isNull(file)) {
return R.fail("上传文件不能为空");
}
FileUploadResult fileUploadResult = ossService.chunkUpload(file, chunkFileInfoBO);
return R.ok(fileUploadResult);
}
/**
* Complete the chunk upload to OSS by merging the chunks
*
* @param fileMergeBO merge parameters
*/
@SaCheckPermission("system:oss:upload")
@Log(title = "OSS对象存储(分片文件)", businessType = BusinessType.INSERT)
@PostMapping(value = "/chunkUploadMerge")
public R<Map<String, String>> chunkUploadMerge(@Validated @RequestBody FileMergeBO fileMergeBO) {
SysOssVo oss = ossService.chunkUploadMerge(fileMergeBO);
Map<String, String> map = new HashMap<>(4);
map.put("url", oss.getUrl());
map.put("fileName", fileMergeBO.getFilename());
map.put("requestId", fileMergeBO.getRequestId());
map.put("identifier", fileMergeBO.getIdentifier());
return R.ok(map);
}
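For reference, the endpoints above are meant to be called in this order: the GET pre-check first, then one multipart POST per missing chunk (normally issued by the frontend uploader component), and finally the merge call. Below is a rough client-side sketch using the JDK HttpClient; the base path, the demo field values and the Authorization header are assumptions that must be adapted to the actual deployment.
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class ChunkUploadFlowSketch {
    public static void main(String[] args) throws Exception {
        // base path and auth header are assumptions; match them to your deployment
        String base = "http://localhost:8080/system/oss";
        HttpClient http = HttpClient.newHttpClient();

        // 1. pre-check: ask the server which chunks already exist (instant-upload check)
        HttpRequest check = HttpRequest.newBuilder(URI.create(base
                + "/chunkUpload?identifier=md5-of-file&filename=demo.mp4"
                + "&totalChunks=3&chunkNumber=1&currentChunkSize=5242880&requestId=req-001"))
            .header("Authorization", "Bearer <token>")
            .GET().build();
        System.out.println(http.send(check, HttpResponse.BodyHandlers.ofString()).body());

        // 2. every chunk that is not reported as uploaded is then POSTed to the same
        //    /chunkUpload path as multipart/form-data (the file part plus the
        //    ChunkFileInfoBO fields); the frontend uploader component handles this step.

        // 3. merge: tell the server that all chunks have been uploaded
        String mergeJson = "{\"filename\":\"demo.mp4\",\"identifier\":\"md5-of-file\","
            + "\"requestId\":\"req-001\",\"totalChunks\":3}";
        HttpRequest merge = HttpRequest.newBuilder(URI.create(base + "/chunkUploadMerge"))
            .header("Authorization", "Bearer <token>")
            .header("Content-Type", "application/json")
            .POST(HttpRequest.BodyPublishers.ofString(mergeJson))
            .build();
        System.out.println(http.send(merge, HttpResponse.BodyHandlers.ofString()).body());
    }
}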