赞
踩
一般而言,如果您的对象大小达到了 100 MB,您应该考虑使用分段上传,而不是在单个操作中上传对象。
网上的代码大部分要么是单线程执行很慢,要么就是本地的临时文件(File)没有删除,这里参考了网上现有的代码做了一些整合,具体代码如下:
/**
 * Uploads a file to S3 using the multipart-upload API, sending parts concurrently.
 *
 * Fixes over the original article code:
 * - parts are submitted as {@code Callable}s and joined via {@code Future.get()},
 *   so a failed part propagates instead of being silently swallowed by the pool;
 * - part ETags are collected in part-number order (S3 requires ascending parts
 *   on CompleteMultipartUpload), not in completion order;
 * - the CPU-burning {@code while (true) isTerminated()} spin loop is gone;
 * - the local temp file is deleted and the executor shut down in {@code finally},
 *   on both success and failure paths.
 *
 * @param file  the uploaded file received by the controller
 * @param s3Key destination object key in the bucket
 */
public void uploadMultipartFileByPart(MultipartFile file, String s3Key) {
    // TODO: wire in a real client — the original article leaves this to the reader.
    AmazonS3 s3;
    String bucketName = "bucketName";
    ExecutorService exec = Executors.newFixedThreadPool(3);
    long size = file.getSize();
    // 5 MiB is the S3 minimum part size for every part except the last.
    int minPartSize = 5 * 1024 * 1024;
    // Start offset of each part; written only by this thread before the workers
    // are submitted, so a plain ArrayList is sufficient.
    List<Long> positions = new ArrayList<>();
    long filePosition = 0;
    while (filePosition < size) {
        positions.add(filePosition);
        filePosition += Math.min(minPartSize, (size - filePosition));
    }
    log.info("总大小:{},分为{}段", size, positions.size());
    // Step 1: initiate the multipart upload and obtain the upload id.
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, s3Key);
    InitiateMultipartUploadResult initResponse = s3.initiateMultipartUpload(initRequest);
    log.info("开始上传");
    long begin = System.currentTimeMillis();
    File toFile = null;
    try {
        // Spool the MultipartFile to a local file so each worker can read its own
        // offset range independently.
        toFile = multipartFileToFile(file);
        final File partSource = toFile;
        // Step 2: upload each part on the pool; Futures preserve part order and
        // surface any per-part exception.
        List<Future<PartETag>> futures = new ArrayList<>(positions.size());
        for (int i = 0; i < positions.size(); i++) {
            final int partIndex = i;
            futures.add(exec.submit(() -> {
                long partStart = System.currentTimeMillis();
                UploadPartRequest uploadRequest = new UploadPartRequest()
                        .withBucketName(bucketName)
                        .withKey(s3Key)
                        .withUploadId(initResponse.getUploadId())
                        .withPartNumber(partIndex + 1)
                        .withFileOffset(positions.get(partIndex))
                        .withFile(partSource)
                        .withPartSize(Math.min(minPartSize, (size - positions.get(partIndex))));
                PartETag partETag = s3.uploadPart(uploadRequest).getPartETag();
                log.info("第{}段上传耗时:{}", partIndex + 1, (System.currentTimeMillis() - partStart));
                return partETag;
            }));
        }
        // Join in submission order => partETags is already sorted by part number.
        List<PartETag> partETags = new ArrayList<>(futures.size());
        for (Future<PartETag> future : futures) {
            partETags.add(future.get()); // rethrows any part-upload failure
        }
        // Step 3: complete the upload, asking S3 to assemble the parts.
        CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(bucketName, s3Key,
                initResponse.getUploadId(), partETags);
        s3.completeMultipartUpload(compRequest);
    } catch (Exception e) {
        // Abort so S3 does not keep billing for the orphaned parts; keep the
        // full cause in the log instead of only e.getMessage().
        s3.abortMultipartUpload(new AbortMultipartUploadRequest(bucketName, s3Key, initResponse.getUploadId()));
        log.error("Failed to upload " + s3Key, e);
    } finally {
        exec.shutdown();
        // Delete the local spool file on every path, not just on success.
        if (toFile != null && !toFile.delete()) {
            log.warn("Failed to delete temp file {}", toFile.getAbsolutePath());
        }
    }
    log.info("总上传耗时:{}", (System.currentTimeMillis() - begin));
}
/**
 * Copies an uploaded {@link MultipartFile} to a local temporary {@link File}.
 *
 * Fixes over the original article code:
 * - {@code file.equals("")} compared a MultipartFile against a String, which is
 *   always false; the intended empty-upload check is {@code isEmpty()}/size;
 * - the copy now targets a temp file instead of writing the client-supplied
 *   {@code getOriginalFilename()} into the working directory (name collisions,
 *   path traversal via a crafted filename);
 * - streams are closed via try-with-resources even when the copy fails.
 *
 * @param file the uploaded file; may be empty
 * @return a temp file holding the upload's bytes (caller must delete it),
 *         or {@code null} when the upload is null/empty — same contract as before
 * @throws Exception if reading the upload or writing the temp file fails
 */
public static File multipartFileToFile(MultipartFile file) throws Exception {
    if (file == null || file.isEmpty() || file.getSize() <= 0) {
        return null;
    }
    File toFile = File.createTempFile("s3-upload-", ".tmp");
    try (InputStream ins = file.getInputStream();
         OutputStream os = new FileOutputStream(toFile)) {
        byte[] buffer = new byte[8192];
        int bytesRead;
        while ((bytesRead = ins.read(buffer, 0, buffer.length)) != -1) {
            os.write(buffer, 0, bytesRead);
        }
    }
    return toFile;
}
Copyright © 2003-2013 www.wpsshop.cn 版权所有,并保留所有权利。