Frontend code:
```html
<!-- Upload attachment dialog -->
<el-dialog v-dialogDrag title="文件上传" center v-model="uploadVisible" width="60%" @close="handlerClose" destroy-on-close>
  <UploadBigFile class="uploadSlot" @closeFileDialog="closeFileDialog"></UploadBigFile>
</el-dialog>
```
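The dialog relies on `uploadVisible`, `handlerClose` and `closeFileDialog` being defined in the parent page, which the post does not show. A minimal sketch of that wiring, written in the same Options API style as the component below; everything except those three names is illustrative:

```js
// Parent page script: sketch only; adjust the import path to wherever UploadBigFile actually lives.
import UploadBigFile from '@/components/UploadBigFile/index.vue'; // hypothetical path

export default {
  components: { UploadBigFile },
  data() {
    return {
      uploadVisible: false // drives the el-dialog via v-model
    };
  },
  methods: {
    // Open the dialog, e.g. from an "upload" button on the page.
    openUploadDialog() {
      this.uploadVisible = true;
    },
    // @close handler of el-dialog; destroy-on-close recreates the uploader next time.
    handlerClose() {
      this.uploadVisible = false;
    },
    // Emitted by UploadBigFile once a file finishes uploading; usually also refreshes the page's list.
    closeFileDialog() {
      this.uploadVisible = false;
      // this.getList(); // hypothetical: reload the table shown on the page
    }
  }
};
```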
Dialog component code (UploadBigFile):
```vue
<template>
  <!-- Uploader -->
  <uploader
    ref="uploader"
    :options="options"
    :autoStart="false"
    :file-status-text="fileStatusText"
    @file-added="onFileAdded"
    @file-success="onFileSuccess"
    @file-progress="onFileProgress"
    @file-error="onFileError">
    <uploader-unsupport></uploader-unsupport>
    <uploader-drop>
      <div>
        <uploader-btn id="global-uploader-btn" :attrs="attrs" ref="uploadBtn">选择文件<i class="el-icon-upload el-icon--right"></i></uploader-btn>
      </div>
    </uploader-drop>
    <uploader-list></uploader-list>
  </uploader>
</template>

<script>
import { ACCEPT_CONFIG } from '../../../assets/js/config';
import SparkMD5 from 'spark-md5';
import $ from 'jquery'; // assumed dependency: the MD5 progress and status helpers below use jQuery
import { mergeFile } from "@/api/tool/uploadFile";
import { getToken } from "@/utils/auth";

export default {
  data () {
    return {
      options: {
        // Target upload URL (POST by default)
        target: process.env.VITE_APP_BASE_API + "/vm/chunk",
        // Chunk size in bytes; a single chunk is tentatively set to roughly 200 MB
        chunkSize: 204800000,
        // Name of the file field in the upload request; must match the Multipart parameter on the server (default is "file")
        fileParameterName: 'upfile',
        // Maximum number of automatic retries per chunk after a failure
        maxChunkRetries: 3,
        // Enable server-side chunk checking (a GET request to the same target URL)
        testChunks: true,
        headers: { Authorization: "Bearer " + getToken() },
        /*
          Server-side chunk check used for instant upload (秒传) and resumable upload (断点续传).
          The arguments are the Uploader.Chunk instance and the raw response body.
          - This function is only called when the response code is one of successStatuses.
          - If the response code is in permanentErrors, it is skipped and onFileError fires instead, showing "upload failed".
          - For any other response code it is skipped and the chunk is uploaded normally.
          - If this function returns true, the upload request for that chunk is skipped entirely.
        */
        checkChunkUploadedByResponse: function (chunk, response_msg) {
          let objMessage = JSON.parse(response_msg);
          if (objMessage.skipUpload) {
            return true;
          }
          return (objMessage.uploadedChunks || []).indexOf(chunk.offset + 1) >= 0;
        }
      },
      attrs: {
        // Accepted file types
        accept: ACCEPT_CONFIG.getAll()
      },
      fileStatusText: {
        success: '上传成功',
        error: '上传失败',
        uploading: '上传中',
        paused: '暂停',
        waiting: '等待上传'
      },
    }
  },
  methods: {
    onFileAdded(file) {
      this.computeMD5(file);
    },
    /*
      Arguments of the file-success event:
      - rootFile: the root Uploader.File object the uploaded file belongs to (it contains, or equals, the uploaded file);
      - file: the Uploader.File object that just finished successfully;
      - response: the server response body, always a string;
      - chunk: the Uploader.Chunk instance of the file's last chunk; chunk.xhr.status gives the HTTP status code if needed.
    */
    onFileSuccess(rootFile, file, response, chunk) {
      // refProjectId is a reserved field used to associate the attachment with its owner, e.g. an archive or a project
      file.refProjectId = "ommb";
      this.$emit('closeFileDialog');

      mergeFile(file).then(responseData => {
        if (responseData === "Failure") {
          console.log("合并操作未成功");
        }
      }).catch(function (error) {
        console.log("合并后捕获的未知异常:" + error);
      });
    },
    onFileProgress(rootFile, file, chunk) {
      console.log(`上传中 ${file.name},chunk:${chunk.startByte / 1024 / 1024} ~ ${chunk.endByte / 1024 / 1024}`)
    },
    onFileError(rootFile, file, response, chunk) {
      console.log('上传完成后异常信息:' + response);
      this.$message({
        message: response,
        type: 'error'
      })
    },

    /**
     * Compute the file's MD5, used for resumable upload and instant upload
     * @param file
     */
    computeMD5(file) {
      file.pause();

      // Single file size limit: 2 GB
      let fileSizeLimit = 2 * 1024 * 1024 * 1024;
      if (file.size > fileSizeLimit) {
        this.$message({
          showClose: true,
          message: '文件大小不能超过2G'
        });
        file.cancel();
        // Stop here; otherwise the MD5 computation below would still run on the cancelled file
        return;
      }

      let fileReader = new FileReader();
      let time = new Date().getTime();
      let blobSlice = File.prototype.slice || File.prototype.mozSlice || File.prototype.webkitSlice;
      let currentChunk = 0;
      const chunkSize = 10 * 1024 * 1000;
      let chunks = Math.ceil(file.size / chunkSize);
      let spark = new SparkMD5.ArrayBuffer();
      // Hashing the whole file is too slow, so only the first block is used to compute the MD5
      let chunkNumberMD5 = 1;

      loadNext();

      fileReader.onload = (e => {
        spark.append(e.target.result);

        if (currentChunk < chunkNumberMD5) {
          // loadNext() advances currentChunk itself
          loadNext();

          // Show the MD5 progress in real time
          this.$nextTick(() => {
            $(`.myStatus_${file.id}`).text('校验MD5 ' + ((currentChunk / chunks) * 100).toFixed(0) + '%')
          })
        } else {
          let md5 = spark.end();
          file.uniqueIdentifier = md5;
          file.resume();
          // console.log(`MD5 done: ${file.name}, md5: ${md5}, chunks: ${chunks}, size: ${file.size}, took ${new Date().getTime() - time} ms`);
        }
      });

      fileReader.onerror = () => {
        // Arrow function so `this` still points to the component instance
        this.error(`文件${file.name}读取出错,请检查该文件`)
        file.cancel();
      };

      function loadNext() {
        let start = currentChunk * chunkSize;
        let end = ((start + chunkSize) >= file.size) ? file.size : start + chunkSize;

        fileReader.readAsArrayBuffer(blobSlice.call(file.file, start, end));
        currentChunk++;
      }
    },
    close() {
      // The underlying uploader instance is exposed on the <uploader> component ref
      this.$refs.uploader.uploader.cancel();
    },

    /**
     * Custom statuses added on top of the built-in ones: 'md5', 'merging', 'transcoding', 'failed'
     * @param id
     * @param status
     */
    statusSet(id, status) {
      let statusMap = {
        md5: {
          text: '校验MD5',
          bgc: '#fff'
        },
        merging: {
          text: '合并中',
          bgc: '#e2eeff'
        },
        transcoding: {
          text: '转码中',
          bgc: '#e2eeff'
        },
        failed: {
          text: '上传失败',
          bgc: '#e2eeff'
        }
      }

      this.$nextTick(() => {
        $(`<p class="myStatus_${id}"></p>`).appendTo(`.file_${id} .uploader-file-status`).css({
          'position': 'absolute',
          'top': '0',
          'left': '0',
          'right': '0',
          'bottom': '0',
          'zIndex': '1',
          'line-height': 'initial',
          'backgroundColor': statusMap[status].bgc
        }).text(statusMap[status].text);
      })
    },
    statusRemove(id) {
      this.$nextTick(() => {
        $(`.myStatus_${id}`).remove();
      })
    },

    error(msg) {
      this.$notify({
        title: '错误',
        message: msg,
        type: 'error',
        duration: 2000
      })
    }
  }
}
</script>

<style scoped>
.handle-box {
  margin-bottom: 20px;
}

.handle-select {
  width: 120px;
}

.handle-input {
  width: 300px;
  display: inline-block;
}

.table {
  width: 1200px;
  font-size: 14px;
}

.red {
  color: #ff0000;
}

.mr10 {
  margin-right: 10px;
}

.table-td-thumb {
  display: block;
  margin: auto;
  width: 40px;
  height: 40px;
}

.uploadSlot {
  margin: -10px 10px 10px 30px;
}
</style>
```
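The component imports `mergeFile` from `@/api/tool/uploadFile` and `ACCEPT_CONFIG` from `assets/js/config`, neither of which is shown in the post. A minimal sketch of what they might look like, assuming an axios-style `request` helper and a `/vm/mergeFile` merge endpoint; both of those names are assumptions, not taken from the original:

```js
// @/api/tool/uploadFile.js (sketch only; the real URL and request wrapper may differ)
import request from '@/utils/request'; // assumed axios wrapper

// Sends the finished file's metadata (name, uniqueIdentifier, size, refProjectId)
// to the backend merge endpoint consumed by mergeFile(SysFileVO) below.
export function mergeFile(file) {
  return request({
    url: '/vm/mergeFile', // assumed mapping for the merge operation
    method: 'post',
    data: {
      name: file.name,
      uniqueIdentifier: file.uniqueIdentifier,
      size: file.size,
      refProjectId: file.refProjectId
    }
  });
}

// ../../../assets/js/config.js (sketch of ACCEPT_CONFIG)
export const ACCEPT_CONFIG = {
  zip: ['.zip', '.rar', '.tar', '.gz'],
  getAll() {
    // Rendered into the hidden file input's "accept" attribute by vue-simple-uploader
    return [...this.zip];
  }
};
```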
Backend code:
```java
@Service
public class SysChunkServiceImpl implements ISysChunkService {
    private static final Logger logger = LoggerFactory.getLogger(SysChunkServiceImpl.class);

    /**
     * Root path on the local disk where uploaded files are stored
     */
    @Value("${file.path}")
    private String localFilePath;

    @Autowired
    private SysChunkMapper sysChunkMapper;
    @Autowired
    private VersionPackageMapper versionPackageMapper;

    /**
     * Query a chunk record by id
     *
     * @param id chunk record primary key
     * @return chunk record
     */
    @Override
    public SysChunk selectSysChunkById(String id) {
        return sysChunkMapper.selectSysChunkById(id);
    }

    /**
     * Query the list of chunk records
     *
     * @param sysChunk query condition
     * @return chunk record list
     */
    @Override
    public List<SysChunk> selectSysChunkList(SysChunk sysChunk) {
        return sysChunkMapper.selectSysChunkList(sysChunk);
    }

    /**
     * Insert a chunk record
     *
     * @param sysChunk chunk record
     * @return number of affected rows
     */
    @Override
    public int insertSysChunk(SysChunk sysChunk) {
        sysChunk.setId(SnowflakeIdWorker.getUUID());
        return sysChunkMapper.insertSysChunk(sysChunk);
    }

    /**
     * Update a chunk record
     *
     * @param sysChunk chunk record
     * @return number of affected rows
     */
    @Override
    public int updateSysChunk(SysChunk sysChunk) {
        return sysChunkMapper.updateSysChunk(sysChunk);
    }

    /**
     * Delete chunk records in batch
     *
     * @param ids primary keys of the records to delete
     * @return number of affected rows
     */
    @Override
    public int deleteSysChunkByIds(String[] ids) {
        return sysChunkMapper.deleteSysChunkByIds(ids);
    }

    /**
     * Delete a chunk record by id
     *
     * @param id chunk record primary key
     * @return number of affected rows
     */
    @Override
    public int deleteSysChunkById(String id) {
        return sysChunkMapper.deleteSysChunkById(id);
    }

    @Override
    public int deleteSysChunkByIdentifier(String identifier) {
        return sysChunkMapper.deleteSysChunkByIdentifier(identifier);
    }

    /**
     * Delete a version record together with its chunk records and the files on disk
     *
     * @param versionPackage version package to delete
     * @return result (always 1 on success)
     */
    @Transactional(rollbackFor = Exception.class)
    @Override
    public int deleteFile(VersionPackage versionPackage) {
        // Delete the version record
        versionPackageMapper.deleteVersionPackageByVersionId(versionPackage.getVersionId());
        // Delete the version's chunk records
        sysChunkMapper.deleteSysChunkByIdentifier(versionPackage.getIdentifier());
        // Delete the directory where the version file is stored
        FileUtils.delFile(versionPackage.getLocation());

        return 1;
    }

    @Override
    public String uploadChunk(SysChunk chunk) {
        String apiRlt = VmConstants.SUCCESS_CODE;

        MultipartFile file = chunk.getUpfile();

        logger.info("file originName: {}, chunkNumber: {}", file.getOriginalFilename(), chunk.getChunkNumber());

        try {
            byte[] bytes = file.getBytes();
            Path path = Paths.get(generatePath(localFilePath, chunk));
            // Write the chunk to the target path
            Files.write(path, bytes);
            // The mapper insert returns the number of affected rows, so anything below 1 means the record was not saved
            if (insertSysChunk(chunk) < 1) {
                apiRlt = VmConstants.FAIL_CODE;
            }

        } catch (IOException e) {
            logger.error("uploadChunk IOException", e);
            apiRlt = VmConstants.FAIL_CODE;
        }
        return apiRlt;
    }

    @Override
    public UploadResult checkChunk(SysChunk chunk) {
        UploadResult ur = new UploadResult();
        // Full path of the merged file
        String file = localFilePath + "/" + chunk.getIdentifier() + "/" + chunk.getFilename();

        // First check whether the complete file has already been uploaded; if so, tell the
        // frontend to skip the upload entirely (instant upload, 秒传)
        if (fileExists(file)) {
            ur.setSkipUpload(true);
            ur.setLocation(file);
            ur.setMessage("完整文件已存在,直接跳过上传,实现秒传");
            return ur;
        }

        // Otherwise look up which chunks are already recorded in the database and return them,
        // so the frontend can skip those chunks (resumable upload, 断点续传)
        ArrayList<Integer> list = sysChunkMapper.selectChunkNumbers(chunk);
        if (CollectionUtils.isNotEmpty(list)) {
            ur.setSkipUpload(false);
            ur.setUploadedChunks(list);
            ur.setMessage("部分文件块已存在,继续上传剩余文件块,实现断点续传");
            return ur;
        }
        return ur;
    }

    /**
     * Build the storage path of an uploaded chunk
     *
     * @param uploadFolder local root folder
     * @param chunk        chunk info
     * @return full chunk path: uploadFolder/identifier/filename-chunkNumber
     */
    public String generatePath(String uploadFolder, SysChunk chunk) {
        StringBuilder sb = new StringBuilder();
        sb.append(uploadFolder).append("/").append(chunk.getIdentifier());

        // Build the Path from the concatenated string
        Path path = Paths.get(sb.toString());
        // Create uploadFolder/identifier if it does not exist yet
        if (!Files.isWritable(path)) {
            logger.info("path not exist, create path: {}", sb.toString());
            try {
                Files.createDirectories(path);
            } catch (IOException e) {
                logger.error(e.getMessage(), e);
            }
        }

        return sb.append("/")
                .append(chunk.getFilename())
                .append("-")
                .append(chunk.getChunkNumber()).toString();
    }

    /**
     * Check whether a file exists by its full path
     *
     * @param file full path of the file
     * @return true if the file exists
     */
    public boolean fileExists(String file) {
        Path path = Paths.get(file);
        return Files.exists(path, LinkOption.NOFOLLOW_LINKS);
    }

    @Override
    public String mergeFile(SysFileVO fileInfoVO) {
        String rlt = VmConstants.FAIL;

        // Convert the parameters sent by the frontend uploader into the model object
        VersionPackage fileInfo = buildVersionPackage(fileInfoVO);

        LoginUser user = SecurityUtils.getLoginUser();
        Date now = DateUtils.getNowDate();
        fileInfo.setCreateBy(user.getUsername());
        fileInfo.setCreateTime(now);
        fileInfo.setUpdateBy(user.getUsername());
        fileInfo.setUpdateTime(now);

        String filename = fileInfoVO.getName();
        // Merge the chunks into the final file
        String file = localFilePath + "/" + fileInfo.getIdentifier() + "/" + filename;
        String folder = localFilePath + "/" + fileInfo.getIdentifier();
        String fileSuccess = merge(file, folder, filename);

        fileInfo.setLocation(file);

        // If the merge succeeded, save the record to the database
        if (VmConstants.SUCCESS_CODE.equals(fileSuccess)) {
            if (versionPackageMapper.insertVersionPackage(fileInfo) > 0) {
                rlt = VmConstants.SUCCESS;
            }
        }

        // If the file already exists, only insert a new record when it belongs to a different project
        if (VmConstants.EXIST_CODE.equals(fileSuccess)) {
            List<VersionPackage> tfList = versionPackageMapper.selectVersionPackageList(fileInfo);
            if (tfList != null) {
                if (tfList.isEmpty() || !fileInfo.getSuitScope().equals(tfList.get(0).getSuitScope())) {
                    if (versionPackageMapper.insertVersionPackage(fileInfo) > 0) {
                        rlt = VmConstants.SUCCESS;
                    }
                }
            }
        }
        return rlt;
    }

    /**
     * Build the version package object from the uploaded file info
     *
     * @param fileInfoVO file info sent by the frontend uploader
     * @return version package record
     */
    private VersionPackage buildVersionPackage(SysFileVO fileInfoVO) {
        String filename = fileInfoVO.getName();
        String id = SnowflakeIdWorker.getUUID();

        VersionPackage fileInfo = new VersionPackage();
        fileInfo.setVersionId(id);
        fileInfo.setGenerateDate(new Date());
        fileInfo.setVersionPackage(filename);
        fileInfo.setVersionType("1");
        fileInfo.setVersionRadio("1");
        // The version number is the part between the first underscore and the last dot of the filename
        int startIndex = filename.indexOf("_");
        int endIndex = filename.lastIndexOf(".");
        String versionNo = filename.substring(startIndex + 1, endIndex);
        fileInfo.setVersionNo(versionNo);
        fileInfo.setIdentifier(fileInfoVO.getUniqueIdentifier());
        fileInfo.setTotalSize(fileInfoVO.getSize());
        fileInfo.setSuitScope(fileInfoVO.getRefProjectId());
        fileInfo.setStatus("0");
        return fileInfo;
    }

    /**
     * Merge all chunk files under the folder into the final file
     *
     * @param file     full path of the merged file
     * @param folder   folder that holds the chunk files
     * @param filename final file name
     * @return SUCCESS_CODE / EXIST_CODE / FAIL_CODE
     */
    public String merge(String file, String folder, String filename) {
        // Assume success unless something goes wrong
        String rlt = VmConstants.SUCCESS_CODE;

        try {
            // If the merged file already exists, skip the merge
            if (fileExists(file)) {
                return VmConstants.EXIST_CODE;
            }

            // Otherwise create the target file and merge the chunks into it
            Files.createFile(Paths.get(file));

            Files.list(Paths.get(folder))
                    .filter(path -> !path.getFileName().toString().equals(filename))
                    // Chunk files are named "<filename>-<chunkNumber>"; sort them by chunk number ascending
                    .sorted((o1, o2) -> {
                        String p1 = o1.getFileName().toString();
                        String p2 = o2.getFileName().toString();
                        int c1 = Integer.parseInt(p1.substring(p1.lastIndexOf("-") + 1));
                        int c2 = Integer.parseInt(p2.substring(p2.lastIndexOf("-") + 1));
                        return Integer.compare(c1, c2);
                    })
                    .forEach(path -> {
                        try {
                            // Append each chunk to the merged file
                            Files.write(Paths.get(file), Files.readAllBytes(path), StandardOpenOption.APPEND);
                            // Delete the chunk once it has been appended
                            Files.delete(path);
                        } catch (IOException e) {
                            logger.error(e.getMessage(), e);
                        }
                    });
        } catch (IOException e) {
            logger.error(e.getMessage(), e);
            // Merge failed
            rlt = VmConstants.FAIL_CODE;
        }

        return rlt;
    }

}
```
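The service above is driven by a controller that the post does not include: the frontend posts chunks to `/vm/chunk`, `testChunks: true` makes it issue a GET to the same URL before each chunk, and `mergeFile` calls a separate merge endpoint. A minimal sketch of such a controller, assuming the service interface shown above; the class name, the `/vm/mergeFile` mapping and the raw return types are assumptions, not taken from the original:

```java
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.*;

// Sketch only: it just needs to expose the three operations the frontend relies on.
@RestController
@RequestMapping("/vm")
public class SysChunkController {

    @Autowired
    private ISysChunkService sysChunkService;

    /** Called by vue-simple-uploader before each chunk when testChunks is enabled. */
    @GetMapping("/chunk")
    public UploadResult checkChunk(SysChunk chunk) {
        return sysChunkService.checkChunk(chunk);
    }

    /** Receives one chunk; the multipart field name must match fileParameterName ("upfile"). */
    @PostMapping("/chunk")
    public String uploadChunk(SysChunk chunk) {
        return sysChunkService.uploadChunk(chunk);
    }

    /** Called once by the frontend's mergeFile() after the last chunk succeeds. */
    @PostMapping("/mergeFile")
    public String mergeFile(@RequestBody SysFileVO fileInfoVO) {
        return sysChunkService.mergeFile(fileInfoVO);
    }
}
```

Note that `checkChunk` is returned unwrapped on purpose: the frontend's `checkChunkUploadedByResponse` parses `skipUpload` and `uploadedChunks` from the top level of the JSON body.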