On a recent project I had to support uploading large video files, which meant implementing chunked upload with resumable (pause/resume) support. I spent a long time going through articles online and tried many of them, and the post below is the one that finally worked for me: the approach is laid out clearly and it includes both the frontend and the backend code. I'm reproducing it here for future reference. Thanks to the original author!
Reference: 基于 vue-simple-uploader 实现大文件断点续传和分片上传 (implementing resumable, chunked large-file uploads with vue-simple-uploader)

The backend is a Spring Boot project. First, the pom.xml:
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.wolfe</groupId>
<artifactId>point-upload</artifactId>
<version>0.0.1-SNAPSHOT</version>
<name>point-upload</name>
<description>point-upload</description>
<properties>
<java.version>1.8</java.version>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
<spring-boot.version>2.3.7.RELEASE</spring-boot.version>
</properties>
<dependencies>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-web</artifactId>
</dependency>
<dependency>
<groupId>org.mybatis.spring.boot</groupId>
<artifactId>mybatis-spring-boot-starter</artifactId>
<version>2.1.4</version>
</dependency>
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
<scope>runtime</scope>
</dependency>
<dependency>
<groupId>org.projectlombok</groupId>
<artifactId>lombok</artifactId>
<optional>true</optional>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-test</artifactId>
<scope>test</scope>
<exclusions>
<exclusion>
<groupId>org.junit.vintage</groupId>
<artifactId>junit-vintage-engine</artifactId>
</exclusion>
</exclusions>
</dependency>
</dependencies>
<dependencyManagement>
<dependencies>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-dependencies</artifactId>
<version>${spring-boot.version}</version>
<type>pom</type>
<scope>import</scope>
</dependency>
</dependencies>
</dependencyManagement>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>3.8.1</version>
<configuration>
<source>1.8</source>
<target>1.8</target>
<encoding>UTF-8</encoding>
</configuration>
</plugin>
<plugin>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-maven-plugin</artifactId>
<version>2.3.7.RELEASE</version>
<configuration>
<mainClass>com.wolfe.pointupload.PointUploadApplication</mainClass>
</configuration>
<executions>
<execution>
<id>repackage</id>
<goals>
<goal>repackage</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>
Next, application.yml. Note that type-aliases-package has to point at the package holding the domain classes (com.wolfe.pointupload.domain) so the short type names used in the mapper XMLs resolve:
server:
  port: 8081
spring:
  application:
    name: point-upload
  datasource:
    driver-class-name: com.mysql.cj.jdbc.Driver
    name: defaultDataSource
    url: jdbc:mysql://localhost:3306/pointupload?serverTimezone=UTC
    username: root
    password: root
  servlet:
    multipart:
      max-file-size: 100MB
      max-request-size: 200MB
mybatis:
  mapper-locations: classpath:mapper/*.xml
  type-aliases-package: com.wolfe.pointupload.domain
config:
  upload-path: E:/Wolfe/uploadPath
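config.upload-path is a custom property, read below by BackFileServiceImpl via @Value("${config.upload-path}"). Point it at any directory the application can write to; the Windows path here is just the author's example.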
package com.wolfe.pointupload.common;

public interface HttpStatus {
    /**
     * Operation succeeded
     */
    int SUCCESS = 200;
    /**
     * Internal server error
     */
    int ERROR = 500;
}
package com.wolfe.pointupload.common;

import java.util.HashMap;

public class AjaxResult extends HashMap<String, Object> {
    private static final long serialVersionUID = 1L;

    /**
     * Status code
     */
    public static final String CODE_TAG = "code";

    /**
     * Response message
     */
    public static final String MSG_TAG = "msg";

    /**
     * Payload object
     */
    public static final String DATA_TAG = "data";

    /**
     * Creates a new AjaxResult representing an empty message.
     */
    public AjaxResult() {
    }

    /**
     * Creates a new AjaxResult.
     *
     * @param code status code
     * @param msg  response message
     */
    public AjaxResult(int code, String msg) {
        super.put(CODE_TAG, code);
        super.put(MSG_TAG, msg);
        // "result" and "needMerge" are read by the vue-simple-uploader frontend
        super.put("result", true);
        super.put("needMerge", true);
    }

    /**
     * Creates a new AjaxResult.
     *
     * @param code status code
     * @param msg  response message
     * @param data payload object
     */
    public AjaxResult(int code, String msg, Object data) {
        super.put(CODE_TAG, code);
        super.put(MSG_TAG, msg);
        super.put("result", true);
        super.put("needMerge", true);
        if (data != null) {
            super.put(DATA_TAG, data);
        }
    }

    /**
     * Returns a success message.
     */
    public static AjaxResult success() {
        return AjaxResult.success("Operation succeeded");
    }

    /**
     * Returns success with a payload.
     */
    public static AjaxResult success(Object data) {
        return AjaxResult.success("Operation succeeded", data);
    }

    /**
     * Returns a success message.
     *
     * @param msg response message
     */
    public static AjaxResult success(String msg) {
        return AjaxResult.success(msg, null);
    }

    /**
     * Returns a success message with a payload.
     *
     * @param msg  response message
     * @param data payload object
     */
    public static AjaxResult success(String msg, Object data) {
        return new AjaxResult(HttpStatus.SUCCESS, msg, data);
    }

    /**
     * Returns an error message.
     */
    public static AjaxResult error() {
        return AjaxResult.error("Operation failed");
    }

    /**
     * Returns an error message.
     *
     * @param msg response message
     */
    public static AjaxResult error(String msg) {
        return AjaxResult.error(msg, null);
    }

    /**
     * Returns an error message with a payload.
     *
     * @param msg  response message
     * @param data payload object
     */
    public static AjaxResult error(String msg, Object data) {
        return new AjaxResult(HttpStatus.ERROR, msg, data);
    }

    /**
     * Returns an error message with a status code.
     *
     * @param code status code
     * @param msg  response message
     */
    public static AjaxResult error(int code, String msg) {
        return new AjaxResult(code, msg, null);
    }
}
package com.wolfe.pointupload.common;

public interface CommonConstant {
    /**
     * Result of an insert or update: 0 = failure, 1 = success.
     * When the record to insert already exists, -1 is returned.
     */
    Integer UPDATE_FAIL = 0;
    Integer UPDATE_EXISTS = -1;
}
package com.wolfe.pointupload.config;

import org.springframework.context.annotation.Configuration;
import org.springframework.web.servlet.config.annotation.CorsRegistry;
import org.springframework.web.servlet.config.annotation.WebMvcConfigurer;

/**
 * Configuration class that allows cross-origin requests.
 * Note: browsers reject the wildcard origin "*" when credentials are enabled,
 * and Spring 5.3+ requires allowedOriginPatterns for that combination, so in
 * production list the concrete frontend origins instead.
 */
@Configuration
public class WebConfig implements WebMvcConfigurer {
    @Override
    public void addCorsMappings(CorsRegistry registry) {
        registry.addMapping("/**")
                .allowedOrigins("*")
                .allowedMethods("GET", "HEAD", "POST", "PUT", "DELETE", "OPTIONS")
                .allowCredentials(true)
                .maxAge(3600)
                .allowedHeaders("*");
    }
}
package com.wolfe.pointupload.domain;

import lombok.Data;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;

/**
 * Response body for the chunk-check request (GET /file/upload).
 */
@Data
public class CheckChunkVO implements Serializable {
    /** true when the whole file already exists on the server (instant upload). */
    private boolean skipUpload = false;
    private String url;
    /** Chunk numbers already received; the frontend skips these on resume. */
    private List<Integer> uploaded = new ArrayList<>();
    private boolean needMerge = true;
    private boolean result = true;
}
package com.wolfe.pointupload.domain;

import lombok.Data;
import org.springframework.web.multipart.MultipartFile;
import java.io.Serializable;

@Data
public class BackChunk implements Serializable {
    /**
     * Primary key
     */
    private Long id;
    /**
     * Current chunk number, starting from 1
     */
    private Integer chunkNumber;
    /**
     * Chunk size
     */
    private Long chunkSize;
    /**
     * Actual size of the current chunk
     */
    private Long currentChunkSize;
    /**
     * Total file size
     */
    private Long totalSize;
    /**
     * File identifier (MD5)
     */
    private String identifier;
    /**
     * File name
     */
    private String filename;
    /**
     * Relative path (for folder uploads)
     */
    private String relativePath;
    /**
     * Total number of chunks
     */
    private Integer totalChunks;
    /**
     * The binary chunk data
     */
    private MultipartFile file;
}
package com.wolfe.pointupload.domain;

import lombok.Data;
import java.io.Serializable;

@Data
public class BackFileList implements Serializable {
    private static final long serialVersionUID = 1L;
    /**
     * Primary key
     */
    private Long id;
    /**
     * File name
     */
    private String filename;
    /**
     * Unique identifier (MD5)
     */
    private String identifier;
    /**
     * Download URL
     */
    private String url;
    /**
     * Local path on the server
     */
    private String location;
    /**
     * Total file size
     */
    private Long totalSize;
}
package com.wolfe.pointupload.mapper;

import com.wolfe.pointupload.domain.BackChunk;
import java.util.List;

/**
 * Mapper interface for file chunk records.
 */
public interface BackChunkMapper {
    /**
     * Queries a chunk record by ID.
     */
    public BackChunk selectBackChunkById(Long id);

    /**
     * Queries a list of chunk records matching the given example.
     */
    public List<BackChunk> selectBackChunkList(BackChunk backChunk);

    /**
     * Inserts a chunk record.
     */
    public int insertBackChunk(BackChunk backChunk);

    /**
     * Updates a chunk record.
     */
    public int updateBackChunk(BackChunk backChunk);

    /**
     * Deletes a chunk record by ID.
     */
    public int deleteBackChunkById(Long id);

    /**
     * Deletes chunk records by file name and MD5 identifier.
     *
     * @author: xjd
     * @date: 2020/7/31 23:43
     */
    int deleteBackChunkByIdentifier(BackChunk backChunk);

    /**
     * Deletes chunk records in batch.
     *
     * @param ids IDs of the records to delete
     */
    public int deleteBackChunkByIds(Long[] ids);
}
package com.wolfe.pointupload.mapper;

import com.wolfe.pointupload.domain.BackFileList;
import java.util.List;

/**
 * Mapper interface for the uploaded-file list.
 */
public interface BackFileListMapper {
    /**
     * Queries an uploaded-file record by ID.
     */
    public BackFileList selectBackFileListById(Long id);

    /**
     * Counts matching uploaded-file records (used to detect duplicates).
     */
    Integer selectSingleBackFileList(BackFileList backFileList);

    /**
     * Queries a list of uploaded-file records.
     */
    public List<BackFileList> selectBackFileListList(BackFileList backFileList);

    /**
     * Inserts an uploaded-file record.
     */
    public int insertBackFileList(BackFileList backFileList);

    /**
     * Updates an uploaded-file record.
     */
    public int updateBackFileList(BackFileList backFileList);

    /**
     * Deletes an uploaded-file record by ID.
     */
    public int deleteBackFileListById(Long id);

    /**
     * Deletes uploaded-file records in batch.
     *
     * @param ids IDs of the records to delete
     */
    public int deleteBackFileListByIds(Long[] ids);
}
<?xml version="1.0" encoding="UTF-8" ?>
<!DOCTYPE mapper
PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
"http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<mapper namespace="com.wolfe.pointupload.mapper.BackFileListMapper">
<resultMap type="BackFileList" id="BackFileListResult">
<result property="id" column="id" />
<result property="filename" column="filename" />
<result property="identifier" column="identifier" />
<result property="url" column="url" />
<result property="location" column="location" />
<result property="totalSize" column="total_size" />
</resultMap>
<sql id="selectBackFileListVo">
select id, filename, identifier, url, location, total_size from t_file_list
</sql>
<select id="selectBackFileListList" parameterType="BackFileList" resultMap="BackFileListResult">
<include refid="selectBackFileListVo"/>
<where>
<if test="filename != null and filename != ''"> and filename = #{filename}</if>
<if test="identifier != null and identifier != ''"> and identifier = #{identifier}</if>
<if test="url != null and url != ''"> and url = #{url}</if>
<if test="location != null and location != ''"> and location = #{location}</if>
<if test="totalSize != null "> and total_size = #{totalSize}</if>
</where>
</select>
<select id="selectBackFileListById" parameterType="Long" resultMap="BackFileListResult">
<include refid="selectBackFileListVo"/>
where id = #{id}
</select>
<select id="selectSingleBackFileList" parameterType="BackFileList" resultType="int">
select count(1) from t_file_list
<where>
<if test="filename != null and filename != ''"> and filename = #{filename}</if>
<if test="identifier != null and identifier != ''"> and identifier = #{identifier}</if>
</where>
</select>
<insert id="insertBackFileList" parameterType="BackFileList" useGeneratedKeys="true" keyProperty="id">
insert into t_file_list
<trim prefix="(" suffix=")" suffixOverrides=",">
<if test="filename != null and filename != ''">filename,</if>
<if test="identifier != null and identifier != ''">identifier,</if>
<if test="url != null and url != ''">url,</if>
<if test="location != null and location != ''">location,</if>
<if test="totalSize != null ">total_size,</if>
</trim>
<trim prefix="values (" suffix=")" suffixOverrides=",">
<if test="filename != null and filename != ''">#{filename},</if>
<if test="identifier != null and identifier != ''">#{identifier},</if>
<if test="url != null and url != ''">#{url},</if>
<if test="location != null and location != ''">#{location},</if>
<if test="totalSize != null ">#{totalSize},</if>
</trim>
</insert>
<update id="updateBackFileList" parameterType="BackFileList">
update t_file_list
<trim prefix="SET" suffixOverrides=",">
<if test="filename != null and filename != ''">filename = #{filename},</if>
<if test="identifier != null and identifier != ''">identifier = #{identifier},</if>
<if test="url != null and url != ''">url = #{url},</if>
<if test="location != null and location != ''">location = #{location},</if>
<if test="totalSize != null ">total_size = #{totalSize},</if>
</trim>
where id = #{id}
</update>
<delete id="deleteBackFileListById" parameterType="Long">
delete from t_file_list where id = #{id}
</delete>
<delete id="deleteBackFileListByIds" parameterType="String">
delete from t_file_list where id in
<foreach item="id" collection="array" open="(" separator="," close=")">
#{id}
</foreach>
</delete>
</mapper>
<?xml version="1.0" encoding="UTF-8" ?>
<!DOCTYPE mapper
PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
"http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<mapper namespace="com.wolfe.pointupload.mapper.BackChunkMapper">
<resultMap type="BackChunk" id="BackChunkResult">
<result property="id" column="id" />
<result property="chunkNumber" column="chunk_number" />
<result property="chunkSize" column="chunk_size" />
<result property="currentChunkSize" column="current_chunk_size" />
<result property="filename" column="filename" />
<result property="identifier" column="identifier" />
<result property="relativePath" column="relative_path" />
<result property="totalChunks" column="total_chunks" />
<result property="totalSize" column="total_size" />
</resultMap>
<sql id="selectBackChunkVo">
select id, chunk_number, chunk_size, current_chunk_size, filename, identifier, relative_path, total_chunks, total_size from t_chunk_info
</sql>
<select id="selectBackChunkList" parameterType="BackChunk" resultMap="BackChunkResult">
<include refid="selectBackChunkVo"/>
<where>
<if test="chunkNumber != null "> and chunk_number = #{chunkNumber}</if>
<if test="chunkSize != null "> and chunk_size = #{chunkSize}</if>
<if test="currentChunkSize != null "> and current_chunk_size = #{currentChunkSize}</if>
<if test="filename != null and filename != ''"> and filename = #{filename}</if>
<if test="identifier != null and identifier != ''"> and identifier = #{identifier}</if>
<if test="relativePath != null and relativePath != ''"> and relative_path = #{relativePath}</if>
<if test="totalChunks != null "> and total_chunks = #{totalChunks}</if>
<if test="totalSize != null "> and total_size = #{totalSize}</if>
</where>
order by chunk_number desc
</select>
<select id="selectBackChunkById" parameterType="Long" resultMap="BackChunkResult">
<include refid="selectBackChunkVo"/>
where id = #{id}
</select>
<insert id="insertBackChunk" parameterType="BackChunk" useGeneratedKeys="true" keyProperty="id">
insert into t_chunk_info
<trim prefix="(" suffix=")" suffixOverrides=",">
<if test="chunkNumber != null ">chunk_number,</if>
<if test="chunkSize != null ">chunk_size,</if>
<if test="currentChunkSize != null ">current_chunk_size,</if>
<if test="filename != null and filename != ''">filename,</if>
<if test="identifier != null and identifier != ''">identifier,</if>
<if test="relativePath != null and relativePath != ''">relative_path,</if>
<if test="totalChunks != null ">total_chunks,</if>
<if test="totalSize != null ">total_size,</if>
</trim>
<trim prefix="values (" suffix=")" suffixOverrides=",">
<if test="chunkNumber != null ">#{chunkNumber},</if>
<if test="chunkSize != null ">#{chunkSize},</if>
<if test="currentChunkSize != null ">#{currentChunkSize},</if>
<if test="filename != null and filename != ''">#{filename},</if>
<if test="identifier != null and identifier != ''">#{identifier},</if>
<if test="relativePath != null and relativePath != ''">#{relativePath},</if>
<if test="totalChunks != null ">#{totalChunks},</if>
<if test="totalSize != null ">#{totalSize},</if>
</trim>
</insert>
<update id="updateBackChunk" parameterType="BackChunk">
update t_chunk_info
<trim prefix="SET" suffixOverrides=",">
<if test="chunkNumber != null ">chunk_number = #{chunkNumber},</if>
<if test="chunkSize != null ">chunk_size = #{chunkSize},</if>
<if test="currentChunkSize != null ">current_chunk_size = #{currentChunkSize},</if>
<if test="filename != null and filename != ''">filename = #{filename},</if>
<if test="identifier != null and identifier != ''">identifier = #{identifier},</if>
<if test="relativePath != null and relativePath != ''">relative_path = #{relativePath},</if>
<if test="totalChunks != null ">total_chunks = #{totalChunks},</if>
<if test="totalSize != null ">total_size = #{totalSize},</if>
</trim>
where id = #{id}
</update>
<delete id="deleteBackChunkById" parameterType="Long">
delete from t_chunk_info where id = #{id}
</delete>
<delete id="deleteBackChunkByIdentifier" parameterType="BackChunk">
delete from t_chunk_info where identifier = #{identifier} and filename = #{filename}
</delete>
<delete id="deleteBackChunkByIds" parameterType="String">
delete from t_chunk_info where id in
<foreach item="id" collection="array" open="(" separator="," close=")">
#{id}
</foreach>
</delete>
</mapper>
package com.wolfe.pointupload.service;

import com.wolfe.pointupload.domain.BackChunk;
import com.wolfe.pointupload.domain.BackFileList;
import com.wolfe.pointupload.domain.CheckChunkVO;
import javax.servlet.http.HttpServletResponse;

/**
 * Service interface for chunk upload, upload-status checks and file merging.
 */
public interface IBackFileService {
    /** Persists one uploaded chunk. */
    int postFileUpload(BackChunk chunk, HttpServletResponse response);

    /** Checks whether the file, or some of its chunks, already exists. */
    CheckChunkVO getFileUpload(BackChunk chunk, HttpServletResponse response);

    int deleteBackFileByIds(Long id);

    /** Merges the uploaded chunks into the final file. */
    int mergeFile(BackFileList fileInfo);
}
package com.wolfe.pointupload.service;

import com.wolfe.pointupload.domain.BackChunk;
import java.util.List;

/**
 * Service interface for file chunk management.
 */
public interface IBackChunkService {
    /**
     * Queries a chunk record by ID.
     */
    public BackChunk selectBackChunkById(Long id);

    /**
     * Queries a list of chunk records.
     */
    public List<BackChunk> selectBackChunkList(BackChunk backChunk);

    /**
     * Inserts a chunk record.
     */
    public int insertBackChunk(BackChunk backChunk);

    /**
     * Updates a chunk record.
     */
    public int updateBackChunk(BackChunk backChunk);

    /**
     * Deletes chunk records in batch.
     *
     * @param ids IDs of the records to delete
     */
    public int deleteBackChunkByIds(Long[] ids);

    /**
     * Deletes a chunk record by ID.
     */
    public int deleteBackChunkById(Long id);
}
package com.wolfe.pointupload.service.impl;

import com.wolfe.pointupload.common.CommonConstant;
import com.wolfe.pointupload.domain.BackChunk;
import com.wolfe.pointupload.domain.BackFileList;
import com.wolfe.pointupload.domain.CheckChunkVO;
import com.wolfe.pointupload.mapper.BackChunkMapper;
import com.wolfe.pointupload.mapper.BackFileListMapper;
import com.wolfe.pointupload.service.IBackFileService;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.web.multipart.MultipartFile;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;
import java.util.List;
import java.util.stream.Collectors;

@Service
@Slf4j
public class BackFileServiceImpl implements IBackFileService {
    @Value("${config.upload-path}")
    private String uploadPath;
    private final static String folderPath = "/file";

    @Autowired
    private BackChunkMapper backChunkMapper;
    @Autowired
    private BackFileListMapper backFileListMapper;

    /**
     * Every uploaded chunk carries the following metadata:
     * chunkNumber: index of the current chunk, starting at 1 (not 0).
     * totalChunks: total number of chunks the file was split into.
     * chunkSize: the nominal chunk size; from totalSize and this value you can derive the
     *            chunk count. Note the last chunk may be larger than this value.
     * currentChunkSize: the actual size of the current chunk.
     * totalSize: total file size.
     * identifier: the file's unique identifier, its MD5 hash.
     * filename: the file name.
     * relativePath: the file's relative path when a folder is uploaded.
     * A chunk may be uploaded more than once. That is not the standard flow, but it does
     * happen in practice, and handling retransmission is one of the library's features.
     * <p>
     * The frontend reads success or failure from the response status:
     * 200 the whole file has been uploaded
     * 201 the chunk was uploaded successfully
     * 500 the first chunk failed; cancel the whole file upload
     * 507 server error; automatically retry this chunk
     */
    @Override
    @Transactional(rollbackFor = Exception.class)
    public int postFileUpload(BackChunk chunk, HttpServletResponse response) {
        int result = CommonConstant.UPDATE_FAIL;
        MultipartFile file = chunk.getFile();
        log.debug("file originName: {}, chunkNumber: {}", file.getOriginalFilename(), chunk.getChunkNumber());
        Path path = Paths.get(generatePath(uploadPath + folderPath, chunk));
        try {
            // Write the chunk to disk, then record it in the database
            Files.write(path, chunk.getFile().getBytes());
            log.debug("file {} written, md5: {}", chunk.getFilename(), chunk.getIdentifier());
            result = backChunkMapper.insertBackChunk(chunk);
        } catch (IOException e) {
            log.error(e.getMessage(), e);
            // 507 makes the frontend retry this chunk
            response.setStatus(507);
            return CommonConstant.UPDATE_FAIL;
        }
        return result;
    }

    @Override
    public CheckChunkVO getFileUpload(BackChunk chunk, HttpServletResponse response) {
        CheckChunkVO vo = new CheckChunkVO();
        // If the file already exists in the uploaded-file list, return skipUpload=true
        // so the frontend performs an instant upload
        BackFileList backFileList = new BackFileList();
        backFileList.setIdentifier(chunk.getIdentifier());
        List<BackFileList> backFileLists = backFileListMapper.selectBackFileListList(backFileList);
        if (backFileLists != null && !backFileLists.isEmpty()) {
            response.setStatus(HttpServletResponse.SC_CREATED);
            vo.setSkipUpload(true);
            return vo;
        }
        BackChunk resultChunk = new BackChunk();
        resultChunk.setIdentifier(chunk.getIdentifier());
        List<BackChunk> backChunks = backChunkMapper.selectBackChunkList(resultChunk);
        // Return the chunkNumbers already stored; the frontend will skip those chunks
        if (backChunks != null && !backChunks.isEmpty()) {
            List<Integer> collect = backChunks.stream().map(BackChunk::getChunkNumber).collect(Collectors.toList());
            vo.setUploaded(collect);
        }
        return vo;
    }

    @Override
    public int deleteBackFileByIds(Long id) {
        return 0;
    }

    @Override
    @Transactional(rollbackFor = Exception.class)
    public int mergeFile(BackFileList fileInfo) {
        String filename = fileInfo.getFilename();
        String file = uploadPath + folderPath + "/" + fileInfo.getIdentifier() + "/" + filename;
        String folder = uploadPath + folderPath + "/" + fileInfo.getIdentifier();
        String url = folderPath + "/" + fileInfo.getIdentifier() + "/" + filename;
        merge(file, folder, filename);
        // If the file is already recorded in the database, return the "exists" flag
        if (backFileListMapper.selectSingleBackFileList(fileInfo) > 0) {
            return CommonConstant.UPDATE_EXISTS;
        }
        fileInfo.setLocation(file);
        fileInfo.setUrl(url);
        int i = backFileListMapper.insertBackFileList(fileInfo);
        if (i > 0) {
            // Once the file record is in, delete its rows from the chunk table to free space
            BackChunk backChunk = new BackChunk();
            backChunk.setIdentifier(fileInfo.getIdentifier());
            backChunk.setFilename(fileInfo.getFilename());
            backChunkMapper.deleteBackChunkByIdentifier(backChunk);
        }
        return i;
    }

    /**
     * Builds the path a chunk file is written to.
     */
    private String generatePath(String uploadFolder, BackChunk chunk) {
        StringBuilder sb = new StringBuilder();
        // <upload folder>/<md5>
        sb.append(uploadFolder).append("/").append(chunk.getIdentifier());
        // Create the <uploadFolder>/<identifier> directory if it does not exist
        if (!Files.isWritable(Paths.get(sb.toString()))) {
            log.info("path not exist, create path: {}", sb.toString());
            try {
                Files.createDirectories(Paths.get(sb.toString()));
            } catch (IOException e) {
                log.error(e.getMessage(), e);
            }
        }
        // <upload folder>/<md5>/<filename>-<chunkNumber>
        return sb.append("/").append(chunk.getFilename()).append("-").append(chunk.getChunkNumber()).toString();
    }

    /**
     * Merges all chunks of a file into the final file.
     *
     * @param targetFile path of the merged file to create
     * @param folder     folder holding the chunk files
     * @param filename   name of the file being assembled
     */
    public static void merge(String targetFile, String folder, String filename) {
        try {
            Files.createFile(Paths.get(targetFile));
            // Chunks are named "<filename>-<chunkNumber>"; append them in ascending chunk order
            Files.list(Paths.get(folder))
                    .filter(path -> !path.getFileName().toString().equals(filename))
                    .sorted((o1, o2) -> {
                        String p1 = o1.getFileName().toString();
                        String p2 = o2.getFileName().toString();
                        int n1 = Integer.parseInt(p1.substring(p1.lastIndexOf("-") + 1));
                        int n2 = Integer.parseInt(p2.substring(p2.lastIndexOf("-") + 1));
                        return Integer.compare(n1, n2);
                    })
                    .forEach(path -> {
                        try {
                            // Append the chunk to the target file
                            Files.write(Paths.get(targetFile), Files.readAllBytes(path), StandardOpenOption.APPEND);
                            // Delete the chunk once it has been merged
                            Files.delete(path);
                        } catch (IOException e) {
                            log.error(e.getMessage(), e);
                        }
                    });
        } catch (IOException e) {
            log.error(e.getMessage(), e);
        }
    }
}
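The merge step depends entirely on the "<filename>-<chunkNumber>" naming produced by generatePath, and the chunks must be appended in ascending numeric order rather than lexicographic order. Here is a minimal standalone sketch (a hypothetical ChunkOrderDemo class, not part of the project) of the ordering rule the comparator above implements:
import java.util.Arrays;
import java.util.Comparator;

public class ChunkOrderDemo {
    public static void main(String[] args) {
        // Chunk files as they might appear on disk, in arbitrary directory order
        String[] chunks = {"video.mp4-3", "video.mp4-1", "video.mp4-10", "video.mp4-2"};
        // Order by the numeric suffix after the last "-", ascending;
        // a plain string sort would put "video.mp4-10" before "video.mp4-2"
        Arrays.sort(chunks, Comparator.comparingInt(
                name -> Integer.parseInt(name.substring(name.lastIndexOf('-') + 1))));
        System.out.println(Arrays.toString(chunks));
        // Prints: [video.mp4-1, video.mp4-2, video.mp4-3, video.mp4-10]
    }
}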
package com.wolfe.pointupload.service.impl;

import com.wolfe.pointupload.domain.BackChunk;
import com.wolfe.pointupload.mapper.BackChunkMapper;
import com.wolfe.pointupload.service.IBackChunkService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.util.List;

/**
 * Service implementation for file chunk management.
 */
@Service
public class BackChunkServiceImpl implements IBackChunkService {
    @Autowired
    private BackChunkMapper backChunkMapper;

    /**
     * Queries a chunk record by ID.
     */
    @Override
    public BackChunk selectBackChunkById(Long id) {
        return backChunkMapper.selectBackChunkById(id);
    }

    /**
     * Queries a list of chunk records.
     */
    @Override
    public List<BackChunk> selectBackChunkList(BackChunk backChunk) {
        return backChunkMapper.selectBackChunkList(backChunk);
    }

    /**
     * Inserts a chunk record.
     */
    @Override
    public int insertBackChunk(BackChunk backChunk) {
        return backChunkMapper.insertBackChunk(backChunk);
    }

    /**
     * Updates a chunk record.
     */
    @Override
    public int updateBackChunk(BackChunk backChunk) {
        return backChunkMapper.updateBackChunk(backChunk);
    }

    /**
     * Deletes chunk records in batch.
     */
    @Override
    public int deleteBackChunkByIds(Long[] ids) {
        return backChunkMapper.deleteBackChunkByIds(ids);
    }

    /**
     * Deletes a chunk record by ID.
     */
    @Override
    public int deleteBackChunkById(Long id) {
        return backChunkMapper.deleteBackChunkById(id);
    }
}
package com.wolfe.pointupload.controller;

import com.wolfe.pointupload.common.AjaxResult;
import com.wolfe.pointupload.common.CommonConstant;
import com.wolfe.pointupload.domain.BackChunk;
import com.wolfe.pointupload.domain.BackFileList;
import com.wolfe.pointupload.domain.CheckChunkVO;
import com.wolfe.pointupload.service.IBackFileService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.*;
import javax.servlet.http.HttpServletResponse;

@RestController
@RequestMapping("/file")
public class FileController {
    @Autowired
    private IBackFileService backFileService;

    @GetMapping("/test")
    public String test() {
        return "Hello Wolfe.";
    }

    /**
     * Upload one chunk.
     */
    @PostMapping("/upload")
    public AjaxResult postFileUpload(@ModelAttribute BackChunk chunk, HttpServletResponse response) {
        int i = backFileService.postFileUpload(chunk, response);
        return AjaxResult.success(i);
    }

    /**
     * Check the upload status of a file (called by the frontend's testChunks probe).
     */
    @GetMapping("/upload")
    public CheckChunkVO getFileUpload(@ModelAttribute BackChunk chunk, HttpServletResponse response) {
        // Check by MD5 whether the file, or some of its chunks, already exists
        return backFileService.getFileUpload(chunk, response);
    }

    /**
     * Delete a file.
     */
    @DeleteMapping("/{id}")
    public AjaxResult remove(@PathVariable("id") Long id) {
        return AjaxResult.success(backFileService.deleteBackFileByIds(id));
    }

    /**
     * Merge the uploaded chunks into the final file.
     */
    @PostMapping("/merge")
    public AjaxResult merge(@RequestBody BackFileList backFileList) {
        int i = backFileService.mergeFile(backFileList);
        if (i == CommonConstant.UPDATE_EXISTS.intValue()) {
            // Handles a dropped connection during merge: the file is already merged,
            // so a repeated merge request is acknowledged instead of failing
            return new AjaxResult(200, "Already merged, no need to resubmit");
        }
        return AjaxResult.success(i);
    }
}
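For a quick sanity check of the chunk-status endpoint, a minimal test sketch along these lines should work with the spring-boot-starter-test dependency already in the pom. It boots the full context, so the MySQL datasource from application.yml must be reachable; the identifier below is just a placeholder MD5, and the class itself is not part of the original project:
package com.wolfe.pointupload;

import org.junit.jupiter.api.Test;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.autoconfigure.web.servlet.AutoConfigureMockMvc;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.web.servlet.MockMvc;

import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get;
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.jsonPath;
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status;

@SpringBootTest
@AutoConfigureMockMvc
public class FileControllerTest {
    @Autowired
    private MockMvc mockMvc;

    @Test
    public void unknownFileIsNotSkipped() throws Exception {
        // A file that was never uploaded must not trigger an instant upload
        mockMvc.perform(get("/file/upload")
                        .param("identifier", "0123456789abcdef0123456789abcdef")
                        .param("filename", "demo.mp4")
                        .param("chunkNumber", "1"))
                .andExpect(status().isOk())
                .andExpect(jsonPath("$.skipUpload").value(false));
    }
}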
package com.wolfe.pointupload;
import org.mybatis.spring.annotation.MapperScan;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
@MapperScan("com.wolfe.**.mapper")
@SpringBootApplication
public class PointUploadApplication {
public static void main(String[] args) {
SpringApplication.run(PointUploadApplication.class, args);
}
}
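At this point the backend is complete. With MySQL up and the application started on port 8081, GET http://localhost:8081/file/test should return "Hello Wolfe.".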
Now scaffold the frontend with Vue CLI and install the dependencies:
npm i -g @vue/cli
npm i -g @vue/cli-init
vue init webpack point-upload-f
cd point-upload-f
npm install vue-simple-uploader --save
npm i spark-md5 --save
npm i jquery --save
npm i axios --save
The upload component, src/view/Upload.vue:
<template>
<div>
状态:<div id="status"></div>
<uploader ref="uploader"
:options="options"
:autoStart="true"
@file-added="onFileAdded"
@file-success="onFileSuccess"
@file-progress="onFileProgress"
@file-error="onFileError">
</uploader>
</div>
</template>
<script>
import SparkMD5 from 'spark-md5';
import axios from 'axios';
import $ from 'jquery'
export default {
name: 'Upload',
data() {
return {
options: {
target: 'http://127.0.0.1:8081/file/upload',
chunkSize: 5 * 1024 * 1000,
fileParameterName: 'file',
maxChunkRetries: 2,
testChunks: true, // ask the server which chunks it already has
checkChunkUploadedByResponse: function (chunk, message) {
// Server-side chunk check: the basis for instant upload and resume
let objMessage = JSON.parse(message);
if (objMessage.skipUpload) {
return true;
}
// chunk.offset is 0-based, while chunkNumber starts at 1
return (objMessage.uploaded || []).indexOf(chunk.offset + 1) >= 0
},
headers: {
Authorization: ''
},
query() {
}
}
}
},
computed: {
// The underlying Uploader instance
uploader() {
return this.$refs.uploader.uploader;
}
},
methods: {
onFileAdded(file) {
console.log("... onFileAdded")
this.computeMD5(file);
},
onFileProgress(rootFile, file, chunk) {
console.log("... onFileProgress")
},
onFileSuccess(rootFile, file, response, chunk) {
let res = JSON.parse(response);
// The server indicates whether the chunks still need to be merged
if (res.needMerge) {
// Mark the file as "merging"
this.statusSet(file.id, 'merging');
let param = {
'filename': rootFile.name,
'identifier': rootFile.uniqueIdentifier,
'totalSize': rootFile.size
}
axios({
method: 'post',
url: "http://127.0.0.1:8081/file/merge",
data: param
}).then(res => {
this.statusRemove(file.id);
}).catch(e => {
console.log("合并异常,重新发起请求,文件名为:", file.name)
file.retry();
});
}
},
onFileError(rootFile, file, response, chunk) {
console.log("... onFileError")
},
computeMD5(file) {
let fileReader = new FileReader();
let time = new Date().getTime();
let blobSlice = File.prototype.slice || File.prototype.mozSlice || File.prototype.webkitSlice;
let currentChunk = 0;
const chunkSize = 10 * 1024 * 1000;
let chunks = Math.ceil(file.size / chunkSize);
let spark = new SparkMD5.ArrayBuffer();
// Mark the file as "computing MD5"
this.statusSet(file.id, 'md5');
file.pause();
loadNext();
fileReader.onload = (e => {
spark.append(e.target.result);
if (currentChunk < chunks) {
currentChunk++;
loadNext();
// Show the MD5 progress in real time
this.$nextTick(() => {
$(`.myStatus_${file.id}`).text('Checking MD5 ' + ((currentChunk / chunks) * 100).toFixed(0) + '%')
})
} else {
let md5 = spark.end();
this.computeMD5Success(md5, file);
console.log(`MD5 computed: ${file.name} \nMD5: ${md5} \nchunks: ${chunks} size: ${file.size} elapsed: ${new Date().getTime() - time} ms`);
}
});
fileReader.onerror = () => {
// Reading failed: report it and cancel this file
console.error(`Failed to read file ${file.name}, please check it`)
file.cancel();
};
function loadNext() {
let start = currentChunk * chunkSize;
let end = ((start + chunkSize) >= file.size) ? file.size : start + chunkSize;
fileReader.readAsArrayBuffer(blobSlice.call(file.file, start, end));
}
},
statusSet(id, status) {
let statusMap = {
md5: {
text: 'Checking MD5',
bgc: '#fff'
},
merging: {
text: 'Merging',
bgc: '#e2eeff'
},
transcoding: {
text: 'Transcoding',
bgc: '#e2eeff'
},
failed: {
text: 'Upload failed',
bgc: '#e2eeff'
}
}
console.log(".....", status, "...:", statusMap[status].text)
this.$nextTick(() => {
// $(`<p class="myStatus_${id}"></p>`).appendTo(`.file_${id} .uploader-file-status`).css({
$(`<p class="myStatus_${id}"></p>`).appendTo(`#status`).css({
'position': 'absolute',
'top': '0',
'left': '0',
'right': '0',
'bottom': '0',
'zIndex': '1',
'line-height': 'initial',
'backgroundColor': statusMap[status].bgc
}).text(statusMap[status].text);
})
},
computeMD5Success(md5, file) {
// Merge extra query params into the uploader options
// (this.params is not defined in this demo, so the spread is a no-op)
Object.assign(this.uploader.opts, {
query: {
...this.params,
}
})
// Use the MD5 as the file's unique identifier, then resume the upload
file.uniqueIdentifier = md5;
file.resume();
this.statusRemove(file.id);
},
statusRemove(id) {
this.$nextTick(() => {
$(`.myStatus_${id}`).remove();
})
},
}
}
</script>
<style scoped>
</style>
Next, App.vue:
<template>
<div id="app">
<router-view/>
</div>
</template>
<script>
export default {
name: 'App'
}
</script>
<style>
body {
margin: 0;
padding: 0;
}
#app {
font-family: 'Avenir', Helvetica, Arial, sans-serif;
-webkit-font-smoothing: antialiased;
-moz-osx-font-smoothing: grayscale;
text-align: center;
color: #2c3e50;
margin: 0;
padding: 0;
}
</style>
The router, src/router/index.js, registers the component at /upload:
import Vue from 'vue'
import Router from 'vue-router'
import HelloWorld from '@/components/HelloWorld'
import Upload from '@/view/Upload'
Vue.use(Router)
export default new Router({
routes: [
{
path: '/',
name: 'HelloWorld',
component: HelloWorld
},
{
path: '/upload',
name: 'Upload',
component: Upload
}
]
})
And src/main.js, which registers the vue-simple-uploader plugin globally:
// The Vue build version to load with the `import` command
// (runtime-only or standalone) has been set in webpack.base.conf with an alias.
import Vue from 'vue'
import App from './App'
import router from './router'
import uploader from 'vue-simple-uploader'
Vue.config.productionTip = false
Vue.use(uploader)
/* eslint-disable no-new */
new Vue({
el: '#app',
router,
components: { App },
template: '<App/>'
})
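At this point the frontend is complete. Run npm run dev inside point-upload-f and open the upload route; with the stock webpack template that is http://localhost:8080/#/upload (hash routing on port 8080 by default), so adjust if your setup differs.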
Finally, prepare the database: create the database and the two tables the backend expects.
mysql -u root -p
******
CREATE DATABASE IF NOT EXISTS pointupload
DEFAULT CHARACTER SET utf8
DEFAULT COLLATE utf8_general_ci;
use pointupload;
# Uploaded file list
CREATE TABLE `t_file_list` (
  `id` bigint NOT NULL AUTO_INCREMENT COMMENT 'primary key',
  `filename` varchar(64) COMMENT 'file name',
  `identifier` varchar(64) COMMENT 'unique identifier (MD5)',
  `url` varchar(128) COMMENT 'URL',
  `location` varchar(128) COMMENT 'local path',
  `total_size` bigint COMMENT 'total file size',
  PRIMARY KEY (`id`) USING BTREE,
  UNIQUE KEY `FILE_UNIQUE_KEY` (`filename`,`identifier`) USING BTREE
) ENGINE=InnoDB;
# Uploaded chunk info
CREATE TABLE `t_chunk_info` (
  `id` bigint NOT NULL AUTO_INCREMENT COMMENT 'primary key',
  `chunk_number` int COMMENT 'chunk number',
  `chunk_size` bigint COMMENT 'chunk size',
  `current_chunk_size` bigint COMMENT 'actual size of this chunk',
  `filename` varchar(255) COMMENT 'file name',
  `identifier` varchar(255) COMMENT 'file identifier (MD5)',
  `relative_path` varchar(255) COMMENT 'relative path',
  `total_chunks` int COMMENT 'total chunk count',
  `total_size` bigint COMMENT 'total size',
  PRIMARY KEY (`id`) USING BTREE
) ENGINE=InnoDB;
That's all the code.
Time to clock out~