The overall approach is as follows (a sketch of the front-end flow follows this list):
1. The database stores only the file's path and metadata; the file content itself lives in MinIO, and the object name is the file's MD5.
2. When a user uploads a file, we first check whether that file's MD5 is already recorded in the database. If it is, the upload is reported as successful immediately (instant upload); otherwise the real upload starts.
3. Before the actual upload, we check the file size: a small file does not need a multipart-upload task and is uploaded in a single request.
4. For larger files, the backend calls the MinIO API to create a multipart-upload task (obtaining an upload ID), generates a presigned upload URL for every part, and returns the list of URLs to the frontend; the frontend then PUTs each chunk directly to its corresponding URL in MinIO.
5. Progress information is updated as each chunk finishes uploading.
6. After all chunks have been uploaded, the backend calls the MinIO API to merge the parts of the task into the complete file.
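Before looking at each piece, here is a minimal sketch of that front-end flow. The endpoint paths (/file/check, /file/init, /file/merge) and the response fields (exists, uploadId, uploadUrl) are assumptions used only for illustration; they are not part of the backend code shown later.

// Minimal sketch of the front-end flow above. The endpoint paths and response
// fields are illustrative assumptions, not the real backend API.
const partSize = 5 * 1024 * 1024; // same 5 MB size as the chunkSize constant below

async function uploadFileSketch(file, md5) { // md5 comes from ReadFileMD5 shown below
  // 1. Instant upload: the backend already stores an object with this MD5
  const check = await fetch(`/file/check?md5=${md5}`).then((r) => r.json());
  if (check.exists) return md5;

  // 2. Create the upload task: a single presigned URL for small files,
  //    or an uploadId plus one presigned URL per part for large ones
  const init = await fetch('/file/init', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ md5, size: file.size, contentType: file.type }),
  }).then((r) => r.json());

  // 3. PUT each chunk directly to its presigned URL in MinIO
  for (let i = 0; i < init.uploadUrl.length; i++) {
    const blob = file.slice(i * partSize, (i + 1) * partSize);
    await fetch(init.uploadUrl[i], { method: 'PUT', body: blob });
  }

  // 4. For multipart uploads, ask the backend to merge the parts
  if (init.uploadId) {
    await fetch(`/file/merge?md5=${md5}&uploadId=${init.uploadId}`, { method: 'POST' });
  }
  return md5;
}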
const chunkSize = 5 * 1024 * 1024; // chunk size: 5 MB
The frontend uses SparkMD5 to compute the file's MD5. If that MD5 is already present in the database, the upload is considered finished right away (instant upload).
The function below computes the file's MD5:
import SparkMD5 from 'spark-md5';

// Compute the file's MD5 incrementally, reading the file chunk by chunk
const ReadFileMD5 = (param) => {
  return new Promise((resolve, reject) => {
    const file = param.file;
    const fileReader = new FileReader();
    const md5 = new SparkMD5();
    let index = 0;
    const loadFile = () => {
      const slice = file.slice(index, index + chunkSize);
      fileReader.readAsBinaryString(slice);
    };
    loadFile();
    fileReader.onload = (e) => {
      md5.appendBinary(e.target.result);
      if (index < file.size) {
        index += chunkSize;
        loadFile();
      } else {
        // md5.end() returns the file's MD5 as a hex string
        const md5Str = md5.end();
        resolve(md5Str);
      }
    };
    fileReader.onerror = () => {
      reject(new Error('Failed to compute the file MD5'));
    };
  });
};
Once we have confirmed that the file's MD5 does not exist in the database, the real upload starts. First compute how many chunks the file will be split into:
let chunks = Math.ceil(file.file.size / chunkSize); // total number of chunks for this file
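Each of those chunks is then PUT directly to the presigned URL that the backend returns for it. Below is a minimal sketch with a simple progress callback; the function name uploadChunks, the uploadUrls argument and the onProgress callback are illustrative assumptions, and chunkSize is the 5 MB constant defined earlier.

// Upload every chunk to its presigned URL and report progress after each one
// (step 5 of the overview). uploadUrls is the list returned by the backend;
// onProgress is any callback, e.g. (done, total) => console.log(`${done}/${total}`).
const uploadChunks = async (file, uploadUrls, onProgress) => {
  for (let i = 0; i < uploadUrls.length; i++) {
    const blob = file.slice(i * chunkSize, (i + 1) * chunkSize);
    const resp = await fetch(uploadUrls[i], { method: 'PUT', body: blob });
    if (!resp.ok) {
      throw new Error(`Chunk ${i + 1} failed with HTTP ${resp.status}`);
    }
    onProgress(i + 1, uploadUrls.length);
  }
};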
To presign one upload URL per part, the backend needs a custom MinIO client: the MinIO Java SDK keeps the low-level multipart-upload methods (createMultipartUpload, completeMultipartUpload, listParts) protected, so we expose them in a subclass.
import com.google.common.collect.Multimap;
import io.minio.CreateMultipartUploadResponse;
import io.minio.ListPartsResponse;
import io.minio.MinioClient;
import io.minio.ObjectWriteResponse;
import io.minio.errors.*;
import io.minio.messages.Part;

import java.io.IOException;
import java.security.InvalidKeyException;
import java.security.NoSuchAlgorithmException;

/**
 * MinIO follows the Amazon S3 API, so every S3 operation has a MinIO counterpart.
 * For the other methods, see the MinIO Java client docs
 * https://minio-java.min.io/
 * together with the official Amazon S3 API reference
 * https://docs.aws.amazon.com/AmazonS3/latest/API
 * for usage and behaviour.
 */
public class CustomMinioClient extends MinioClient {

    // Public so that utility classes in other packages can wrap an existing client
    public CustomMinioClient(MinioClient client) {
        super(client);
    }

    // Create a multipart-upload task and return its upload ID
    public String initMultiPartUpload(String bucket, String region, String object, Multimap<String, String> headers, Multimap<String, String> extraQueryParams) throws IOException, InvalidKeyException, NoSuchAlgorithmException, InsufficientDataException, ServerException, InternalException, XmlParserException, InvalidResponseException, ErrorResponseException {
        CreateMultipartUploadResponse response = this.createMultipartUpload(bucket, region, object, headers, extraQueryParams);
        return response.result().uploadId();
    }

    // Merge the uploaded parts of the given upload task into the final object
    public ObjectWriteResponse mergeMultipartUpload(String bucketName, String region, String objectName, String uploadId, Part[] parts, Multimap<String, String> extraHeaders, Multimap<String, String> extraQueryParams) throws IOException, InvalidKeyException, NoSuchAlgorithmException, InsufficientDataException, ServerException, InternalException, XmlParserException, InvalidResponseException, ErrorResponseException {
        return this.completeMultipartUpload(bucketName, region, objectName, uploadId, parts, extraHeaders, extraQueryParams);
    }

    // List the parts already uploaded for the given upload task
    public ListPartsResponse listMultipart(String bucketName, String region, String objectName, Integer maxParts, Integer partNumberMarker, String uploadId, Multimap<String, String> extraHeaders, Multimap<String, String> extraQueryParams) throws NoSuchAlgorithmException, InsufficientDataException, IOException, InvalidKeyException, ServerException, XmlParserException, ErrorResponseException, InternalException, InvalidResponseException {
        return this.listParts(bucketName, region, objectName, maxParts, partNumberMarker, uploadId, extraHeaders, extraQueryParams);
    }
}
...
if (partCount == 1) {
    // Only one part: return a single presigned upload URL
    String uploadObjectUrl = MinioUtils.getUploadObjectUrl(MinioUtils.FILE_WAREHOUSE, objectName);
    result.setUploadUrl(new ArrayList<String>() {{ add(uploadObjectUrl); }});
} else {
    // Several parts: create a multipart-upload task and return one URL per part
    Map<String, Object> initRsl = MinioUtils.initMultiPartUpload(MinioUtils.FILE_WAREHOUSE, objectName, partCount, contentType);
    result.setFinished(false);
    result.setUploadId(initRsl.get("uploadId").toString());
    result.setUploadUrl((List<String>) initRsl.get("uploadUrls"));
}
...
The key utility methods that create the MinIO upload task are shown below.
/**
 * Single-file upload: generate one presigned PUT URL
 *
 * @param bucketName bucket name
 * @param objectName full object path/name
 * @return presigned upload URL, or null on failure
 */
public static String getUploadObjectUrl(String bucketName, String objectName) {
    try {
        // Build the MinIO client
        CustomMinioClient customMinioClient = new CustomMinioClient(MinioClient.builder()
                .endpoint(properties.getUrl())                                     // MinIO server address
                .credentials(properties.getAccessKey(), properties.getSecureKey()) // access key and secret key
                .build());
        return customMinioClient.getPresignedObjectUrl(
                GetPresignedObjectUrlArgs.builder()
                        .method(Method.PUT)          // the client uploads with an HTTP PUT
                        .bucket(bucketName)          // bucket name
                        .object(objectName)          // object name
                        .expiry(1, TimeUnit.DAYS)    // how long the presigned URL stays valid
                        .build()
        );
    } catch (Exception e) {
        log.error("Failed to create presigned upload URL", e);
        return null;
    }
}
/**
 * Create a multipart-upload task and presign one upload URL per part
 *
 * @param bucketName  bucket name
 * @param objectName  full object path/name
 * @param partCount   number of parts
 * @param contentType content type of the object
 * @return map with "uploadId" and "uploadUrls", or null on failure
 */
public static Map<String, Object> initMultiPartUpload(String bucketName, String objectName, int partCount, String contentType) {
    Map<String, Object> result = new HashMap<>();
    try {
        // Fall back to a generic stream type when none is given; note that
        // "application/octet-stream" prevents in-browser preview of the object
        if (contentType == null || contentType.isEmpty()) {
            contentType = "application/octet-stream";
        }
        HashMultimap<String, String> headers = HashMultimap.create();
        headers.put("Content-Type", contentType);
        CustomMinioClient customMinioClient = new CustomMinioClient(MinioClient.builder()
                .endpoint(properties.getUrl())
                .credentials(properties.getAccessKey(), properties.getSecureKey())
                .build());
        checkBucket(customMinioClient, false, bucketName);
        // Initialise the multipart-upload task
        String uploadId = customMinioClient.initMultiPartUpload(bucketName, null, objectName, headers, null);
        result.put("uploadId", uploadId);
        List<String> partList = new ArrayList<>();
        Map<String, String> reqParams = new HashMap<>();
        reqParams.put("uploadId", uploadId);
        for (int i = 1; i <= partCount; i++) {
            reqParams.put("partNumber", String.valueOf(i));
            // Presign an upload URL for this part
            String uploadUrl = customMinioClient.getPresignedObjectUrl(
                    GetPresignedObjectUrlArgs.builder()
                            .method(Method.PUT)          // the client uploads with an HTTP PUT
                            .bucket(bucketName)          // bucket name
                            .object(objectName)          // object name
                            .expiry(1, TimeUnit.DAYS)    // how long the presigned URL stays valid
                            .extraQueryParams(reqParams) // bind the URL to this uploadId and part number
                            .build());
            partList.add(uploadUrl);
        }
        // Return the upload ID together with the list of part upload URLs
        result.put("uploadUrls", partList);
    } catch (Exception e) {
        log.error("Failed to create multipart-upload task", e);
        return null;
    }
    return result;
}
/**
 * Ensure the bucket exists, creating it (with a public-read policy) if necessary
 *
 * @param minioClient the MinIO client
 * @param versioning  whether versioning should be enabled on the bucket
 * @param bucket      bucket name
 * @throws Exception on any MinIO error
 */
private static void checkBucket(MinioClient minioClient, boolean versioning, String bucket) throws Exception {
    boolean exists = minioClient.bucketExists(BucketExistsArgs.builder().bucket(bucket).build());
    if (!exists) {
        minioClient.makeBucket(MakeBucketArgs.builder().bucket(bucket).build());
        // Set a bucket policy that allows anyone to list and read objects
        // (Amazon S3 policy format, which MinIO follows)
        String config = "{ " +
                " \"Id\": \"Policy1\", " +
                " \"Version\": \"2012-10-17\", " +
                " \"Statement\": [ " +
                " { " +
                " \"Sid\": \"Statement1\", " +
                " \"Effect\": \"Allow\", " +
                " \"Action\": [ " +
                " \"s3:ListBucket\", " +
                " \"s3:GetObject\" " +
                " ], " +
                " \"Resource\": [ " +
                " \"arn:aws:s3:::" + bucket + "\", " +
                " \"arn:aws:s3:::" + bucket + "/*\" " +
                " ]," +
                " \"Principal\": \"*\"" +
                " } " +
                " ] " +
                "}";
        minioClient.setBucketPolicy(
                SetBucketPolicyArgs.builder().bucket(bucket).config(config).build());
    }
    // Versioning: enable or suspend it to match the requested setting
    VersioningConfiguration configuration = minioClient.getBucketVersioning(GetBucketVersioningArgs.builder().bucket(bucket).build());
    boolean enabled = configuration.status() == VersioningConfiguration.Status.ENABLED;
    if (versioning && !enabled) {
        minioClient.setBucketVersioning(SetBucketVersioningArgs.builder()
                .config(new VersioningConfiguration(VersioningConfiguration.Status.ENABLED, null)).bucket(bucket).build());
    } else if (!versioning && enabled) {
        minioClient.setBucketVersioning(SetBucketVersioningArgs.builder()
                .config(new VersioningConfiguration(VersioningConfiguration.Status.SUSPENDED, null)).bucket(bucket).build());
    }
}
After the frontend has uploaded every chunk, it notifies the backend to merge them into the final file.
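From the front end this is a single request to the merge endpoint. A sketch follows; the path /file/merge is a hypothetical name, while the md5, uploadId and chunk-count parameters mirror the values the backend handler shown next reads.

// Ask the backend to merge all uploaded parts into the final object.
// The endpoint path is an assumption; md5, uploadId and chunks correspond to
// the values consumed by the backend handler below.
const notifyMerge = async (md5, uploadId, chunks) => {
  const resp = await fetch(
    `/file/merge?md5=${md5}&uploadId=${uploadId}&chunks=${chunks}`,
    { method: 'POST' }
  );
  if (!resp.ok) {
    throw new Error(`Merge request failed with HTTP ${resp.status}`);
  }
  return resp.json(); // the backend replies with success, "missing chunks" or "merge error"
};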
...
// First make sure the list of uploaded parts is complete
List<String> partList = MinioUtils.getExsitParts(MinioUtils.FILE_WAREHOUSE, md5, uploadId);
if (CollectionUtils.isNotEmpty(partList)) {
    // The part list is not empty; check that no part is missing
    if (chuncks.compareTo(partList.size()) > 0) {
        // Fewer parts were uploaded than expected
        return R.failure("Some file chunks are missing, please upload again");
    } else {
        // All parts are present: merge them and return
        boolean success = MinioUtils.mergeMultipartUpload(MinioUtils.FILE_WAREHOUSE, md5, uploadId);
        if (!success) {
            // Merge failed
            return R.failure("Failed to merge the file chunks");
        }
    }
} else {
    return R.failure("Some file chunks are missing, please upload again");
}
...
/**
 * List the parts that have already been uploaded for the given upload task
 *
 * @param bucketName bucket name
 * @param objectName full object path/name
 * @param uploadId   multipart-upload task ID
 * @return ETags of the uploaded parts (empty on error)
 */
public static List<String> getExsitParts(String bucketName, String objectName, String uploadId) {
    List<String> parts = new ArrayList<>();
    try {
        CustomMinioClient customMinioClient = new CustomMinioClient(MinioClient.builder()
                .endpoint(properties.getUrl())
                .credentials(properties.getAccessKey(), properties.getSecureKey())
                .build());
        // listParts returns at most 1,000 parts per call (the S3 max-parts limit),
        // so requesting 1,024 simply returns the first 1,000
        ListPartsResponse partResult = customMinioClient.listMultipart(bucketName, null, objectName, 1024, 0, uploadId, null, null);
        for (Part part : partResult.result().partList()) {
            parts.add(part.etag());
        }
    } catch (Exception e) {
        log.error("Failed to list the uploaded parts of the task", e);
    }
    return parts;
}
/**
 * Merge the uploaded parts of a multipart-upload task into the final object
 *
 * @param bucketName bucket name
 * @param objectName full object path/name
 * @param uploadId   multipart-upload task ID
 * @return true if the merge succeeded
 */
public static boolean mergeMultipartUpload(String bucketName, String objectName, String uploadId) {
    try {
        CustomMinioClient customMinioClient = new CustomMinioClient(MinioClient.builder()
                .endpoint(properties.getUrl())
                .credentials(properties.getAccessKey(), properties.getSecureKey())
                .build());
        // listParts returns at most 1,000 parts per call (the max-parts limit);
        // an S3/MinIO multipart upload may hold up to 10,000 parts, so very large
        // uploads would need to page through the results
        ListPartsResponse partResult = customMinioClient.listMultipart(bucketName, null, objectName, 1000, 0, uploadId, null, null);
        List<Part> uploaded = partResult.result().partList();
        Part[] parts = new Part[uploaded.size()];
        int partNumber = 1;
        for (Part part : uploaded) {
            parts[partNumber - 1] = new Part(partNumber, part.etag());
            partNumber++;
        }
        // Complete the multipart upload, merging the parts into one object
        customMinioClient.mergeMultipartUpload(bucketName, null, objectName, uploadId, parts, null, null);
    } catch (Exception e) {
        log.error("Failed to merge the uploaded parts", e);
        return false;
    }
    return true;
}