对象存储服务是一种海量、安全、低成本、高可靠的云存储服务,适合存放任意类型的文件。容量和处理能力弹性扩展,多种存储类型供选择,全面优化存储成本。
国内的对象存储厂商
这些云存储厂商都有各自对应的SDK支持,可以很方便地集成到各自的系统中去。
为了能够根据配置动态地切换云存储厂商,本文使用Amazon S3提供的接口。
可以在后台动态添加对象存储配置,系统会根据后台配置的对象存储,把文件上传到对应的存储厂商中去。
对象存储配置表 此表用来存放对象存储的配置信息
-- Object-storage (OSS) configuration table.
-- One row per storage provider/account; the application builds an
-- S3-compatible client from these settings, selected by config_key.
create table sys_oss_config
(
oss_config_id bigint unsigned not null auto_increment comment 'ID',
-- Logical name used by the application to look this row up.
config_key varchar(255) default '' comment '配置key',
access_key varchar(255) default '' comment 'accessKey',
secret_key varchar(255) default '' comment '秘钥',
bucket_name varchar(255) default '' comment '桶名称',
-- Optional path prefix prepended to every uploaded object key.
prefix varchar(255) default '' comment '前缀',
endpoint varchar(255) default '' comment '访问站点',
-- Custom domain; when set it overrides the endpoint-derived URL.
domain varchar(255) default '' comment '自定义域名',
region varchar(255) default '' comment '域',
is_https tinyint default 0 comment '是否https(0否 1是)',
ext1 varchar(255) default '' comment '扩展字段',
del_flag tinyint default 0 comment '删除状态(0正常 1删除)',
status tinyint default 0 comment '数据状态(0正常 1停用)',
create_by varchar(64) default '' comment '创建者',
create_time datetime comment '创建时间',
update_by varchar(64) default '' comment '更新者',
update_time datetime comment '更新时间',
remark varchar(500) comment '备注',
primary key (oss_config_id)
);
-- Table-level comment (MySQL syntax).
alter table sys_oss_config comment '对象存储配置表';
对象存储文件表 此表用来存放对象存储中的文件信息
-- Uploaded-file metadata table: one row per object stored in OSS.
create table sys_oss
(
oss_id bigint unsigned not null auto_increment comment 'ID',
-- Generated object key (date path + uuid + suffix), not the user's name.
file_name varchar(255) default '' comment '文件名',
original_name varchar(255) default '' comment '原文件名',
file_suffix varchar(20) default '' comment '文件后缀',
file_size bigint default 0 comment '文件大小',
url varchar(500) default '' comment 'URL地址',
source varchar(20) default '' comment '来源(service后台 client客户端)',
-- NOTE(review): the upload service stores OssClient.getConfigKey() here,
-- i.e. the config_key of sys_oss_config, not a fixed vendor name — the
-- 'aliyun/qiniu' wording in the column comment may be stale; confirm.
service varchar(20) default '' comment '服务商(aliyun阿里云 qiniu七牛云)',
del_flag tinyint default 0 comment '删除状态',
status tinyint default 0 comment '数据状态',
create_by varchar(64) default '' comment '创建者',
create_time datetime comment '创建时间',
update_by varchar(64) default '' comment '更新者',
update_time datetime comment '更新时间',
primary key (oss_id)
);
-- Table-level comment (MySQL syntax).
alter table sys_oss comment '对象存储表';
<dependency>
    <groupId>com.amazonaws</groupId>
    <artifactId>aws-java-sdk-s3</artifactId>
    <version>1.12.248</version>
</dependency>
package com.ruoyi.framework.oss.core;
import cn.hutool.core.util.IdUtil;
import com.amazonaws.ClientConfiguration;
import com.amazonaws.Protocol;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.client.builder.AwsClientBuilder;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.model.CannedAccessControlList;
import com.amazonaws.services.s3.model.CreateBucketRequest;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.ruoyi.common.utils.DateUtils;
import com.ruoyi.common.utils.StringUtils;
import com.ruoyi.framework.oss.constant.OssConstant;
import com.ruoyi.framework.oss.entity.UploadResult;
import com.ruoyi.framework.oss.enums.PolicyType;
import com.ruoyi.framework.oss.exception.OssException;
import com.ruoyi.framework.oss.properties.OssProperties;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
/**
 * S3-protocol storage client. Any vendor that exposes an S3-compatible
 * API (AWS, Aliyun OSS, Tencent COS, Qiniu, MinIO, ...) can be used by
 * pointing the endpoint/credentials at it.
 *
 * @author zouhuu
 * @date 2022/07/24 20:57:27
 */
public class OssClient {

    /** config_key of the sys_oss_config entry this client was built from. */
    private final String configKey;

    /** Endpoint, region, credential and bucket settings for this client. */
    private final OssProperties properties;

    /** Underlying Amazon S3 client, created once in the constructor. */
    private final AmazonS3 client;

    /**
     * Builds the S3 client from the given configuration and ensures the
     * configured bucket exists (creating it with public-read if missing).
     *
     * @param configKey     identifier of the configuration entry
     * @param ossProperties endpoint/region/credential/bucket settings
     * @throws OssException if the client cannot be built or the bucket
     *                      cannot be created
     */
    public OssClient(String configKey, OssProperties ossProperties) {
        this.configKey = configKey;
        this.properties = ossProperties;
        try {
            AwsClientBuilder.EndpointConfiguration endpointConfig =
                new AwsClientBuilder.EndpointConfiguration(properties.getEndpoint(), properties.getRegion());
            AWSCredentials credentials = new BasicAWSCredentials(properties.getAccessKey(), properties.getSecretKey());
            AWSCredentialsProvider credentialsProvider = new AWSStaticCredentialsProvider(credentials);
            ClientConfiguration clientConfig = new ClientConfiguration();
            // Protocol follows the is_https flag of the configuration row.
            if (OssConstant.IS_HTTPS.equals(properties.getIsHttps())) {
                clientConfig.setProtocol(Protocol.HTTPS);
            } else {
                clientConfig.setProtocol(Protocol.HTTP);
            }
            // NOTE(review): chunked encoding is disabled — presumably because
            // some S3-compatible vendors reject chunked uploads; confirm
            // before re-enabling.
            this.client = AmazonS3Client.builder()
                .withEndpointConfiguration(endpointConfig)
                .withClientConfiguration(clientConfig)
                .withCredentials(credentialsProvider)
                .disableChunkedEncoding()
                .build();
            createBucket();
        } catch (Exception e) {
            // Re-throw our own exception type untouched; wrap anything else.
            if (e instanceof OssException) {
                throw e;
            }
            throw new OssException("配置错误! 请检查系统配置:[" + e.getMessage() + "]");
        }
    }

    /**
     * Creates the configured bucket with a public-read canned ACL and a
     * read-only bucket policy if it does not exist yet. Idempotent.
     *
     * @throws OssException if the existence check or creation fails
     */
    public void createBucket() {
        try {
            String bucketName = properties.getBucketName();
            if (client.doesBucketExistV2(bucketName)) {
                return;
            }
            CreateBucketRequest createBucketRequest = new CreateBucketRequest(bucketName);
            createBucketRequest.setCannedAcl(CannedAccessControlList.PublicRead);
            client.createBucket(createBucketRequest);
            client.setBucketPolicy(bucketName, getPolicy(bucketName, PolicyType.READ));
        } catch (Exception e) {
            throw new OssException("创建Bucket失败, 请核对配置信息:[" + e.getMessage() + "]");
        }
    }

    /**
     * Uploads a byte array to the given object path.
     *
     * @param data        file content
     * @param path        object key within the bucket
     * @param contentType MIME type recorded on the object
     * @return public URL and object key of the stored file
     */
    public UploadResult upload(byte[] data, String path, String contentType) {
        return upload(new ByteArrayInputStream(data), path, contentType);
    }

    /**
     * Uploads a stream to the given object path.
     *
     * @param inputStream file content; see the length note below
     * @param path        object key within the bucket
     * @param contentType MIME type recorded on the object
     * @return public URL and object key of the stored file
     * @throws OssException if the upload fails
     */
    public UploadResult upload(InputStream inputStream, String path, String contentType) {
        try {
            ObjectMetadata metadata = new ObjectMetadata();
            metadata.setContentType(contentType);
            // NOTE(review): available() equals the remaining length only for
            // in-memory streams such as ByteArrayInputStream (the byte[]
            // overload above). For network/file streams it can undercount —
            // callers should prefer the byte[] overload.
            metadata.setContentLength(inputStream.available());
            client.putObject(new PutObjectRequest(properties.getBucketName(), path, inputStream, metadata));
        } catch (Exception e) {
            throw new OssException("上传文件失败,请检查配置信息:[" + e.getMessage() + "]");
        }
        return UploadResult.builder().url(getUrl() + "/" + path).filename(path).build();
    }

    /**
     * Deletes an object. Accepts either a bare object key or a full URL
     * produced by {@link #getUrl()} (the URL prefix is stripped first).
     *
     * @param path object key or full URL of the object to remove
     * @throws OssException if the deletion fails
     */
    public void delete(String path) {
        path = path.replace(getUrl() + "/", "");
        try {
            client.deleteObject(properties.getBucketName(), path);
        } catch (Exception e) {
            // Fixed copy-paste bug: this previously reported "上传文件失败"
            // (upload failed) for a delete failure.
            throw new OssException("删除文件失败,请检查配置信息:[" + e.getMessage() + "]");
        }
    }

    /**
     * Uploads a byte array under an auto-generated date/uuid path with the
     * given file-name suffix.
     */
    public UploadResult uploadSuffix(byte[] data, String suffix, String contentType) {
        return upload(data, getPath(properties.getPrefix(), suffix), contentType);
    }

    /**
     * Uploads a stream under an auto-generated date/uuid path with the
     * given file-name suffix.
     */
    public UploadResult uploadSuffix(InputStream inputStream, String suffix, String contentType) {
        return upload(inputStream, getPath(properties.getPrefix(), suffix), contentType);
    }

    /**
     * Returns the public base URL for objects in this bucket.
     * Priority: custom domain, then virtual-hosted style for known cloud
     * vendors, then path style (MinIO and other self-hosted endpoints).
     */
    public String getUrl() {
        String domain = properties.getDomain();
        if (StringUtils.isNotBlank(domain)) {
            return domain;
        }
        String endpoint = properties.getEndpoint();
        String header = OssConstant.IS_HTTPS.equals(properties.getIsHttps()) ? "https://" : "http://";
        // Known cloud vendors use virtual-hosted-style URLs: bucket.endpoint
        if (StringUtils.containsAny(endpoint, OssConstant.CLOUD_SERVICE)) {
            return header + properties.getBucketName() + "." + endpoint;
        }
        // MinIO and similar use path-style URLs: endpoint/bucket
        return header + endpoint + "/" + properties.getBucketName();
    }

    /**
     * Builds an object key: [prefix/]yyyy/MM/dd/&lt;uuid&gt;&lt;suffix&gt;.
     *
     * @param prefix optional key prefix; skipped when blank
     * @param suffix file-name suffix, expected to include the leading dot
     * @return generated object key
     */
    public String getPath(String prefix, String suffix) {
        // Random simple UUID keeps keys unique without coordination.
        String uuid = IdUtil.fastSimpleUUID();
        // Date-based folder layout, e.g. 2022/07/24/<uuid>
        String path = DateUtils.datePath() + "/" + uuid;
        if (StringUtils.isNotBlank(prefix)) {
            path = prefix + "/" + path;
        }
        return path + suffix;
    }

    /** @return the configuration key this client was created from */
    public String getConfigKey() {
        return configKey;
    }

    /**
     * Builds a minimal S3 bucket-policy JSON for the given access level.
     * The statement set mirrors MinIO's canned "download"/"upload"/"public"
     * policies; the JSON text is assembled verbatim and must not change.
     *
     * @param bucketName bucket the policy applies to
     * @param policyType READ, WRITE or READ_WRITE
     * @return policy document as a JSON string
     */
    private static String getPolicy(String bucketName, PolicyType policyType) {
        StringBuilder builder = new StringBuilder();
        builder.append("{\n\"Statement\": [\n{\n\"Action\": [\n");
        // Bucket-level actions depend on the access level.
        if (policyType == PolicyType.WRITE) {
            builder.append("\"s3:GetBucketLocation\",\n\"s3:ListBucketMultipartUploads\"\n");
        } else if (policyType == PolicyType.READ_WRITE) {
            builder.append("\"s3:GetBucketLocation\",\n\"s3:ListBucket\",\n\"s3:ListBucketMultipartUploads\"\n");
        } else {
            builder.append("\"s3:GetBucketLocation\"\n");
        }
        builder.append("],\n\"Effect\": \"Allow\",\n\"Principal\": \"*\",\n\"Resource\": \"arn:aws:s3:::");
        builder.append(bucketName);
        builder.append("\"\n},\n");
        // Read-only policy explicitly denies listing the bucket contents.
        if (policyType == PolicyType.READ) {
            builder.append("{\n\"Action\": [\n\"s3:ListBucket\"\n],\n\"Effect\": \"Deny\",\n\"Principal\": \"*\",\n\"Resource\": \"arn:aws:s3:::");
            builder.append(bucketName);
            builder.append("\"\n},\n");
        }
        // Object-level actions (Resource arn:aws:s3:::bucket/*).
        builder.append("{\n\"Action\": ");
        switch (policyType) {
            case WRITE:
                builder.append("[\n\"s3:AbortMultipartUpload\",\n\"s3:DeleteObject\",\n\"s3:ListMultipartUploadParts\",\n\"s3:PutObject\"\n],\n");
                break;
            case READ_WRITE:
                builder.append("[\n\"s3:AbortMultipartUpload\",\n\"s3:DeleteObject\",\n\"s3:GetObject\",\n\"s3:ListMultipartUploadParts\",\n\"s3:PutObject\"\n],\n");
                break;
            default:
                builder.append("\"s3:GetObject\",\n");
                break;
        }
        builder.append("\"Effect\": \"Allow\",\n\"Principal\": \"*\",\n\"Resource\": \"arn:aws:s3:::");
        builder.append(bucketName);
        builder.append("/*\"\n}\n],\n\"Version\": \"2012-10-17\"\n}\n");
        return builder.toString();
    }
}
package com.ruoyi.framework.oss.factory;
import cn.hutool.json.JSONUtil;
import com.alibaba.fastjson2.JSONObject;
import com.ruoyi.common.constant.CacheConstants;
import com.ruoyi.common.core.redis.RedisCache;
import com.ruoyi.common.utils.StringUtils;
import com.ruoyi.common.utils.spring.SpringUtils;
import com.ruoyi.framework.oss.constant.OssConstant;
import com.ruoyi.framework.oss.core.OssClient;
import com.ruoyi.framework.oss.exception.OssException;
import com.ruoyi.framework.oss.properties.OssProperties;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
/**
 * Factory for upload clients: caches one initialized {@link OssClient}
 * per configuration key so the S3 client is built only once per config.
 *
 * @author zouhuu
 * @date 2022/07/24 20:56:43
 */
public class OssFactory {

    /** configKey -> initialized client; safe for concurrent readers. */
    private static final Map<String, OssClient> CLIENT_CACHE = new ConcurrentHashMap<>();

    /**
     * 初始化工厂
     * <p>
     * Warms up the Redis connection by touching the default-config entry;
     * the value itself is not used (the previous code also called
     * {@code toString()} on it, which threw an NPE when the key was absent).
     * NOTE(review): this reads {@code CacheConstants.CACHE_CONFIG_KEY} while
     * {@link #instance()} reads {@code OssConstant.CACHE_CONFIG_KEY} —
     * confirm the two constants are intentionally different.
     */
    public static void init() {
        SpringUtils.getBean(RedisCache.class).getCacheObject(CacheConstants.CACHE_CONFIG_KEY);
    }

    /**
     * 获取默认实例
     *
     * @return client for the storage type currently selected in Redis
     * @throws OssException if no storage type is configured
     */
    public static OssClient instance() {
        // The active storage type (config key) is kept in Redis; it may be
        // missing entirely, so guard before calling toString().
        Object cached = SpringUtils.getBean(RedisCache.class).getCacheObject(OssConstant.CACHE_CONFIG_KEY);
        if (cached == null || StringUtils.isEmpty(cached.toString())) {
            throw new OssException("文件存储服务类型无法找到!");
        }
        return instance(cached.toString());
    }

    /**
     * 根据类型获取实例
     *
     * @param configKey configuration key to look up
     * @return cached client, building it from Redis config on first use
     */
    public static OssClient instance(String configKey) {
        OssClient client = getClient(configKey);
        if (client == null) {
            refresh(configKey);
            return getClient(configKey);
        }
        return client;
    }

    /**
     * Rebuilds the client for the given key from its Redis-stored JSON
     * configuration and places it in the cache.
     *
     * @throws OssException if no configuration exists for the key
     */
    private static void refresh(String configKey) {
        // Previously a missing Redis entry caused a bare NPE on toString();
        // surface the intended OssException instead.
        Object cached = SpringUtils.getBean(RedisCache.class).getCacheObject(OssConstant.SYS_OSS_KEY + configKey);
        if (cached == null) {
            throw new OssException("系统异常, '" + configKey + "'配置信息不存在!");
        }
        OssProperties properties = JSONUtil.toBean(cached.toString(), OssProperties.class);
        if (properties == null) {
            throw new OssException("系统异常, '" + configKey + "'配置信息不存在!");
        }
        CLIENT_CACHE.put(configKey, new OssClient(configKey, properties));
    }

    /** Cache lookup; returns null when no client has been built yet. */
    private static OssClient getClient(String configKey) {
        return CLIENT_CACHE.get(configKey);
    }
}
package com.ruoyi.framework.oss.service.impl;
import com.ruoyi.common.exception.ServiceException;
import com.ruoyi.common.utils.StringUtils;
import com.ruoyi.framework.oss.core.OssClient;
import com.ruoyi.framework.oss.entity.UploadResult;
import com.ruoyi.framework.oss.factory.OssFactory;
import com.ruoyi.framework.oss.service.IFileService;
import com.ruoyi.system.domain.SysOss;
import com.ruoyi.system.service.ISysOssService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.web.multipart.MultipartFile;
import java.io.IOException;
/**
 * File upload service: stores the binary in the currently configured
 * object storage and persists the file's metadata in sys_oss.
 *
 * @author zouhuu
 * @date 2022/07/25 14:40:08
 */
@Service
public class IFileServiceImpl implements IFileService {

    @Autowired
    private ISysOssService sysOssService;

    /**
     * Uploads the multipart file to the active storage provider and records
     * it in the sys_oss table.
     *
     * @param file   uploaded file
     * @param source upload origin ("service" back-end or "client")
     * @return URL and generated object name of the stored file
     * @throws ServiceException if reading the multipart payload fails
     */
    @Override
    public UploadResult upload(MultipartFile file, String source) {
        // getOriginalFilename() may be null depending on the client; the
        // previous code NPE'd here. Also, lastIndexOf(".") returns -1 for
        // extensionless names, and commons-lang substring(-1) counts from
        // the end, yielding a bogus one-character "suffix" — guard both.
        String originalFileName = file.getOriginalFilename();
        String suffix = "";
        if (originalFileName != null) {
            int dotIndex = originalFileName.lastIndexOf('.');
            if (dotIndex >= 0) {
                suffix = originalFileName.substring(dotIndex);
            }
        }
        OssClient storage = OssFactory.instance();
        UploadResult uploadResult;
        try {
            uploadResult = storage.uploadSuffix(file.getBytes(), suffix, file.getContentType());
        } catch (IOException e) {
            throw new ServiceException(e.getMessage());
        }
        // 保存文件信息 — persist metadata so uploads can be listed/managed.
        SysOss oss = new SysOss();
        oss.setUrl(uploadResult.getUrl());
        oss.setFileSuffix(suffix);
        oss.setFileName(uploadResult.getFilename());
        oss.setOriginalName(originalFileName);
        // The "service" column records which storage config was used.
        oss.setService(storage.getConfigKey());
        oss.setFileSize(file.getSize());
        oss.setSource(source);
        sysOssService.insertSysOss(oss);
        return uploadResult;
    }
}
gitee.com/zouhuu_admin/RuoYi-Vue-Staging
具体文件夹为:
RuoYi-Vue-Staging/Project-Service/ruoyi-framework/src/main/java/com/ruoyi/framework/oss