Spring Cloud + Vue 3: multipart upload with breakpoint resume

Background

When building application-level service platforms, users sometimes need to upload large files, and when an upload fails it should resume from the point of failure on the next attempt. This calls for breakpoint resume combined with multipart (chunked) upload. This article walks through one strategy for this scenario, built on Nginx, Spring Boot, Vue, MinIO, and the Amazon S3 SDK.

Preparation

1. Nginx: set the maximum request body size

The purpose of this step is to prevent nginx from rejecting requests because of their body size: the default client_max_body_size is only 1m, which a single 5 MB chunk already exceeds.

http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';

    access_log /var/log/nginx/access.log main;
    # Set the maximum supported request body size here; our project currently requires 100m
    client_max_body_size 100m;
    sendfile on;
    #tcp_nopush on;

    keepalive_timeout 65;
....
}
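A point worth noting: the pre-signed URLs used later in this article let the browser PUT each chunk directly to MinIO, so the body-size limit must be raised on whichever nginx server block actually carries the chunk traffic. A minimal sketch, assuming a hypothetical server block that reverse-proxies MinIO (names, ports, and paths are placeholders for your deployment):

# Hypothetical reverse-proxy for MinIO; only needed if the chunk PUTs pass through nginx
server {
    listen 9001;
    client_max_body_size 100m;        # per-server override of the body-size limit

    location / {
        proxy_pass http://172.16.10.74:9000;
        proxy_set_header Host $http_host;
        proxy_request_buffering off;  # stream large chunk bodies instead of buffering them to disk
    }
}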

2. Spring Boot configuration

spring:
  servlet:
    multipart:
      # Maximum supported size of a single file
      max-file-size: 100MB
      # Maximum supported request size
      max-request-size: 500MB

Implementation

Front-end upload component UploadParallel.vue

<script setup>
import { UploadFilled } from '@element-plus/icons-vue'

import md5 from "../lib/md5";
import { taskInfo, initTask, preSignUrl, merge } from '../lib/api';
import {ElNotification} from "element-plus";
import Queue from 'promise-queue-plus';
import axios from 'axios'
import { ref } from 'vue'

// Queue for file upload chunked tasks (used to stop the file upload queue when removing a file) key: fileUid value: queue object
const fileUploadChunkQueue = ref({}).value

/**
 * Get an upload task, if not, initialize one
 */
const getTaskInfo = async (file) => {
    let task;
    const identifier = await md5(file)
    const { code, data, msg } = await taskInfo(identifier)
    if (code === 200) {
        task = data
        if (!task || Object.keys(task).length === 0) {
            const initTaskData = {
                identifier,
                fileName: file.name,
                totalSize: file.size,
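                // 5 MB per chunk: S3-compatible multipart uploads require every part except the last to be at least 5 MB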
                chunkSize: 5 * 1024 * 1024
            }
            const { code, data, msg } = await initTask(initTaskData)
            if (code === 200) {
                task = data
            } else {
                ElNotification.error({
                    title: 'File upload error',
                    message: msg
                })
            }
        }
    } else {
        ElNotification.error({
            title: 'File upload error',
            message: msg
        })
    }
    return task
}

/**
 * Upload handling. Files that are already fully uploaded (chunks merged) never enter this method.
 */
const handleUpload = (file, taskRecord, options) => {

    let lastUploadedSize = 0; // total size already uploaded when this (resumed) session started
    let uploadedSize = 0 // size uploaded so far
    const totalSize = file.size || 0 // total file size
    let startMs = new Date().getTime(); // upload start time
    const { exitPartList, chunkSize, chunkNum, fileIdentifier } = taskRecord

    // Get the average upload speed from the beginning to now (byte/s)
    const getSpeed = () => {
        // Total size uploaded - Total size of the last upload (resumable upload) = Total size of this upload (byte)
        const intervalSize = uploadedSize - lastUploadedSize
        const nowMs = new Date().getTime()
        // time interval (s)
        const intervalTime = (nowMs - startMs) / 1000
        return intervalSize / intervalTime
    }

    const uploadNext = async (partNumber) => {
        const start = Number(chunkSize) * (partNumber - 1)
        const end = start + Number(chunkSize)
        const blob = file.slice(start, end)
        const { code, data, msg } = await preSignUrl({ identifier: fileIdentifier, partNumber: partNumber })
        if (code === 200 && data) {
            await axios.request({
                url: data,
                method: 'PUT',
                data: blob,
                headers: {'Content-Type': 'application/octet-stream'}
            })
            return Promise.resolve({ partNumber: partNumber, uploadedSize: blob.size })
        }
        return Promise.reject(`Chunk ${partNumber}: failed to obtain a pre-signed upload URL`)
    }

    /**
     * Update upload progress
     * @param increment number of bytes newly added to the uploaded total
     */
    const updateProcess = (increment) => {
        increment = Number(increment)
        const { onProgress } = options
        let factor = 1000; // advance the progress 1000 bytes at a time
        let from = 0;
        // Advance the progress bar bit by bit so it animates smoothly
        while (from <= increment) {
            from += factor
            uploadedSize += factor
            const percent = (uploadedSize / totalSize * 100).toFixed(2);
            onProgress({ percent: percent })
        }

        const speed = getSpeed();
        const remainingTime = speed != 0 ? Math.ceil((totalSize - uploadedSize) / speed) + 's' : 'Unknown'
        console.log('Remaining size:', (totalSize - uploadedSize) / 1024 / 1024, 'MB');
        console.log('Current speed:', (speed / 1024 / 1024).toFixed(2), 'MB/s');
        console.log('Estimated completion:', remainingTime);
    }


    return new Promise(resolve => {
        const failArr = [];
        const queue = Queue(5, {
            "retry": 3, // number of retries
            "retryIsJump": false, // whether to retry immediately; otherwise the retry is appended to the end of the queue
            "workReject": function(reason,queue){
                failArr.push(reason)
            },
            "queueEnd": function(queue){
                resolve(failArr);
            }
        })
        fileUploadChunkQueue[file.uid] = queue
        for (let partNumber = 1; partNumber <= chunkNum; partNumber++) {
            const exitPart = (exitPartList || []).find(exitPart => exitPart.partNumber == partNumber)
            if (exitPart) {
                // This chunk is already uploaded: add it to the completed total, and record it in the
                // size uploaded before this session, which is used to compute the upload speed.
                lastUploadedSize += Number(exitPart.size)
                updateProcess(exitPart.size)
            } else {
                queue.push(() => uploadNext(partNumber).then(res => {
                    //Update the upload progress after the single file upload is completed
                    updateProcess(res.uploadedSize)
                }))
            }
        }
        if (queue.getLength() == 0) {
            // All fragments have been uploaded but have not been merged. Return directly to perform the merge operation.
            resolve(failArr);
            return;
        }
        queue.start()
    })
}

/**
 * el-upload custom upload method entry
 */
const handleHttpRequest = async (options) => {
    const file = options.file
    const task = await getTaskInfo(file)
    if (task) {
        const { finished, path, taskRecord } = task
        const { fileIdentifier: identifier } = taskRecord
        if (finished) {
            return path
        } else {
            const errorList = await handleUpload(file, taskRecord, options)
            if (errorList.length > 0) {
                ElNotification.error({
                    title: 'File upload error',
                    message: 'Some chunks failed to upload, please try uploading the file again'
                })
                return;
            }
            const { code, data, msg } = await merge(identifier)
            if (code === 200) {
                return path;
            } else {
                ElNotification.error({
                    title: 'File upload error',
                    message: msg
                })
            }
        }
    } else {
        ElNotification.error({
            title: 'File upload error',
            message: 'Failed to obtain upload task'
        })
    }
}

/**
 * Remove files from the file list
 * If there is an upload queue task object for the file, stop the task of the queue
 */
const handleRemoveFile = (uploadFile, uploadFiles) => {
    const queueObject = fileUploadChunkQueue[uploadFile.uid]
    if (queueObject) {
        queueObject.stop()
        fileUploadChunkQueue[uploadFile.uid] = undefined
    }
}

</script>
<template>
    <el-card style="width: 80%; margin: 80px auto" header="Multipart file upload">
        <el-upload
            class="upload-demo"
            drag
            action="/"
            multiple
            :http-request="handleHttpRequest"
            :on-remove="handleRemoveFile">
            <el-icon class="el-icon--upload"><upload-filled /></el-icon>
            <div class="el-upload__text">
                Please drag the file here or <em>click here to upload</em>
            </div>
        </el-upload>
    </el-card>

</template>
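The component imports an md5 helper from ../lib/md5, which the article does not show. A minimal sketch of such a helper, assuming the spark-md5 package; the incremental FileReader loop keeps memory usage flat even for very large files:

// ../lib/md5.js — a hypothetical sketch; the article's real helper is not shown
import SparkMD5 from 'spark-md5'

/**
 * Compute a file's MD5 incrementally, slice by slice, so the whole file
 * never has to be held in memory at once.
 */
export default function md5(file, chunkSize = 5 * 1024 * 1024) {
    return new Promise((resolve, reject) => {
        const spark = new SparkMD5.ArrayBuffer()
        const reader = new FileReader()
        const chunks = Math.ceil(file.size / chunkSize)
        let currentChunk = 0

        const loadNext = () => {
            const start = currentChunk * chunkSize
            const end = Math.min(start + chunkSize, file.size)
            reader.readAsArrayBuffer(file.slice(start, end))
        }

        reader.onload = (e) => {
            spark.append(e.target.result) // feed this slice into the rolling hash
            currentChunk++
            if (currentChunk < chunks) {
                loadNext()
            } else {
                resolve(spark.end()) // hex digest, used as the task identifier
            }
        }
        reader.onerror = () => reject(reader.error)
        loadNext()
    })
}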

Front-end API file

import axios from 'axios'
import axiosExtra from 'axios-extra'

const baseUrl = 'http://172.16.10.74:10003/dsj-file'

const http = axios.create({
    baseURL: baseUrl,
    headers: {
        // The real bearer token is masked here; substitute your platform's own auth token
        'Dsj-Auth': 'bearer XXXXXXXXXX'
    }

})

const httpExtra = axiosExtra.create({
    maxConcurrent: 5, // maximum of 5 concurrent requests
    queueOptions: {
        retry: 3, //When the request fails, it will be retried up to 3 times.
        retryIsJump: false //Whether to retry immediately, otherwise the retry request will be inserted at the end of the request queue
    }
})

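// Unwrap the axios response so callers can destructure { code, data, msg } directly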
http.interceptors.response.use(response => {
    return response.data
})

/**
 * Get unfinished tasks based on the md5 of the file
 * @param identifier file md5
 * @returns {Promise<AxiosResponse<any>>}
 */
const taskInfo = (identifier) => {
    return http.get(`/parallel-upload/${identifier}`)
}

/**
 * Initialize a multipart upload task
 * @param identifier file md5
 * @param fileName file name
 * @param totalSize file size
 * @param chunkSize chunk size
 * @returns {Promise<AxiosResponse<any>>}
 */
const initTask = ({identifier, fileName, totalSize, chunkSize}) => {
    return http.post('/parallel-upload/init-task', {identifier, fileName, totalSize, chunkSize})
}

/**
 * Get the pre-signed multipart upload address
 * @param identifier file md5
 * @param partNumber chunk number
 * @returns {Promise<AxiosResponse<any>>}
 */
const preSignUrl = ({identifier, partNumber}) => {
    return http.get(`/parallel-upload/${identifier}/${partNumber}`)
}

/**
 * Merge chunks
 * @param identifier
 * @returns {Promise<AxiosResponse<any>>}
 */
const merge = (identifier) => {
    return http.post(`/parallel-upload/merge/${identifier}`)
}

export {
    taskInfo,
    initTask,
    preSignUrl,
    merge,
    httpExtra
}

File service MinIO configuration

# MinioProperties (below) binds the prefix "dsj.minio", so the block nests under dsj:
dsj:
  minio:
    endpoint: http://172.16.10.74:9000
    address: http://172.16.10.74
    port: 9000
    secure: false
    access-key: minioadmin
    secret-key: XXXXXXXXXX
    bucket-name: gpd
    internet-address: http://XXXXXXXXX:9000

MinioProperties.java

package com.dsj.prod.file.biz.properties;

import lombok.Data;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.cloud.context.config.annotation.RefreshScope;
import org.springframework.stereotype.Component;

import java.util.List;


@Data
@RefreshScope
@Component
@ConfigurationProperties(prefix = "dsj.minio")
public class MinioProperties {

    /**
     * The MinIO endpoint.
     */
    private String endpoint;

    /**
     * The server address.
     */
    private String address;

    /**
     * The server port.
     */
    private String port;

    /**
     * The access key.
     */
    private String accessKey;

    /**
     * The secret key.
     */
    private String secretKey;

    /**
     * The bucket name.
     */
    private String bucketName;

    /**
     * The internet-facing address.
     */
    private String internetAddress;

    /**
     * The allowed file extensions.
     * doc docx xls xlsx pictures pdf
     */
    private List<String> limitFileExtension;

}

Controller ParallelUploadController.java

package com.dsj.prod.file.biz.controller;


import com.dsj.plf.arch.tool.api.R;
import com.dsj.prod.file.api.dto.parallelUpload.InitTaskParam;
import com.dsj.prod.file.api.dto.parallelUpload.TaskInfoDTO;
import com.dsj.prod.file.api.entity.ParallelUploadTask;
import com.dsj.prod.file.biz.service.ParallelUploadTaskService;
import com.github.xiaoymin.knife4j.annotations.ApiOperationSupport;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiOperation;
import org.springframework.web.bind.annotation.*;

import javax.annotation.Resource;
import javax.validation.Valid;
import java.util.HashMap;
import java.util.Map;



@Api(value = "File multipart upload interface", tags = "File multipart upload interface")
@RestController
@RequestMapping("/parallel-upload")
public class ParallelUploadController {

    @Resource
    private ParallelUploadTaskService sysUploadTaskService;


    @ApiOperationSupport(order = 1)
    @ApiOperation(value = "Get upload progress", notes = "Pass in identifier: the file md5")
    @GetMapping("/{identifier}")
    public R<TaskInfoDTO> taskInfo(@PathVariable("identifier") String identifier) {
        TaskInfoDTO result = sysUploadTaskService.getTaskInfo(identifier);
        return R.data(result);
    }

    /**
     * Create an upload task
     *
     * @param param the param
     * @return result
     */
    @ApiOperationSupport(order = 2)
    @ApiOperation(value = "Create an upload task", notes = "Pass in param")
    @PostMapping("/init-task")
    public R<TaskInfoDTO> initTask(@Valid @RequestBody InitTaskParam param) {
        return R.data(sysUploadTaskService.initTask(param));
    }

    @ApiOperationSupport(order = 3)
    @ApiOperation(value = "Get the pre-signed upload URL of a chunk", notes = "Pass in identifier: the file md5, partNumber: the chunk number")
    @GetMapping("/{identifier}/{partNumber}")
    public R preSignUploadUrl(@PathVariable("identifier") String identifier, @PathVariable("partNumber") Integer partNumber) {
        ParallelUploadTask task = sysUploadTaskService.getByIdentifier(identifier);
        if (task == null) {
            return R.fail("The chunked upload task does not exist");
        }
        Map<String, String> params = new HashMap<>();
        params.put("partNumber", partNumber.toString());
        params.put("uploadId", task.getUploadId());
        return R.data(sysUploadTaskService.genPreSignUploadUrl(task.getBucketName(), task.getObjectKey(), params));
    }

    @ApiOperationSupport(order = 4)
    @ApiOperation(value = "Merge chunks", notes = "Pass in identifier: the file md5")
    @PostMapping("/merge/{identifier}")
    public R merge(@PathVariable("identifier") String identifier) {
        sysUploadTaskService.merge(identifier);
        return R.success("Merge successful");
    }

}
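The DTOs used by the controller (InitTaskParam, TaskInfoDTO, TaskRecordDTO) are not listed in the article. What follows is a hypothetical reconstruction inferred from how the controller, service, and front end use them; the real classes in com.dsj.prod.file.api.dto.parallelUpload may differ in detail, and each would live in its own file:

import com.amazonaws.services.s3.model.PartSummary;
import lombok.Data;
import lombok.experimental.Accessors;

import javax.validation.constraints.NotBlank;
import javax.validation.constraints.NotNull;
import java.util.List;

@Data
public class InitTaskParam {
    @NotBlank
    private String identifier;  // file md5
    @NotBlank
    private String fileName;
    @NotNull
    private Long totalSize;
    @NotNull
    private Long chunkSize;
}

@Data
@Accessors(chain = true) // the service chains setFinished(...).setTaskRecord(...).setPath(...)
public class TaskInfoDTO {
    private boolean finished;        // true if the object already exists in storage
    private String path;             // final access path of the file
    private TaskRecordDTO taskRecord;
}

@Data
@Accessors(chain = true)
public class TaskRecordDTO {
    private String fileIdentifier;   // file md5
    private Long chunkSize;
    private Integer chunkNum;
    private List<PartSummary> exitPartList; // chunks already present in MinIO

    // convertFromEntity(ParallelUploadTask) presumably copies the matching fields (not shown)
}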

Service layer ParallelUploadTaskService.java

package com.dsj.prod.file.biz.service;

import com.baomidou.mybatisplus.extension.service.IService;
import com.dsj.prod.file.api.dto.parallelUpload.InitTaskParam;
import com.dsj.prod.file.api.dto.parallelUpload.TaskInfoDTO;
import com.dsj.prod.file.api.entity.ParallelUploadTask;

import java.util.Map;

/**
 * Multipart upload - chunk task record (ParallelUploadTask) service interface
 *
 * @since 2022-08-22 17:47:30
 */
public interface ParallelUploadTaskService extends IService<ParallelUploadTask> {

    /**
     * Get the multipart upload task by its md5 identifier
     * @param identifier file md5
     * @return the task, or null if none exists
     */
    ParallelUploadTask getByIdentifier(String identifier);

    /**
     * Initialize a task
     */
    TaskInfoDTO initTask(InitTaskParam param);

    /**
     * Get the file address
     * @param bucket bucket name
     * @param objectKey key of the object
     * @return the access path
     */
    String getPath(String bucket, String objectKey);

    /**
     * Get upload progress
     * @param identifier file md5
     * @return task info, or null if no task exists
     */
    TaskInfoDTO getTaskInfo(String identifier);

    /**
     * Generate a pre-signed upload url
     * @param bucket bucket name
     * @param objectKey key of the object
     * @param params additional parameters
     * @return the pre-signed url
     */
    String genPreSignUploadUrl(String bucket, String objectKey, Map<String, String> params);

    /**
     * Merge chunks
     * @param identifier file md5
     */
    void merge(String identifier);
}
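The ParallelUploadTask entity itself is also not listed in the article. A minimal sketch of what it presumably looks like, reconstructed from the fields the service reads and writes (the real class, table name, and annotations may differ):

package com.dsj.prod.file.api.entity;

import com.baomidou.mybatisplus.annotation.IdType;
import com.baomidou.mybatisplus.annotation.TableId;
import com.baomidou.mybatisplus.annotation.TableName;
import lombok.Data;
import lombok.experimental.Accessors;

/**
 * Hypothetical reconstruction of the chunk-task entity, inferred from usage.
 */
@Data
@Accessors(chain = true) // the service chains setters: task.setBucketName(...).setChunkNum(...)
@TableName("parallel_upload_task") // assumed table name
public class ParallelUploadTask {

    @TableId(type = IdType.AUTO)
    private Long id;

    /** Multipart uploadId returned by MinIO / S3. */
    private String uploadId;

    /** File md5 used as the task identifier. */
    private String fileIdentifier;

    private String fileName;
    private String bucketName;
    private String objectKey;

    private Long totalSize;
    private Long chunkSize;
    private Integer chunkNum;
}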

Implementation layer ParallelUploadTaskServiceImpl

package com.dsj.prod.file.biz.service.impl;

import cn.hutool.core.date.DateUtil;
import cn.hutool.core.util.IdUtil;
import cn.hutool.core.util.StrUtil;
import com.amazonaws.HttpMethod;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.*;
import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper;
import com.baomidou.mybatisplus.extension.service.impl.ServiceImpl;
import com.dsj.prod.file.api.constants.MinioConstant;
import com.dsj.prod.file.api.dto.parallelUpload.InitTaskParam;
import com.dsj.prod.file.api.dto.parallelUpload.TaskInfoDTO;
import com.dsj.prod.file.api.dto.parallelUpload.TaskRecordDTO;
import com.dsj.prod.file.api.entity.ParallelUploadTask;
import com.dsj.prod.file.biz.mapper.ParallelUploadMapper;
import com.dsj.prod.file.biz.properties.MinioProperties;
import com.dsj.prod.file.biz.service.ParallelUploadTaskService;
import lombok.extern.slf4j.Slf4j;
import org.springframework.http.MediaType;
import org.springframework.http.MediaTypeFactory;
import org.springframework.stereotype.Service;

import javax.annotation.Resource;
import java.net.URL;
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

/**
 * Multipart upload - chunk task record (ParallelUploadTask) service implementation class
 *
 * @since 2022-08-22 17:47:31
 */
@Slf4j
@Service("sysUploadTaskService")
public class ParallelUploadTaskServiceImpl extends ServiceImpl<ParallelUploadMapper, ParallelUploadTask> implements ParallelUploadTaskService {

    @Resource
    private AmazonS3 amazonS3;

    @Resource
    private MinioProperties minioProperties;

    @Resource
    private ParallelUploadMapper sysUploadTaskMapper;

    @Override
    public ParallelUploadTask getByIdentifier(String identifier) {
        return sysUploadTaskMapper.selectOne(new QueryWrapper<ParallelUploadTask>().lambda().eq(ParallelUploadTask::getFileIdentifier, identifier));
    }


    @Override
    public TaskInfoDTO initTask(InitTaskParam param) {

        Date currentDate = new Date();
        String bucketName = minioProperties.getBucketName();
        String fileName = param.getFileName();
        String suffix = fileName.substring(fileName.lastIndexOf(".") + 1);
        // Note: "yyyy" (calendar year), not "YYYY" (week-based year), to avoid wrong folder names around New Year
        String key = StrUtil.format("{}/{}.{}", DateUtil.format(currentDate, "yyyy-MM-dd"), IdUtil.randomUUID(), suffix);
        String contentType = MediaTypeFactory.getMediaType(key).orElse(MediaType.APPLICATION_OCTET_STREAM).toString();
        ObjectMetadata objectMetadata = new ObjectMetadata();
        objectMetadata.setContentType(contentType);
        InitiateMultipartUploadResult initiateMultipartUploadResult = amazonS3.initiateMultipartUpload(new InitiateMultipartUploadRequest(bucketName, key)
                .withObjectMetadata(objectMetadata));
        String uploadId = initiateMultipartUploadResult.getUploadId();

        ParallelUploadTask task = new ParallelUploadTask();
        int chunkNum = (int) Math.ceil(param.getTotalSize() * 1.0 / param.getChunkSize());
        task.setBucketName(minioProperties.getBucketName())
                .setChunkNum(chunkNum)
                .setChunkSize(param.getChunkSize())
                .setTotalSize(param.getTotalSize())
                .setFileIdentifier(param.getIdentifier())
                .setFileName(fileName)
                .setObjectKey(key)
                .setUploadId(uploadId);
        sysUploadTaskMapper.insert(task);
        return new TaskInfoDTO().setFinished(false).setTaskRecord(TaskRecordDTO.convertFromEntity(task)).setPath(getPath(bucketName, key));
    }

    @Override
    public String getPath(String bucket, String objectKey) {
        return StrUtil.format("{}/{}/{}", minioProperties.getEndpoint(), bucket, objectKey);
    }

    @Override
    public TaskInfoDTO getTaskInfo(String identifier) {
        ParallelUploadTask task = getByIdentifier(identifier);
        if (task == null) {
            return null;
        }
        TaskInfoDTO result = new TaskInfoDTO().setFinished(true).setTaskRecord(TaskRecordDTO.convertFromEntity(task)).setPath(getPath(task.getBucketName(), task.getObjectKey()));

        boolean doesObjectExist = amazonS3.doesObjectExist(task.getBucketName(), task.getObjectKey());
        if (!doesObjectExist) {
            // The upload has not been completed yet, so return the chunks already uploaded
            ListPartsRequest listPartsRequest = new ListPartsRequest(task.getBucketName(), task.getObjectKey(), task.getUploadId());
            PartListing partListing = amazonS3.listParts(listPartsRequest);
            result.setFinished(false).getTaskRecord().setExitPartList(partListing.getParts());
        }
        return result;
    }

    @Override
    public String genPreSignUploadUrl(String bucket, String objectKey, Map<String, String> params) {
        Date currentDate = new Date();
        Date expireDate = DateUtil.offsetMillisecond(currentDate, MinioConstant.PRE_SIGN_URL_EXPIRE.intValue());
        GeneratePresignedUrlRequest request = new GeneratePresignedUrlRequest(bucket, objectKey)
                .withExpiration(expireDate).withMethod(HttpMethod.PUT);
        if (params != null) {
            params.forEach(request::addRequestParameter);
        }
        URL preSignedUrl = amazonS3.generatePresignedUrl(request);
        return preSignedUrl.toString();
    }

    @Override
    public void merge(String identifier) {
        ParallelUploadTask task = getByIdentifier(identifier);
        if (task == null) {
            log.error("The chunked upload task does not exist, task id: {}", identifier);
            throw new RuntimeException("The chunked upload task does not exist");
        }

        log.info("Start merging chunks, task id: {}", task.getId());
        ListPartsRequest listPartsRequest = new ListPartsRequest(task.getBucketName(), task.getObjectKey(), task.getUploadId());
        PartListing partListing = amazonS3.listParts(listPartsRequest);
        List<PartSummary> parts = partListing.getParts();
        if (!task.getChunkNum().equals(parts.size())) {
            // The number of uploaded chunks does not match the number recorded, so the chunks cannot be merged
            log.error("Chunks missing, task id: {}, chunks uploaded: {}, chunks recorded: {}", task.getId(), parts.size(), task.getChunkNum());
            throw new RuntimeException("Chunks are missing, please upload again");
        }
        CompleteMultipartUploadRequest completeMultipartUploadRequest = new CompleteMultipartUploadRequest()
                .withUploadId(task.getUploadId())
                .withKey(task.getObjectKey())
                .withBucketName(task.getBucketName())
                .withPartETags(parts.stream().map(partSummary -> new PartETag(partSummary.getPartNumber(), partSummary.getETag())).collect(Collectors.toList()));
        CompleteMultipartUploadResult result = amazonS3.completeMultipartUpload(completeMultipartUploadRequest);
        log.info("Chunk merge completed, result: {}", result);
    }
}
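Two small pieces referenced above are not listed in the article: the MyBatis-Plus mapper and the MinioConstant holding the pre-signed URL lifetime. Minimal sketches of what they presumably look like (the expiry value here is an assumption):

package com.dsj.prod.file.biz.mapper;

import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import com.dsj.prod.file.api.entity.ParallelUploadTask;
import org.apache.ibatis.annotations.Mapper;

/** Under MyBatis-Plus the mapper can stay empty; BaseMapper provides the CRUD methods. */
@Mapper
public interface ParallelUploadMapper extends BaseMapper<ParallelUploadTask> {
}

package com.dsj.prod.file.api.constants;

public interface MinioConstant {

    /** Lifetime of pre-signed upload URLs in milliseconds (1 hour here; the real value is not shown in the article). */
    Long PRE_SIGN_URL_EXPIRE = 60 * 60 * 1000L;
}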

Amazon S3 configuration class AmazonS3Config.java

package com.dsj.prod.file.biz.config;

import com.amazonaws.ClientConfiguration;
import com.amazonaws.Protocol;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.client.builder.AwsClientBuilder;
import com.amazonaws.regions.Regions;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.dsj.prod.file.biz.properties.MinioProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import javax.annotation.Resource;

@Configuration
public class AmazonS3Config {

    @Resource
    private MinioProperties minioProperties;

    @Bean(name = "amazonS3Client")
    public AmazonS3 amazonS3Client() {
        ClientConfiguration config = new ClientConfiguration();
        config.setProtocol(Protocol.HTTP);
        config.setConnectionTimeout(60000);
        config.setUseExpectContinue(true);
        AWSCredentials credentials = new BasicAWSCredentials(minioProperties.getAccessKey(), minioProperties.getSecretKey());
        // Point the S3 client at the MinIO endpoint; a region value is required by the builder but is generally ignored by MinIO
        AwsClientBuilder.EndpointConfiguration endpointConfiguration = new AwsClientBuilder.EndpointConfiguration(minioProperties.getEndpoint(), Regions.CN_NORTH_1.name());
        return AmazonS3ClientBuilder.standard()
                .withClientConfiguration(config)
                .withCredentials(new AWSStaticCredentialsProvider(credentials))
                .withEndpointConfiguration(endpointConfiguration)
                // MinIO requires path-style access (http://host:port/bucket/key) rather than virtual-host style
                .withPathStyleAccessEnabled(true)
                .build();
    }

}
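For completeness, the client above comes from the AWS S3 Java SDK (v1); the article does not list the dependency, but a typical Maven coordinate looks like this (the version is an assumption, any recent 1.12.x should work):

<dependency>
    <groupId>com.amazonaws</groupId>
    <artifactId>aws-java-sdk-s3</artifactId>
    <version>1.12.261</version>
</dependency>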

Preview of the overall effect

General process (the steps map onto the HTTP calls sketched after this list):

1. Initialize a task and obtain the target link. (The file's MD5 is used to check whether chunks or the whole file already exist in object storage: if breakpoint chunks exist, uploading resumes from the first missing chunk; if the whole file already exists, its link is returned directly.)

2. Upload the file chunk by chunk.

3. Merge the chunks.

4. Finish, and report that the merge succeeded.
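As a concrete illustration, here is the request sequence for a hypothetical 12 MB file with the 5 MB chunk size used above (three parts; the paths follow the controller shown earlier):

GET  /parallel-upload/{md5}            -> no unfinished task found, so:
POST /parallel-upload/init-task        -> creates the task, returns chunkNum etc.
GET  /parallel-upload/{md5}/1          -> pre-signed URL for part 1
PUT  {preSignedUrl}                    -> raw 5 MB chunk, sent straight to MinIO
GET  /parallel-upload/{md5}/2          -> ... part 2, and likewise part 3
POST /parallel-upload/merge/{md5}      -> CompleteMultipartUpload on the server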