Multipart file upload, resumable (breakpoint) upload, and instant transfer of large files

1. Description
The problem is that the file is too large: uploading a large file to the server in a single request can time out and fail. The solution is to cut the file into small chunks, upload them to the server in a loop, and then splice them back together into a complete file on the server. The upload can also be sped up by sending chunks concurrently.
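The chunk arithmetic and the concurrency idea can be sketched as follows. This is a minimal, self-contained Java sketch, not the project's code: uploadPart is a hypothetical stand-in for the real HTTP call, and the pool size and file size are illustrative numbers.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.*;

public class ConcurrentChunkUploader {
    static final long CHUNK_SIZE = 2 * 1024 * 1024; // 2 MB per chunk, matching the front-end below

    public static void main(String[] args) throws Exception {
        long fileSize = 25L * 1024 * 1024;                       // example: a 25 MB file
        int chunkCount = (int) Math.ceil((double) fileSize / CHUNK_SIZE);

        ExecutorService pool = Executors.newFixedThreadPool(4);  // 4 parallel uploads
        List<Future<?>> tasks = new ArrayList<>();
        for (int i = 0; i < chunkCount; i++) {
            final int partNum = i + 1;                           // part numbers start at 1
            tasks.add(pool.submit(() -> uploadPart(partNum)));
        }
        for (Future<?> t : tasks) t.get();                       // wait for every part to finish
        pool.shutdown();
        // after all parts succeed, ask the server to merge them
    }

    static void uploadPart(int partNum) {
        System.out.println("uploading part " + partNum);         // placeholder for the HTTP call
    }
}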
2. Front-end
The following code is divided into 3 steps: obtain the initialization ID, cut the file into chunks and upload them in a loop, and, once all chunks are uploaded, request that the server merge them.

<template>
  <div class="components-upload">
    <el-upload
      action
      :auto-upload="false"
      :on-change="onChange"
      :accept="'video/*'"
      :show-file-list="false"
      drag
    >
      <i class="el-icon-plus avatar-uploader-icon"></i>
    </el-upload>
    <el-progress v-if="progressFlag" :percentage="loadProgressCount"></el-progress>
  </div>
</template>

<script>
import md5 from 'js-md5'
import { mapGetters } from 'vuex'

export default {
  name: 'upload',
  props: {
    className: {
      type: String
    }
  },
  data () {
    return {
      video: '',
      fileMD5: '',
      progressFlag: true,
      loadProgress: 0, // number of chunks uploaded so far; converted to a percentage in loadProgressCount
      loadProgressCount: 0, // current progress as a percentage
      chunkCount: 0, // total number of chunks
      chunkSize: 2 * 1024 * 1024, // 2 MB per chunk
      uploadId: '', // OSS multipart upload ID, returned by the init interface
      videoName: ''
    }
  },
  methods: {
    onChange(event) {
      // Reset component data before a new upload
      Object.assign(this.$data, this.$options.data())
      this.video = event.raw;
      if (!this.video) return;
      this.videoName = this.video.name
      this.chunkCount = Math.ceil(this.video.size / this.chunkSize) // number of chunks
      let fileReaderInstance = new FileReader()
      fileReaderInstance.readAsBinaryString(this.video)
      fileReaderInstance.addEventListener('load', e => {
        let fileBlob = e.target.result
        // The key to instant transfer: a file's MD5 is (practically) unique. If the
        // uploaded file has the same MD5 as one already recorded in the database, the
        // server can return the existing file address directly, achieving "instant transfer".
        this.fileMD5 = md5(fileBlob)
        // 1. Initialization
        this.api({
          url: `${this.urlData.weikeUploadInitiate}?fileName=${this.video.name}`,
          method: "get"
        }).then(data => {
          if (data.status == 200) {
            this.uploadId = data.data

            // 2. Perform the multipart upload
            this.readChunkMD5(0)
          } else {
            this.$message({
              message: `${data.message}`,
              type: "error"
            });
          }
        })
      })
    },
    // Compute one chunk of the file
    getChunkInfo (currentChunk) {
      let start = currentChunk * this.chunkSize // start position
      let end = Math.min(this.video.size, start + this.chunkSize) // end position
      let chunk = this.video.slice(start, end) // chunk content
      return { start, end, chunk }
    },
    // Upload the file chunk by chunk
    readChunkMD5 (num) {
      const { chunk } = this.getChunkInfo(num)

      // If fewer chunks have been uploaded than the total, upload the next one;
      // otherwise all chunks are done, so request the merge.
      if (num < this.chunkCount) {
        let fetchForm = new FormData()
        fetchForm.append('chunk', num + 1) // current chunk number (1-based)
        fetchForm.append('chunks', this.chunkCount) // total number of chunks
        fetchForm.append('file', chunk) // current chunk content
        fetchForm.append('md5', this.fileMD5)
        fetchForm.append('objectName', this.videoName)
        fetchForm.append('uploadId', this.uploadId)
        fetchForm.append('curPartSize', chunk.size)
        this.api({
          url: this.urlData.weikeUpload,
          method: "post",
          data: fetchForm
        }).then(data => {
          if (data.status == 200) {
            // Update progress after each successful chunk
            this.loadProgress++
            num = num + 1
            // Continue with the next chunk
            this.readChunkMD5(num)
          } else {
            // Retry the same chunk; in practice you should cap the retry count
            // and only then surface the failure
            this.readChunkMD5(num)
            this.$message({
              message: `${data.message}`,
              type: "error"
            });
          }
        }).catch(() => {
          // On a network error (e.g. a local test timing out), retry this chunk
          this.readChunkMD5(num)
        })
      } else {
        // All chunks uploaded: ask the server to merge them
        this.api({
          url: this.urlData.weikeUploadComplete,
          method: "post",
          data: {
            uploadId: this.uploadId,
            objectName: this.videoName
          }
        }).then(data => {
          console.log(data)
        }).catch(() => {})
      }
    }
  },
  watch: {
    // Watch progress changes and convert the chunk count to a percentage
    loadProgress(newVal, oldVal) {
      if (this.loadProgress == 0) {
        this.loadProgressCount = 0
      } else {
        this.loadProgressCount = Math.floor(this.loadProgress / this.chunkCount * 100)
      }
    }
  },
  computed: {
    ...mapGetters([
      'urlData',
      'host'
    ])
  }
}
</script>

<style>
  .components-upload .el-upload, .components-upload .el-upload-dragger {
    width: 100% !important;
  }
</style>
<style lang="scss" scoped>
  .components-upload {
    width: 100%;
  }
</style>
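The "instant transfer" check mentioned in the MD5 comment above is not shown in the back-end code below. A minimal server-side sketch might look like the following; the /upload/check endpoint, fileRecordMapper, and findUrlByMd5 are all hypothetical names, not part of this project's actual code.

// Hypothetical sketch of the MD5 "instant transfer" check: if a file with the
// same MD5 was uploaded before, return its address and skip the upload entirely.
@GetMapping("/upload/check")
@ResponseBody
public Result check(@RequestParam("md5") String md5) {
    // Look up whether a file with this MD5 already exists in the database
    String existingUrl = fileRecordMapper.findUrlByMd5(md5);
    if (existingUrl != null) {
        // Same content already on the server: return its address directly
        return Result.success("url", existingUrl);
    }
    // Not found: the client proceeds with the normal multipart upload
    return Result.success("url", null);
}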

3. Java + OSS + Redis implementation of multipart upload
Controller endpoints

/**
 * Initialize the multipart upload
 * @return uploadId
 */
@GetMapping("/upload/initiate")
@ResponseBody
public Result initiate(@RequestParam("fileName") String fileName) {

    String uploadId = ossUploadService.initiate(fileName);
    return Result.success("uploadId", uploadId);
}
// Upload a chunk
@RequestMapping("/upload")
@ResponseBody
public Result upload(@RequestParam("file") MultipartFile file, HttpServletRequest request) throws IOException {
    Integer chunk = Integer.valueOf(request.getParameter("chunk"));
    Integer chunks = Integer.valueOf(request.getParameter("chunks"));
    // The md5 can be checked against the database first; if it already exists,
    // return the file address directly (instant transfer of large files)
    String md5 = request.getParameter("md5");
    String objectName = request.getParameter("objectName");
    String uploadId = request.getParameter("uploadId");
    Integer curPartSize = Integer.valueOf(request.getParameter("curPartSize"));
    InputStream in = file.getInputStream();
    ossUploadService.upload(objectName, uploadId, in, curPartSize, chunk);
    return Result.success();
}

// Merge
@PostMapping("/upload/complete")
@ResponseBody
public Result complete(@RequestBody Map<String, Object> request) {
    String objectName = (String) request.get("objectName");
    String uploadId = (String) request.get("uploadId");
    ossUploadService.complete(objectName, uploadId);
    return Result.success();
}

Service implementation


import com.alibaba.fastjson.JSON;
import com.aliyun.oss.OSSClient;
import com.aliyun.oss.model.*;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.stereotype.Service;

import java.io.InputStream;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

@Service
public class OssUploadServiceImpl implements OssUploadService {

    @Autowired
    private OSSClient ossClient;

    @Value("${aliyun.oss.bucketName}")
    private String bucketName;

    private String path = "H5/weike/video/";

    @Autowired
    private RedisTemplate redisTemplate;

    /**
     * OSS multipart upload initialization
     * @param objectName
     * @return uploadId
     */
    @Override
    public String initiate(String objectName) {

        // Create the InitiateMultipartUploadRequest object.
        InitiateMultipartUploadRequest request = new InitiateMultipartUploadRequest(bucketName, path.concat(objectName));
        InitiateMultipartUploadResult upresult = ossClient.initiateMultipartUpload(request);
        // Return the uploadId, the unique identifier of this multipart upload event.
        // Related operations (cancelling the upload, listing uploaded parts, etc.)
        // are all keyed by this uploadId.
        String uploadId = upresult.getUploadId();
        return uploadId;
    }

    /**
     * Upload one chunk
     * @param objectName file name
     * @param uploadId initialization ID
     * @param inputStream chunk content stream
     * @param curPartSize current chunk size
     * @param partNum chunk number
     */
    @Override
    public void upload(String objectName, String uploadId, InputStream inputStream, Integer curPartSize, Integer partNum) {

        UploadPartRequest uploadPartRequest = new UploadPartRequest();
        uploadPartRequest.setBucketName(bucketName);
        uploadPartRequest.setKey(path.concat(objectName));
        uploadPartRequest.setUploadId(uploadId);
        uploadPartRequest.setInputStream(inputStream);
        // Set the part size. Except for the last part, which has no lower limit,
        // each part must be at least 100 KB.
        uploadPartRequest.setPartSize(curPartSize);
        // Set the part number, in the range 1~10000; outside this range OSS
        // returns the InvalidArgument error code.
        uploadPartRequest.setPartNumber(partNum);
        // Parts need not be uploaded in order and may even come from different
        // clients; OSS sorts them by part number into the complete file.
        UploadPartResult uploadPartResult = ossClient.uploadPart(uploadPartRequest);
        // The result returned by OSS for each part contains a PartETag; cache it
        // in Redis so that complete() can collect all of them.
        PartETag partETag = uploadPartResult.getPartETag();
        String string = JSON.toJSONString(partETag);
        redisTemplate.opsForList().leftPush(uploadId, string);
    }

    /**
     * Merge the chunks
     * @param objectName
     * @param uploadId
     */
    @Override
    public void complete(String objectName, String uploadId) {

        List<PartETag> partETags = new ArrayList<>();
        List<String> partETagList = redisTemplate.opsForList().range(uploadId, 0, -1);
        for (String s : partETagList) {
            PartETag partETag = JSON.parseObject(s, PartETag.class);
            partETags.add(partETag);
        }
        // leftPush stored the parts in reverse order, so sort them by part number;
        // the completed part list should be in ascending order.
        partETags.sort(Comparator.comparingInt(PartETag::getPartNumber));
        // Create the CompleteMultipartUploadRequest object.
        // Completing a multipart upload requires all valid PartETags. OSS verifies
        // each part one by one, and only when every part passes verification does
        // it combine the parts into the complete file.
        CompleteMultipartUploadRequest completeMultipartUploadRequest =
                new CompleteMultipartUploadRequest(bucketName, path.concat(objectName), uploadId, partETags);

        // Complete the upload.
        ossClient.completeMultipartUpload(completeMultipartUploadRequest);
        // Clean up the PartETags cached in Redis for this upload
        redisTemplate.delete(uploadId);
    }
}
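Breakpoint resume with OSS works by asking OSS which parts of an uploadId it has already received, so the client only re-sends the missing chunks. Below is a minimal sketch using the SDK's ListPartsRequest; the method listUploadedParts is a hypothetical addition to the service above, while ListPartsRequest, PartListing and PartSummary come from the OSS Java SDK (com.aliyun.oss.model.*).

    // Sketch: list the part numbers OSS has already received for this uploadId,
    // so the front-end can skip those chunks when resuming.
    public List<Integer> listUploadedParts(String objectName, String uploadId) {
        ListPartsRequest listPartsRequest = new ListPartsRequest(bucketName, path.concat(objectName), uploadId);
        PartListing partListing = ossClient.listParts(listPartsRequest);
        List<Integer> partNumbers = new ArrayList<>();
        for (PartSummary part : partListing.getParts()) {
            // (a single call returns at most 1000 parts; for more, page with setPartNumberMarker)
            partNumbers.add(part.getPartNumber());
        }
        return partNumbers;
    }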


4. Java local multipart upload (not paired with the front-end code above)
The principle is the same: the received chunk contents are saved locally, and once all chunks have arrived they are merged into a single file.

package com.test.controller;
?
import org.apache.commons.fileupload.FileItem;
import org.apache.commons.fileupload.FileUploadException;
import org.apache.commons.fileupload.disk.DiskFileItemFactory;
import org.apache.commons.fileupload.servlet.ServletFileUpload;
import org.apache.commons.io.FileUtils;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Controller;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.ResponseBody;
?
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.*;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
?
@Controller
public class UploadController {
    @Value("${shopk.upload.charset}")
    private String charset;
    @Value("${shopk.upload.temFile}")
    private String temFile;

    @RequestMapping("/upload")
    @ResponseBody
    public Object upload(HttpServletRequest request, HttpServletResponse response) throws Exception {

        response.setCharacterEncoding(charset);
        Integer chunk = null;
        Integer chunks = null;
        String name = null;
        // Match the md5 against the database; if it already exists, the upload can
        // be skipped and the existing file address returned (instant transfer)
        String md5 = null;
        String filePath = temFile;
        BufferedOutputStream os = null;

        try {
            // Parser factory
            DiskFileItemFactory fileItemFactory = new DiskFileItemFactory();
            // In-memory buffer threshold
            fileItemFactory.setSizeThreshold(1024);
            // Temporary directory
            fileItemFactory.setRepository(new File(filePath));
            // File upload parser
            ServletFileUpload servletFileUpload = new ServletFileUpload(fileItemFactory);
            // 5 GB limit per file
            servletFileUpload.setFileSizeMax(5L * 1024L * 1024L * 1024L);
            // 10 GB total request limit
            servletFileUpload.setSizeMax(10L * 1024L * 1024L * 1024L);
            List<FileItem> items = servletFileUpload.parseRequest(request);
            for (FileItem item : items) {
                if (item.isFormField()) {
                    if ("chunk".equals(item.getFieldName())) {
                        chunk = Integer.parseInt(item.getString(charset));
                    }
                    if ("chunks".equals(item.getFieldName())) {
                        chunks = Integer.parseInt(item.getString(charset));
                    }
                    if ("name".equals(item.getFieldName())) {
                        name = item.getString(charset);
                    }
                    if ("md5".equals(item.getFieldName())) {
                        md5 = item.getString(charset);
                    }
                }
            }
            for (FileItem item : items) {
                if (!item.isFormField()) {
                    String temFileName = name;
                    if (name != null) {
                        if (chunk != null) {
                            temFileName = chunk + "_" + md5 + name;
                        }
                        File temFile = new File(filePath, temFileName);
                        // If the chunk file already exists it was uploaded before and can
                        // be skipped: this is what makes breakpoint resume possible
                        if (!temFile.exists()) {
                            item.write(temFile);
                        }
                    }
                }
            }
            // Merge the chunks once the last one has arrived
            if (chunk != null && chunks != null && chunk.intValue() == chunks.intValue() - 1) {
                File tempFile = new File(filePath, name);
                os = new BufferedOutputStream(new FileOutputStream(tempFile));

                for (int i = 0; i < chunks; i++) {
                    File file = new File(filePath, i + "_" + md5 + name);
                    // Earlier chunks may still be in flight; wait until this one exists
                    while (!file.exists()) {
                        Thread.sleep(100);
                    }
                    byte[] bytes = FileUtils.readFileToByteArray(file);
                    os.write(bytes);
                    os.flush();
                    file.delete();
                }
                os.flush();
            }
            Map<String, Object> result = new HashMap<>();
            result.put("code", 200);
            result.put("message", "success");
            return result;
        } catch (FileUploadException | UnsupportedEncodingException e) {
            e.printStackTrace();
            Map<String, Object> result = new HashMap<>();
            result.put("code", 1000);
            result.put("message", "failure");
            return result;
        } finally {
            if (os != null) {
                try {
                    os.close();
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        }
    }
}
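For the local variant, breakpoint resume can be supported symmetrically: before uploading, the front-end asks which chunk files already exist on disk and skips those indices. A minimal sketch follows; the /upload/check endpoint and its parameters are hypothetical additions to the controller above (they also need the @RequestParam and java.util.ArrayList imports).

    // Hypothetical endpoint: report which chunk indices already exist on disk
    // for a given md5 + name, so the client resumes from the missing ones.
    @RequestMapping("/upload/check")
    @ResponseBody
    public Object check(@RequestParam("name") String name,
                        @RequestParam("md5") String md5,
                        @RequestParam("chunks") Integer chunks) {
        List<Integer> uploaded = new ArrayList<>();
        for (int i = 0; i < chunks; i++) {
            // Chunk files are named "<index>_<md5><name>", matching the upload code above
            if (new File(temFile, i + "_" + md5 + name).exists()) {
                uploaded.add(i);
            }
        }
        Map<String, Object> result = new HashMap<>();
        result.put("code", 200);
        result.put("uploaded", uploaded);
        return result;
    }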

Reference article: http://blog.ncmem.com/wordpress/2023/10/23/File segmented upload, breakpoint resume upload, large file instant transfer/