Implementing breakpoint resume (resumable upload) in a Spring Boot project

This article shows how to implement breakpoint resume (resumable chunked upload) in a Spring Boot project. The front end splits each file into chunks and identifies the file by its MD5; the back end reports which chunks it already has, stores incoming chunks, and merges them into the final file once all chunks have arrived. The sample code below is detailed enough to serve as a reference for study or work.
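
The service implementation below reads its upload directory from the ruoyi.profile property (the code comes from a project built on the RuoYi scaffold). A minimal application.yml sketch, assuming local paths and limits you would adapt to your own environment — the spring.servlet.multipart keys are standard Spring Boot, while ruoyi.profile matches the @Value placeholder used in UploadServiceImpl:

ruoyi:
  # Directory for chunks and merged files; keep the trailing slash, because the
  # service concatenates identifiers and file names directly onto this value
  profile: D:/ruoyi/uploadPath/
spring:
  servlet:
    multipart:
      # Each request carries a single chunk, so these only need to exceed the chunk size
      max-file-size: 10MB
      max-request-size: 20MB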
Back-end Java code

package com.ruoyi.web.upload.controller;
import com.ruoyi.web.upload.dto.FileChunkDTO;
import com.ruoyi.web.upload.dto.FileChunkResultDTO;
import com.ruoyi.web.upload.result.Result;
import com.ruoyi.web.upload.service.IUploadService;
import org.springframework.web.bind.annotation.*;
import javax.annotation.Resource;
/**
 * @ProjectName UploaderController
 * @author Administrator
 * @version 1.0.0
 * @Description Chunked attachment upload
 * @createTime 2022/4/13 15:58
 */
@RestController
@RequestMapping("upload")
public class UploaderController {
   @Resource
   private IUploadService uploadService;
   /**
    * Check which chunks of a file already exist
    *
    * @return
    */
   @GetMapping("chunk")
   public Result checkChunkExist(FileChunkDTO chunkDTO) {
      FileChunkResultDTO fileChunkCheckDTO;
      try {
         fileChunkCheckDTO = uploadService.checkChunkExist(chunkDTO);
         return Result.ok(fileChunkCheckDTO);
      } catch (Exception e) {
         return Result.fail(e.getMessage());
      }
   }
   /**
    * Upload a single file chunk
    *
    * @param chunkDTO
    * @return
    */
   @PostMapping("chunk")
   public Result uploadChunk(FileChunkDTO chunkDTO) {
      try {
         uploadService.uploadChunk(chunkDTO);
         return Result.ok(chunkDTO.getIdentifier());
      } catch (Exception e) {
         return Result.fail(e.getMessage());
      }
   }
   /**
    * Request to merge the uploaded chunks into the final file
    *
    * @param chunkDTO
    * @return
    */
   @PostMapping("merge")
   public Result mergeChunks(@RequestBody FileChunkDTO chunkDTO) {
      try {
         boolean success = uploadService.mergeChunk(chunkDTO.getIdentifier(), chunkDTO.getFilename(), chunkDTO.getTotalChunks());
         return Result.ok(success);
      } catch (Exception e) {
         return Result.fail(e.getMessage());
      }
   }
}

package com.ruoyi.web.upload.dto;
import org.springframework.web.multipart.MultipartFile;
/**
 * @ProjectName FileChunkDTO
 * @author Administrator
 * @version 1.0.0
 * @Description Chunked attachment upload
 * @createTime 2022/4/13 15:59
 */
public class FileChunkDTO {
   /**
    * File MD5
    */
   private String identifier;
   /**
    * The chunk file
    */
   MultipartFile file;
   /**
    * Current chunk number
    */
   private Integer chunkNumber;
   /**
    * Chunk size
    */
   private Long chunkSize;
   /**
    * Size of the current chunk
    */
   private Long currentChunkSize;
   /**
    * Total file size
    */
   private Long totalSize;
   /**
    * Total number of chunks
    */
   private Integer totalChunks;
   /**
    * File name
    */
   private String filename;
   public String getIdentifier() {
      return identifier;
   }
   public void setIdentifier(String identifier) {
      this.identifier = identifier;
   }
   public MultipartFile getFile() {
      return file;
   }
   public void setFile(MultipartFile file) {
      this.file = file;
   }
   public Integer getChunkNumber() {
      return chunkNumber;
   }
   public void setChunkNumber(Integer chunkNumber) {
      this.chunkNumber = chunkNumber;
   }
   public Long getChunkSize() {
      return chunkSize;
   }
   public void setChunkSize(Long chunkSize) {
      this.chunkSize = chunkSize;
   }
   public Long getCurrentChunkSize() {
      return currentChunkSize;
   }
   public void setCurrentChunkSize(Long currentChunkSize) {
      this.currentChunkSize = currentChunkSize;
   }
   public Long getTotalSize() {
      return totalSize;
   }
   public void setTotalSize(Long totalSize) {
      this.totalSize = totalSize;
   }
   public Integer getTotalChunks() {
      return totalChunks;
   }
   public void setTotalChunks(Integer totalChunks) {
      this.totalChunks = totalChunks;
   }
   public String getFilename() {
      return filename;
   }
   public void setFilename(String filename) {
      this.filename = filename;
   }
   @Override
   public String toString() {
      return "FileChunkDTO{" +
              "identifier='" + identifier + '\'' +
              ", file=" + file +
              ", chunkNumber=" + chunkNumber +
              ", chunkSize=" + chunkSize +
              ", currentChunkSize=" + currentChunkSize +
              ", totalSize=" + totalSize +
              ", totalChunks=" + totalChunks +
              ", filename='" + filename + '\'' +
              '}';
   }
}

package com.ruoyi.web.upload.dto;
import java.util.Set;
/**
 * @ProjectName FileChunkResultDTO
 * @author Administrator
 * @version 1.0.0
 * @Description Chunked attachment upload
 * @createTime 2022/4/13 15:59
 */
public class FileChunkResultDTO {
   /**
    * Whether uploading can be skipped (the complete file already exists)
    */
   private Boolean skipUpload;
   /**
    * Set of chunk numbers that have already been uploaded
    */
   private Set<Integer> uploaded;
   public Boolean getSkipUpload() {
      return skipUpload;
   }
   public void setSkipUpload(Boolean skipUpload) {
      this.skipUpload = skipUpload;
   }
   public Set<Integer> getUploaded() {
      return uploaded;
   }
   public void setUploaded(Set<Integer> uploaded) {
      this.uploaded = uploaded;
   }
   public FileChunkResultDTO(Boolean skipUpload, Set<Integer> uploaded) {
      this.skipUpload = skipUpload;
      this.uploaded = uploaded;
   }
   public FileChunkResultDTO(Boolean skipUpload) {
      this.skipUpload = skipUpload;
   }
}

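For reference, vue-simple-uploader issues the check as GET /upload/chunk with the chunk metadata as query parameters, and the wrapped response that checkChunkUploadedByResponse consumes on the front end looks roughly like this (values are illustrative):

{
  "code": 200,
  "message": "success",
  "data": {
    "skipUpload": false,
    "uploaded": [1, 2, 3]
  }
}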
package com.ruoyi.web.upload.dto;
import lombok.Getter;
/**
 * @Author
 * @Date Created in 2023/2/23 17:25
 * @Description Unified return-result status information class
 * @Version V1.0
 */
@Getter
@SuppressWarnings("all")
public enum ResultCodeEnum {
    SUCCESS(200, "success"),
    FAIL(201, "Failure"),
    PARAM_ERROR(202, "Incorrect parameter"),
    SERVICE_ERROR(203, "Service exception"),
    DATA_ERROR(204, "Data exception"),
    DATA_UPDATE_ERROR(205, "Data version exception"),
    LOGIN_AUTH(208, "Not logged in"),
    PERMISSION(209, "No permission"),
    CODE_ERROR(210, "Verification code error"),
    LOGIN_MOBLE_ERROR(211, "Incorrect account"),
    LOGIN_DISABLED_ERROR(212, "The user has been disabled"),
    REGISTER_MOBLE_ERROR(213, "Mobile phone number format is incorrect"),
    REGISTER_MOBLE_ERROR_NULL(214, "Mobile phone number is empty"),
    LOGIN_AURH(214, "Login required"),
    LOGIN_ACL(215, "No permission"),
    URL_ENCODE_ERROR(216, "URL encoding failed"),
    ILLEGAL_CALLBACK_REQUEST_ERROR(217, "Illegal callback request"),
    FETCH_ACCESSTOKEN_FAILD(218, "Failed to obtain accessToken"),
    FETCH_USERINFO_ERROR(219, "Failed to obtain user information");
    private Integer code;
    private String message;
    private ResultCodeEnum(Integer code, String message) {
        this.code = code;
        this.message = message;
    }
}

package com.ruoyi.web.upload.service;
import com.ruoyi.web.upload.dto.FileChunkDTO;
import com.ruoyi.web.upload.dto.FileChunkResultDTO;
import java.io.IOException;
/**
 * @ProjectName IUploadService
 * @author Administrator
 * @version 1.0.0
 * @Description Chunked attachment upload
 * @createTime 2022/4/13 15:59
 */
public interface IUploadService {
   /**
    * Check whether the file already exists. If the complete file exists, uploading can be
    * skipped; otherwise the set of already-uploaded chunks is returned so the client knows
    * which chunks still need to be sent.
    * @param chunkDTO
    * @return
    */
   FileChunkResultDTO checkChunkExist(FileChunkDTO chunkDTO);
   /**
    * Upload a single file chunk
    * @param chunkDTO
    */
   void uploadChunk(FileChunkDTO chunkDTO) throws IOException;
   /**
    * Merge the file chunks
    * @param identifier
    * @param fileName
    * @param totalChunks
    * @return
    * @throws IOException
    */
   boolean mergeChunk(String identifier, String fileName, Integer totalChunks) throws IOException;
}

package com.ruoyi.web.upload.service.impl;
import com.ruoyi.web.upload.dto.FileChunkDTO;
import com.ruoyi.web.upload.dto.FileChunkResultDTO;
import com.ruoyi.web.upload.service.IUploadService;
import org.apache.tomcat.util.http.fileupload.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.stereotype.Service;
import java.io.*;
import java.util.*;
/**
 * @ProjectName UploadServiceImpl
 * @author Administrator
 * @version 1.0.0
 * @Description Chunked attachment upload
 * @createTime 2022/4/13 15:59
 */
@Service
@SuppressWarnings("all")
public class UploadServiceImpl implements IUploadService {
   private Logger logger = LoggerFactory.getLogger(UploadServiceImpl.class);
   @Autowired
   private RedisTemplate redisTemplate;
   @Value("${ruoyi.profile}")
   private String uploadFolder;
   /**
    * Check whether the file exists. If the complete file exists, uploading is skipped;
    * otherwise the set of already-uploaded chunks is returned.
    *    ○ Check whether the merged file exists on disk.
    *    ○ Check the set of uploaded chunks recorded in Redis.
    *    ○ Compare the number of uploaded chunks with the total number of chunks.
    * If the file exists and all chunks have been uploaded, the attachment is complete and
    * an "instant transfer" (skip upload) can be performed.
    * If the file does not exist or not all chunks have arrived, skipUpload is false and
    * the already-uploaded chunks are returned so the client can resume from the breakpoint.
    * @param chunkDTO
    * @return
    */
   @Override
   public FileChunkResultDTO checkChunkExist(FileChunkDTO chunkDTO) {
      // 1. Check whether the file has already been uploaded
      // 1.1) Check whether it exists on disk
      String fileFolderPath = getFileFolderPath(chunkDTO.getIdentifier());
      logger.info("fileFolderPath-->{}", fileFolderPath);
      String filePath = getFilePath(chunkDTO.getIdentifier(), chunkDTO.getFilename());
      File file = new File(filePath);
      boolean exists = file.exists();
      // 1.2) Check whether Redis records that all chunks have been uploaded
      Set<Integer> uploaded = (Set<Integer>) redisTemplate.opsForHash().get(chunkDTO.getIdentifier(), "uploaded");
      if (uploaded != null && uploaded.size() == chunkDTO.getTotalChunks() && exists) {
         return new FileChunkResultDTO(true);
      }
      File fileFolder = new File(fileFolderPath);
      if (!fileFolder.exists()) {
         boolean mkdirs = fileFolder.mkdirs();
         logger.info("Preparation: created folder, fileFolderPath:{}, mkdirs:{}", fileFolderPath, mkdirs);
      }
      // Resume from the breakpoint: return the chunks that were already uploaded
      return new FileChunkResultDTO(false, uploaded);
   }
   /**
    * Upload a chunk
    *    ○ Ensure the chunk directory exists; create it if it does not.
    *    ○ Copy the chunk into the directory.
    *    ○ Record the chunk in Redis.
    * @param chunkDTO
    */
   @Override
   public void uploadChunk(FileChunkDTO chunkDTO) {
      // Chunk directory
      String chunkFileFolderPath = getChunkFileFolderPath(chunkDTO.getIdentifier());
      logger.info("chunk directory -> {}", chunkFileFolderPath);
      File chunkFileFolder = new File(chunkFileFolderPath);
      if (!chunkFileFolder.exists()) {
         boolean mkdirs = chunkFileFolder.mkdirs();
         logger.info("Created chunk folder: {}", mkdirs);
      }
      // Write the chunk to disk; the file name is simply the chunk number
      try (
              InputStream inputStream = chunkDTO.getFile().getInputStream();
              FileOutputStream outputStream = new FileOutputStream(new File(chunkFileFolderPath + chunkDTO.getChunkNumber()))
      ) {
         IOUtils.copy(inputStream, outputStream);
         logger.info("file identifier:{}, chunkNumber:{}", chunkDTO.getIdentifier(), chunkDTO.getChunkNumber());
         // Record the chunk in Redis
         saveToRedis(chunkDTO);
      } catch (Exception e) {
         logger.error("Failed to write chunk", e);
         // Rethrow so the controller reports the failure instead of a false success
         throw new RuntimeException(e);
      }
   }
   @Override
   public boolean mergeChunk(String identifier, String fileName, Integer totalChunks) throws IOException {
      return mergeChunks(identifier, fileName, totalChunks);
   }
   /**
    * Merge the chunks
    *
    * @param identifier
    * @param filename
    */
   private boolean mergeChunks(String identifier, String filename, Integer totalChunks) {
      String chunkFileFolderPath = getChunkFileFolderPath(identifier);
      String filePath = getFilePath(identifier, filename);
      // Only merge if all chunks are present
      if (checkChunks(chunkFileFolderPath, totalChunks)) {
         File chunkFileFolder = new File(chunkFileFolderPath);
         File mergeFile = new File(filePath);
         File[] chunks = chunkFileFolder.listFiles();
         // Sort the chunks by chunk number (the file names are 1, 2, 3, ...)
         Arrays.sort(chunks, Comparator.comparingInt(chunk -> Integer.parseInt(chunk.getName())));
         try (RandomAccessFile randomAccessFileWriter = new RandomAccessFile(mergeFile, "rw")) {
            byte[] bytes = new byte[1024];
            for (File chunk : chunks) {
               try (RandomAccessFile randomAccessFileReader = new RandomAccessFile(chunk, "r")) {
                  int len;
                  while ((len = randomAccessFileReader.read(bytes)) != -1) {
                     randomAccessFileWriter.write(bytes, 0, len);
                  }
               }
            }
         } catch (Exception e) {
            return false;
         }
         return true;
      }
      return false;
   }
   /**
    * Check whether all chunks exist (the chunk files are named 1..totalChunks)
    * @param chunkFileFolderPath
    * @param totalChunks
    * @return
    */
   private boolean checkChunks(String chunkFileFolderPath, Integer totalChunks) {
      try {
         for (int i = 1; i <= totalChunks; i++) {
            File file = new File(chunkFileFolderPath + File.separator + i);
            if (!file.exists()) {
               return false;
            }
         }
      } catch (Exception e) {
         return false;
      }
      return true;
   }
   /**
    * Record an uploaded chunk in Redis.
    * If no record exists for this file yet, create the base information and save it;
    * otherwise add the chunk number to the uploaded set.
    * @param chunkDTO
    */
   private synchronized long saveToRedis(FileChunkDTO chunkDTO) {
      Set<Integer> uploaded = (Set<Integer>) redisTemplate.opsForHash().get(chunkDTO.getIdentifier(), "uploaded");
      if (uploaded == null) {
         uploaded = new HashSet<>(Arrays.asList(chunkDTO.getChunkNumber()));
         HashMap<String, Object> objectObjectHashMap = new HashMap<>();
         objectObjectHashMap.put("uploaded", uploaded);
         objectObjectHashMap.put("totalChunks", chunkDTO.getTotalChunks());
         objectObjectHashMap.put("totalSize", chunkDTO.getTotalSize());
//       objectObjectHashMap.put("path", getFileRelativelyPath(chunkDTO.getIdentifier(), chunkDTO.getFilename()));
         objectObjectHashMap.put("path", chunkDTO.getFilename());
         redisTemplate.opsForHash().putAll(chunkDTO.getIdentifier(), objectObjectHashMap);
      } else {
         uploaded.add(chunkDTO.getChunkNumber());
         redisTemplate.opsForHash().put(chunkDTO.getIdentifier(), "uploaded", uploaded);
      }
      return uploaded.size();
   }
   /**
    * Get the absolute path of the merged file
    *
    * @param identifier
    * @param filename
    * @return
    */
   private String getFilePath(String identifier, String filename) {
//    return getFileFolderPath(identifier) + identifier + filename.substring(filename.lastIndexOf("."));
      return uploadFolder + filename;
   }
   /**
    * Get the relative path of the file
    *
    * @param identifier
    * @param filename
    * @return
    */
   private String getFileRelativelyPath(String identifier, String filename) {
      String ext = filename.substring(filename.lastIndexOf("."));
      return "/" + identifier.substring(0, 1) + "/" +
              identifier.substring(1, 2) + "/" +
              identifier + "/" + identifier
               + ext;
   }
   /**
    * Get the directory where the chunk files are stored
    *
    * @param identifier
    * @return
    */
   private String getChunkFileFolderPath(String identifier) {
      return getFileFolderPath(identifier) + "chunks" + File.separator;
   }
   /**
    * Get the directory where the file belongs
    *
    * @param identifier
    * @return
    */
   private String getFileFolderPath(String identifier) {
      return uploadFolder + identifier.substring(0, 1) + File.separator +
              identifier.substring(1, 2) + File.separator +
              identifier + File.separator;
//    return uploadFolder;
   }
}
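
saveToRedis stores a HashSet<Integer> as a hash value, so the injected RedisTemplate needs serializers that can round-trip Java objects. A minimal configuration sketch, assuming JDK serialization; the class name RedisConfig and this bean setup are illustrative rather than part of the original project:

package com.ruoyi.web.upload.config;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.redis.connection.RedisConnectionFactory;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.data.redis.serializer.JdkSerializationRedisSerializer;
import org.springframework.data.redis.serializer.StringRedisSerializer;
/**
 * Hypothetical RedisTemplate configuration for the chunk-tracking hash.
 */
@Configuration
public class RedisConfig {
    @Bean
    public RedisTemplate<String, Object> redisTemplate(RedisConnectionFactory factory) {
        RedisTemplate<String, Object> template = new RedisTemplate<>();
        template.setConnectionFactory(factory);
        // Redis keys and hash keys stay human-readable strings
        template.setKeySerializer(new StringRedisSerializer());
        template.setHashKeySerializer(new StringRedisSerializer());
        // Hash values (the uploaded set, totals, path) use JDK serialization
        template.setValueSerializer(new JdkSerializationRedisSerializer());
        template.setHashValueSerializer(new JdkSerializationRedisSerializer());
        return template;
    }
}
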
package com.ruoyi.web.upload.result;
import com.ruoyi.web.upload.dto.ResultCodeEnum;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;
/**
 * @Author
 * @Date Created in 2023/2/23 17:25
 * @Description Globally unified return result
 * @Version V1.0
 */
@Data
@ApiModel(value = "Global unified return result")
@SuppressWarnings("all")
public class Result<T> {
    @ApiModelProperty(value = "return code")
    private Integer code;
    @ApiModelProperty(value = "return message")
    private String message;
    @ApiModelProperty(value = "return data")
    private T data;
    private Long total;
    public Result() {}
    protected static <T> Result<T> build(T data) {
        Result<T> result = new Result<T>();
        if (data != null)
            result.setData(data);
        return result;
    }
    public static <T> Result<T> build(T body, ResultCodeEnum resultCodeEnum) {
        Result<T> result = build(body);
        result.setCode(resultCodeEnum.getCode());
        result.setMessage(resultCodeEnum.getMessage());
        return result;
    }
    public static <T> Result<T> build(Integer code, String message) {
        Result<T> result = build(null);
        result.setCode(code);
        result.setMessage(message);
        return result;
    }
    public static <T> Result<T> ok() {
        return Result.ok(null);
    }
    /**
     * Successful operation
     * @param data
     * @param <T>
     * @return
     */
    public static <T> Result<T> ok(T data) {
        return build(data, ResultCodeEnum.SUCCESS);
    }
    public static <T> Result<T> fail() {
        return Result.fail(null);
    }
    /**
     * Failed operation
     * @param data
     * @param <T>
     * @return
     */
    public static <T> Result<T> fail(T data) {
        return build(data, ResultCodeEnum.FAIL);
    }
    public Result<T> message(String msg) {
        this.setMessage(msg);
        return this;
    }
    public Result<T> code(Integer code) {
        this.setCode(code);
        return this;
    }
    public boolean isOk() {
        return this.getCode().intValue() == ResultCodeEnum.SUCCESS.getCode().intValue();
    }
}

Front-end code

Register the uploader in main.js

import uploader from 'vue-simple-uploader'
Vue.use(uploader)

Install the vue-simple-uploader and spark-md5 dependencies

npm install --save vue-simple-uploader
npm install --save spark-md5
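
The component below imports an upload() helper from @/api/user to call the merge endpoint; that wrapper is not shown in the original article, but a minimal sketch (assuming axios and the /upload/merge endpoint above) could look like this:

// src/api/user.js — hypothetical wrapper, not part of the original article
import axios from "axios";

// POST the merge request; the body fields match FileChunkDTO on the server
export function upload(data) {
  return axios
    .post("//localhost:9999/upload/merge", data)
    .then((res) => res.data);
}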

Create the uploader component

<template>
  <div>
    <uploader
        :autoStart="false"
        :options="options"
        :file-status-text="statusText"
        class="uploader-example"
        @file-complete="fileComplete"
        @complete="complete"
        @file-success="fileSuccess"
        @files-added="filesAdded"
    >
      <uploader-unsupport></uploader-unsupport>
      <uploader-drop>
        <p>Drag and drop files here to upload</p>
        <uploader-btn>Select file</uploader-btn>
        <uploader-btn :attrs="attrs">Select picture</uploader-btn>
        <uploader-btn :directory="true">Select folder</uploader-btn>
      </uploader-drop>
      <!-- <uploader-list></uploader-list> -->
      <uploader-files> </uploader-files>
    </uploader>
    <br />
    <el-button @click="allStart()" :disabled="disabled">Start all</el-button>
    <el-button @click="allStop()" style="margin-left: 4px">Pause all</el-button>
    <el-button @click="allRemove()" style="margin-left: 4px">Remove all</el-button>
  </div>
</template>
<script>
import axios from "axios";
import SparkMD5 from "spark-md5";
import { upload } from "@/api/user";
// import storage from "store";
// import { ACCESS_TOKEN } from '@/store/mutation-types'
export default {
  name: "Home",
  data() {
    return {
      skip: false,
      options: {
        target: "//localhost:9999/upload/chunk",
        // Enable server-side chunk verification
        testChunks: true,
        parseTimeRemaining: function (timeRemaining, parsedTimeRemaining) {
          return parsedTimeRemaining
              .replace(/\syears?/, "year")
              .replace(/\sdays?/, "days")
              .replace(/\shours?/, "hours")
              .replace(/\sminutes?/, "minutes")
              .replace(/\sseconds?/, "seconds");
        },
        // Ask the server which chunks have already been uploaded
        checkChunkUploadedByResponse: (chunk, message) => {
          const result = JSON.parse(message);
          if (result.data.skipUpload) {
            this.skip = true;
            return true;
          }
          return (result.data.uploaded || []).indexOf(chunk.offset + 1) >= 0;
        },
        // headers: {
        //   // Add authentication headers according to your actual business needs
        //   "Access-Token": storage.get(ACCESS_TOKEN),
        // },
      },
      attrs: {
        accept: "image/*",
      },
      statusText: {
        success: "Upload successful",
        error: "Upload error",
        uploading: "Uploading...",
        paused: "Paused...",
        waiting: "Waiting...",
        cmd5: "Calculating file MD5...",
      },
      fileList: [],
      disabled: true,
    };
  },
  watch: {
    fileList(newList, oldList) {
      this.disabled = false;
    },
  },
  methods: {
    // Alternative fileSuccess implementation that calls the merge endpoint
    // directly with axios instead of the wrapped upload() API:
    // fileSuccess(rootFile, file, response, chunk) {
    //   const result = JSON.parse(response);
    //   console.log(result.code, this.skip);
    //   if (result.code === 200 && !this.skip) {
    //     axios
    //       .post(
    //         "http://127.0.0.1:9999/upload/merge",
    //         {
    //           identifier: file.uniqueIdentifier,
    //           filename: file.name,
    //           totalChunks: chunk.offset + 1,
    //         },
    //         // { headers: { "Access-Token": storage.get(ACCESS_TOKEN) } }
    //       )
    //       .then((res) => {
    //         if (res.data.code === 200) {
    //           console.log("Upload successful");
    //         } else {
    //           console.log(res);
    //         }
    //       })
    //       .catch(function (error) {
    //         console.log(error);
    //       });
    //   } else {
    //     console.log("Upload successful, no need to merge");
    //   }
    //   if (this.skip) {
    //     this.skip = false;
    //   }
    // },
    fileSuccess(rootFile, file, response, chunk) {
      const result = JSON.parse(response);
      console.log(result.code, this.skip);
      const mergeRequest = {
        identifier: file.uniqueIdentifier,
        filename: file.name,
        // chunk.offset is the zero-based index of the last chunk, so add 1 for the total
        totalChunks: chunk.offset + 1,
      };
      // The back-end Result wrapper carries code/message/data; 200 means success
      if (result.code === 200 && !this.skip) {
        upload(mergeRequest)
            .then((res) => {
              if (res.code == 200) {
                console.log("Upload successful");
              } else {
                console.log(res);
              }
            })
            .catch(function (error) {
              console.log(error);
            });
      } else {
        console.log("Upload successful, no need to merge");
      }
      if (this.skip) {
        this.skip = false;
      }
    },
    fileComplete(rootFile) {
      // A root file (or folder) finished uploading successfully.
      // console.log("fileComplete", rootFile);
    },
    complete() {
      // All uploads completed.
      // console.log("complete");
    },
    filesAdded(files, fileList, event) {
      // console.log(files);
      files.forEach((e) => {
        this.fileList.push(e);
        this.computeMD5(e);
      });
    },
    computeMD5(file) {
      let fileReader = new FileReader();
      let time = new Date().getTime();
      let blobSlice =
          File.prototype.slice ||
          File.prototype.mozSlice ||
          File.prototype.webkitSlice;
      let currentChunk = 0;
      const chunkSize = 1024 * 1024;
      let chunks = Math.ceil(file.size / chunkSize);
      let spark = new SparkMD5.ArrayBuffer();
      // Set the file status to "Calculating MD5..." and pause the upload
      file.cmd5 = true;
      file.pause();
      loadNext();
      fileReader.onload = (e) => {
        spark.append(e.target.result);
        if (currentChunk < chunks) {
          currentChunk++;
          loadNext();
          // Log the MD5 calculation progress in real time
          console.log(
              `Chunk ${currentChunk}/${chunks} parsed, starting chunk ${
                  currentChunk + 1
              }`
          );
        } else {
          let md5 = spark.end();
          console.log(
              `MD5 calculated for ${file.name}; MD5: ${md5}; chunks: ${chunks}; size: ${
                  file.size
              }; time taken: ${new Date().getTime() - time} ms`
          );
          spark.destroy(); // Release the cache
          file.uniqueIdentifier = md5; // Use the file MD5 as its unique identifier
          file.cmd5 = false; // Leave the "calculating md5" state
          file.resume(); // Start uploading
        }
      };
      fileReader.onerror = function () {
        console.error(`Error reading file ${file.name}, please check the file`);
        file.cancel();
      };
      function loadNext() {
        let start = currentChunk * chunkSize;
        let end =
            start + chunkSize >= file.size ? file.size : start + chunkSize;
        fileReader.readAsArrayBuffer(blobSlice.call(file.file, start, end));
      }
    },
    allStart() {
      console.log(this.fileList);
      this.fileList.map((e) => {
        if (e.paused) {
          e.resume();
        }
      });
    },
    allStop() {
      console.log(this.fileList);
      this.fileList.map((e) => {
        if (!e.paused) {
          e.pause();
        }
      });
    },
    allRemove() {
      this.fileList.map((e) => {
        e.cancel();
      });
      this.fileList = [];
    },
  },
};
</script>
<style>
.uploader-example {
  width: 100%;
  padding: 15px;
  margin: 0 auto;
  font-size: 12px;
  box-shadow: 0 0 10px rgba(0, 0, 0, 0.4);
}
.uploader-example .uploader-btn {
  margin-right: 4px;
}
.uploader-example .uploader-list {
  max-height: 440px;
  overflow-x: hidden;
  overflow-y: auto;
}
</style>
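
Because the uploader posts from the Vue dev server to //localhost:9999, the browser will enforce CORS in development. A minimal Spring CORS sketch — the class name CorsConfig and the permissive settings are illustrative, not part of the original project, and should be tightened for production:

package com.ruoyi.web.upload.config;
import org.springframework.context.annotation.Configuration;
import org.springframework.web.servlet.config.annotation.CorsRegistry;
import org.springframework.web.servlet.config.annotation.WebMvcConfigurer;
/**
 * Hypothetical development-only CORS configuration for the upload endpoints.
 */
@Configuration
public class CorsConfig implements WebMvcConfigurer {
    @Override
    public void addCorsMappings(CorsRegistry registry) {
        registry.addMapping("/upload/**")
                .allowedOriginPatterns("*") // dev only; restrict origins in production
                .allowedMethods("GET", "POST")
                .allowedHeaders("*");
    }
}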

This concludes this article on implementing breakpoint resume (resumable upload) in a Spring Boot project.

Reference article: http://blog.ncmem.com/wordpress/2023/10/29/springboot project implements breakpoint resume function/