Large files are uploaded in chunks with breakpoint resume, and an MD5 hash determines whether the file (or any of its chunks) still needs to be uploaded.

Install dependencies

pnpm install spark-md5
pnpm install @types/spark-md5 -D
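The snippets below are class methods (the surrounding service class is omitted), and they rely on a couple of imports. A minimal sketch; `message` is assumed here to come from a toast/notification library such as Ant Design:

import SparkMD5 from 'spark-md5';
import { message } from 'antd'; // assumed source of the error toasts used below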

Chunked upload and breakpoint-resume function definitions

CHUNK_SIZE = 10 * 1024 * 1024; // 10 MB

getFileMD5(file: File) {
    const fileReader = new FileReader();
    // Get the Blob slice method (older browsers exposed it under vendor prefixes)
    const blobSlice = File.prototype.slice || (File.prototype as any).mozSlice || (File.prototype as any).webkitSlice;
    // Hash only the first slice: fast, and sufficient as an upload identifier
    const start = 0;
    const end = this.CHUNK_SIZE >= file.size ? file.size : this.CHUNK_SIZE;
    // readAsBinaryString is legacy, but its result is what SparkMD5.hashBinary expects
    fileReader.readAsBinaryString(blobSlice.call(file, start, end));

    return new Promise((resolve, reject) => {
        fileReader.onload = (e: any) => {
            resolve(SparkMD5.hashBinary(e.target.result, false));
        };
        fileReader.onerror = function () {
            message.error('Error reading file, please check the file!');
            reject(new Error('Error reading file, please check the file!'));
        };
    });
}
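Note that getFileMD5 hashes only the first chunk, which is fast but means the identifier is not a digest of the whole file. If the backend requires a full-file MD5, spark-md5's incremental SparkMD5.ArrayBuffer API can fold the slices in one at a time; a minimal sketch (getFullFileMD5 is a hypothetical helper, not part of the original code):

// Hash an entire file incrementally so memory use stays at one chunk.
function getFullFileMD5(file: File, chunkSize = 10 * 1024 * 1024): Promise<string> {
    return new Promise((resolve, reject) => {
        const spark = new SparkMD5.ArrayBuffer();
        const reader = new FileReader();
        let cur = 0;
        const readNext = () => reader.readAsArrayBuffer(file.slice(cur, cur + chunkSize));
        reader.onload = (e: any) => {
            spark.append(e.target.result); // fold this slice into the running hash
            cur += chunkSize;
            cur < file.size ? readNext() : resolve(spark.end()); // end() returns the hex digest
        };
        reader.onerror = () => reject(new Error('Error reading file, please check the file!'));
        readNext();
    });
}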

checkUploadByMD5(md5: string) {
    return this.get(`xxxxxxxxxx/check-upload?identifier=${md5}`);
}
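createBigFile below reads three fields off the check-upload response. The backend contract is not shown in the original, but judging from how the code consumes it, the shape is roughly this (hypothetical, field names inferred from usage):

interface CheckUploadResult {
    uploaded: boolean;                   // the whole file already exists on the server
    uploadedChunks?: number[];           // 1-based numbers of chunks already received
    commonFile?: { [key: string]: any }; // file metadata, present once uploaded is true
}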

// Upload large files by slicing
createBigFile(file: File, onProcess: any = null): any {
    return new Promise(async (resolve, reject) => {
        if (!file) {
            reject(new Error('The file is empty, please check!'));
            return;
        }
        const md5 = await this.getFileMD5(file);
        let uploadedChunks: number[] = []; // Chunk numbers already uploaded
        try {
            const checkResult: any = await this.checkUploadByMD5(`${md5}`);
            if (checkResult.uploaded && Object.keys(checkResult.commonFile || {}).length) {
                // The whole file has already been uploaded
                resolve(checkResult.commonFile);
                return;
            }
            uploadedChunks = checkResult.uploadedChunks || []; // Chunks already uploaded
        } catch (err) {
            // If the check fails, treat the file as not yet uploaded
            console.error(err);
        }
        // Create the slices, skipping any that were already uploaded
        const fileChunks = [];
        let index = 0; // Slice number (1-based)
        for (let cur = 0; cur < file.size; cur += this.CHUNK_SIZE) {
            if (!uploadedChunks.includes(++index)) {
                // This slice has not been uploaded before
                fileChunks.push({
                    chunkNumber: index,
                    chunk: file.slice(cur, cur + this.CHUNK_SIZE),
                });
            }
        }
        const totalChunks = index; // Total number of slices
        // Concurrency control and breakpoint resume
        let success = uploadedChunks.length; // Slices already confirmed uploaded
        let percent = (success / totalChunks || 0) * 100; // || 0 guards against NaN when there are no slices
        onProcess && onProcess({ percent: percent.toFixed(2) });
        const processDetail: number[] = []; // Per-slot progress of the in-flight uploads
        let consecutiveFailure = 0; // Number of consecutive failures
        const uploadFileChunks = async (list: { [key: string]: any }[]) => {
            const pool: any[] = []; // Concurrency pool
            const max = 3; // Maximum concurrency
            const failureMax = 3; // Maximum acceptable number of consecutive failures
            let finish = 0; // Number of settled requests in this round
            const failList: { [key: string]: any }[] = []; // Slices that failed and need a retry
            for (let i = 0; i < list.length; i++) {
                if (consecutiveFailure >= failureMax) {
                    // Uploads failed failureMax times in a row; stop issuing new requests
                    message.error('File upload failed, please try again later!');
                    reject(new Error('File upload failed, please try again later!'));
                    return;
                }
                const item = list[i];
                const chunkData = {
                    chunkNumber: item.chunkNumber,
                    totalChunks,
                    chunkSize: item.chunk.size,
                    totalSize: file.size,
                    identifier: md5,
                    filename: file.name,
                    file: item.chunk,
                };
                // Upload this slice
                const task = this.postFormLarge('xxxxxxx/upload', chunkData, {
                    onUploadProgress: (info) => {
                        if (onProcess) {
                            const poolIndex = pool.findIndex((item) => item === task);
                            const progress = info?.progress || 0;
                            processDetail[poolIndex] = progress === 1 ? 0.99 : progress; // Cap at 99% until the server confirms success
                            const percentNew = (processDetail.reduce((pre, cur) => pre + cur, success) / totalChunks) * 100;
                            if (percentNew > percent) {
                                percent = percentNew;
                                onProcess({ percent: percentNew.toFixed(2) });
                            }
                        }
                    },
                });
                task.then((taskResult: any) => {
                    if (taskResult && taskResult.uploadFlag) {
                        // The slice succeeded; remove its Promise from the concurrency pool
                        const poolIndex = pool.findIndex((item) => item === task);
                        pool.splice(poolIndex, 1);
                        success++;
                        consecutiveFailure = 0;
                        if (onProcess) {
                            try {
                                processDetail.splice(poolIndex, 1);
                                const percentNew = (processDetail.reduce((pre, cur) => pre + cur, success) / totalChunks) * 100;
                                if (percentNew > percent) {
                                    percent = percentNew;
                                    onProcess({ percent: percentNew.toFixed(2) });
                                }
                            } catch (err) {
                                // Catch here so a progress error cannot re-queue an already-uploaded slice
                                console.error(err);
                            }
                        }
                    } else {
                        consecutiveFailure++;
                        failList.push(item);
                        // Also free the pool slot so the settled promise cannot satisfy the race below
                        const poolIndex = pool.findIndex((item) => item === task);
                        pool.splice(poolIndex, 1);
                        processDetail.splice(poolIndex, 1);
                    }
                    if (Object.keys(taskResult?.commonFile || {}).length) {
                        // Once the last slice arrives, the backend returns the merged file's info
                        resolve(taskResult.commonFile);
                    }
                })
                    .catch(() => {
                        consecutiveFailure++;
                        failList.push(item);
                        // Free the pool slot here too; a rejected promise must not linger in the pool
                        const poolIndex = pool.findIndex((item) => item === task);
                        pool.splice(poolIndex, 1);
                        processDetail.splice(poolIndex, 1);
                    })
                    .finally(() => {
                        finish++;
                        // Every request in this round has settled; retry the failed slices
                        if (finish === list.length && failList.length) {
                            uploadFileChunks(failList);
                        }
                    });
                pool.push(task);
                processDetail.push(0);
                if (pool.length === max) {
                    // The pool is full; wait for one in-flight task to settle before starting another
                    await Promise.race(pool).catch(() => {}); // rejections are already handled in the task chain above
                }
            }
        };
        uploadFileChunks(fileChunks);
    });
}
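The concurrency control above is the promise-pool pattern: keep at most max requests in flight and start the next one whenever a slot frees up. Stripped of the upload details, the idea looks like this (runPool is an illustrative helper, not part of the original code):

// Run async tasks with at most `max` in flight at any moment.
async function runPool<T>(tasks: (() => Promise<T>)[], max = 3): Promise<T[]> {
    const pool: Promise<void>[] = [];
    const results: T[] = [];
    for (let i = 0; i < tasks.length; i++) {
        const p = tasks[i]().then((r) => {
            results[i] = r;
            pool.splice(pool.indexOf(p), 1); // free this slot once the task settles
        });
        pool.push(p);
        if (pool.length >= max) {
            await Promise.race(pool); // block until any in-flight task finishes
        }
    }
    await Promise.all(pool); // wait for the stragglers
    return results;
}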

Call

const commonFile = await createBigFile(file, (e: any) => {
    uploadingObjs.value.percent = e?.percent || 0;
});
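createBigFile rejects when the file cannot be read or when uploads fail failureMax times in a row, so the call is best wrapped in a try/catch; a sketch (uploadingObjs is the Vue ref from the surrounding component):

try {
    const commonFile = await createBigFile(file, (e: any) => {
        uploadingObjs.value.percent = e?.percent || 0;
    });
    // commonFile holds the merged file's metadata returned by the backend
} catch (err) {
    uploadingObjs.value.percent = 0; // reset the progress bar on failure
    console.error(err);
}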