var running = false; // "running" tracks whether an MD5 computation is currently in progress
function doNormalTest(input) { // "input" is assumed to be the DOM reference of the file <input> element
    if (running) { // if a computation is already running, do not start another one
        return;
    }
    var fileReader = new FileReader(), // create a FileReader instance
        file = input.files[0];         // keep a reference to the selected file so we can verify the read length below
    fileReader.onload = function (e) { // FileReader's load event fires once the file has been read completely
        running = false;
        // e.target points to the fileReader instance created above
        if (file.size != e.target.result.length) { // if the two lengths differ, the read did not complete correctly
            alert("ERROR: Browser reported success but could not read the file until the end.");
        } else {
            console.log("Finished loading! success!!");
            return SparkMD5.hashBinary(e.target.result); // compute the MD5 and return it (note: the return value of an onload handler is discarded, so in practice you would log it or hand it to a callback)
        }
    };
    fileReader.onerror = function () { // if reading fails, clear the running flag and report the error
        running = false;
        alert("ERROR: FileReader onerror was triggered, maybe the browser aborted due to high memory usage.");
    };
    running = true;
    fileReader.readAsBinaryString(input.files[0]); // read the file's raw binary string through the FileReader
}
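Since the digest is produced inside the asynchronous onload callback, the caller of doNormalTest never receives it directly; typically you would wire the function to the file input's change event and consume the hash inside the callback. A minimal usage sketch, assuming a hypothetical <input type="file" id="file-input"> on the page (the id is an assumption, not part of the original code) and that the spark-md5 library has already been loaded:
// Minimal usage sketch (hypothetical element id "file-input")
document.getElementById("file-input").addEventListener("change", function () {
    doNormalTest(this); // pass the file <input> element itself
});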
Next, the second method (reading the file in chunks and hashing incrementally):
function doIncrementalTest(input) { // "input" is assumed to be the DOM reference of the file <input> element
    if (running) {
        return;
    }
    // File's slice() method is needed here; the following is a compatibility fallback for older prefixed implementations
    var blobSlice = File.prototype.slice || File.prototype.mozSlice || File.prototype.webkitSlice,
        file = input.files[0],
        chunkSize = 2097152, // read the file 2MB at a time
        chunks = Math.ceil(file.size / chunkSize),
        currentChunk = 0,
        spark = new SparkMD5(), // create a SparkMD5 instance
        fileReader = new FileReader();
    fileReader.onload = function (e) {
        console.log("Read chunk number " + (currentChunk + 1) + " of " + chunks);
        spark.appendBinary(e.target.result); // append this chunk's binary string to the running hash
        currentChunk += 1;
        if (currentChunk < chunks) {
            loadNext();
        } else {
            running = false;
            console.log("Finished loading!");
            return spark.end(); // finish the computation and return the hex digest (again, the handler's return value itself is discarded)
        }
    };
    fileReader.onerror = function () {
        running = false;
        console.log("something went wrong");
    };
    function loadNext() {
        var start = currentChunk * chunkSize,
            end = start + chunkSize >= file.size ? file.size : start + chunkSize;
        fileReader.readAsBinaryString(blobSlice.call(file, start, end));
    }
    running = true;
    loadNext();
}
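Note that readAsBinaryString is deprecated in modern browsers. The spark-md5 library also ships an ArrayBuffer-based incremental hasher (SparkMD5.ArrayBuffer), which pairs with readAsArrayBuffer. Below is a minimal sketch of the same chunked loop in that style; it is a hypothetical variant, not part of the original code, and it assumes spark-md5 is loaded and omits the running flag for brevity.
function doIncrementalArrayBufferTest(input) { // hypothetical helper mirroring doIncrementalTest above
    var file = input.files[0],
        chunkSize = 2097152, // read 2MB at a time
        chunks = Math.ceil(file.size / chunkSize),
        currentChunk = 0,
        spark = new SparkMD5.ArrayBuffer(), // ArrayBuffer-based incremental hasher
        fileReader = new FileReader();
    fileReader.onload = function (e) {
        spark.append(e.target.result); // append this chunk's ArrayBuffer
        currentChunk += 1;
        if (currentChunk < chunks) {
            loadNext();
        } else {
            console.log("md5: " + spark.end()); // hex digest of the whole file
        }
    };
    function loadNext() {
        var start = currentChunk * chunkSize,
            end = Math.min(start + chunkSize, file.size);
        fileReader.readAsArrayBuffer(file.slice(start, end));
    }
    loadNext();
}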