const fs = require('fs');
const filename = 'binary.bin';

// Read the entire file into memory. With no encoding specified,
// `data` is delivered as a Buffer rather than a string.
fs.readFile(filename, (err, data) => {
  if (err) {
    console.error('Error reading file:', err);
    return;
  }
  console.log(data);
  // process the Buffer data using Buffer methods (e.g., slice, copy)
});
Streaming files in JavaScript
Another facet of dealing with files is streaming in chunks of data, which becomes a necessity when dealing with large files. Here's a contrived example of writing out in streaming chunks:
const fs = require('fs');
const filename = 'large_file.txt';
const chunkSize = 1024 * 1024; // (1) chunk size in bytes (1MB)
const content = 'This is some content to be written in chunks.'; // (2) sample content
const fileSizeLimit = 5 * 1024 * 1024; // (3) stop writing once the file reaches 5MB
let writtenBytes = 0; // (4) running count of bytes written
const writeStream = fs.createWriteStream(filename, { highWaterMark: chunkSize }); // (5)

// (6) Recursive writer: keeps emitting chunks until the size limit is hit,
// yielding to the 'drain' event whenever the stream's buffer fills up.
function writeChunk() {
  // (7) Repeat the sample text until it reaches roughly the 1MB chunk size.
  const chunk = content.repeat(Math.ceil(chunkSize / content.length));
  if (writtenBytes + chunk.length > fileSizeLimit) {
    console.error('File size limit reached');
    writeStream.end();
    return;
  }
  // (8) write() returns false when the internal buffer is full (backpressure).
  const canContinue = writeStream.write(chunk);
  writtenBytes += chunk.length; // (9) track how much we've written
  console.log(`Wrote chunk of size: ${chunk.length}, Total written: ${writtenBytes}`);
  if (canContinue) {
    writeChunk();
  } else {
    // Wait for the buffer to flush before writing the next chunk.
    writeStream.once('drain', writeChunk);
  }
}

writeStream.on('error', (err) => { // (10)
  console.error('Error writing file:', err);
});
writeStream.on('finish', () => { // (10)
  console.log('Finished writing file');
});
writeChunk();
Streaming gives you more power, but you’ll notice it involves more work. The work you are doing is in setting chunk sizes and then responding to events based on the chunks. This is the essence of avoiding putting too much of a huge file into memory at once. Instead, you break it into chunks and deal with each one. Here are my notes about the interesting parts of the above write example:
We specify a chunk size in bytes. In this case, we have a 1MB chunk (1,024 × 1,024 bytes), which is how much content will be written at a time.
Here’s some fake content to write.
Now, we create a file-size limit, in this case, 5MB.
This variable tracks how many bytes we’ve written (so we can stop writing after 5MB).
We create the actual writeStream object. The highWaterMark element tells it how big the chunks are that it will accept.
The writeChunk() function is recursive. Whenever a chunk needs to be handled, it calls itself. It does this unless the file limit has been reached, in which case it exits.
Here, we are just repeating the sample text until it reaches the 1MB size.
Here’s the interesting part. If the file size is not exceeded, then we call writeStream.write(chunk):
writeStream.write(chunk) returns false if the buffer size is exceeded. That means we can’t fit more in the buffer given the size limit.
When the buffer is exceeded, the drain event occurs, which we handle by registering writeStream.once('drain', writeChunk);. Notice that this is a recursive callback to writeChunk.
This keeps track of how much we’ve written.
This handles the case where we are done writing and finishes the stream writer with writeStream.end();.
This demonstrates adding event handlers for error and finish.
And to read it back off the disk, we can use a similar approach: