Iterate through lines in a file with Node.js and CoffeeScript
I am iterating through the lines in a file using Node.js with CoffeeScript and the following function:
fs = require 'fs'

each_line_in = (stream, func) ->
    fs.stat stream.path, (err, stats) ->
        previous = []
        stream.on 'data', (d) ->
            start = cur = 0
            for c in d
                cur++
                if c == 10    # newline byte
                    previous.push(d.slice(start, cur))
                    func previous.join('')
                    previous = []
                    start = cur
            previous.push(d.slice(start, cur)) if start != cur
Is there a better way to do this without reading the entire file into memory? And by "better" I mean more succinct, built into Node.js, faster, or more correct. If I were writing Python, I would do something like this:
def each_line_in(file_obj, func):
    [ func(l) for l in file_obj ]
I saw this question, which uses Peteris Krumins's "lazy" module, but I would like to accomplish this without adding an external dependency.
Here's a fairly efficient approach:
fs = require 'fs'

eachLineIn = (filePath, func) ->
  blockSize = 4096
  buffer = new Buffer(blockSize)    # Buffer.alloc(blockSize) on modern Node
  fd = fs.openSync filePath, 'r'
  lastLine = ''
  callback = (err, bytesRead) ->
    throw err if err
    lines = buffer.toString('utf8', 0, bytesRead).split '\n'
    lines[0] = lastLine + lines[0]
    [completeLines..., lastLine] = lines
    func(line) for line in completeLines
    # A short read means end of file; note that a final line without a
    # trailing newline is still sitting in lastLine at that point.
    if bytesRead is blockSize
      fs.read fd, buffer, 0, blockSize, null, callback
    return
  fs.read fd, buffer, 0, blockSize, 0, callback
  return
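A quick usage sketch (input.txt is just a placeholder file name):

eachLineIn 'input.txt', (line) ->
  console.log line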
 You should benchmark this on your hardware and OS to find the optimal value of blockSize for large files.  
Note that this assumes that lines are separated by '\n' only. If you're not sure what your files use, you should use a regex for split, e.g.:
.split(/(\r\n)|\r|\n/)
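One caveat: because of the capturing group, split also puts the matched separators (or undefined) into the resulting array, so in this context a non-capturing pattern is usually what you want. A small sketch of the difference:

# With the capturing group, separators leak into the result:
'a\r\nb\nc'.split(/(\r\n)|\r|\n/)    # [ 'a', '\r\n', 'b', undefined, 'c' ]

# Without it, only the line contents remain:
'a\r\nb\nc'.split(/\r\n|\r|\n/)      # [ 'a', 'b', 'c' ]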
This is a succinct version using a ReadStream, e.g. stream = fs.createReadStream(filepath):
for_each_line = (stream, func) ->
  last = ""
  stream.on('data', (chunk) ->
    lines = (last + chunk).split("\n")
    [lines..., last] = lines    # the trailing element may be a partial line
    for line in lines
      func(line)
  )
  stream.on('end', () ->
    func(last)
  )
 Options to createReadStream can set the buffer size and encoding as needed.  
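For instance, a minimal sketch (input.txt is a placeholder; the buffer-size option is called bufferSize in older Node releases and highWaterMark in current ones):

fs = require 'fs'

# An encoding makes the stream emit strings instead of Buffers, so multi-byte
# characters are not split across chunk boundaries before reaching for_each_line.
stream = fs.createReadStream 'input.txt',
  encoding: 'utf8'
  highWaterMark: 64 * 1024    # bufferSize in older Node releases

for_each_line stream, (line) -> console.log line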
This strips the '\n', but that can be added back if needed. It also handles a final line, though that will be empty if the file ends with a '\n'.
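If that empty trailing line is unwanted, the 'end' handler can guard against it, e.g.:

stream.on('end', () ->
  func(last) unless last is ''    # skip the empty final "line"
)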
I don't see much difference in the timing of these three versions.