1
Fork 0

Finish chunked reader implementation

This commit is contained in:
Drew DeVault 2023-02-12 17:58:25 +01:00
parent 047472da93
commit 5bcc5539f5

View file

@ -1,5 +1,6 @@
use errors;
use bufio;
use bytes;
use io;
use os;
use strconv;
@ -90,10 +91,9 @@ fn new_reader(
// XXX: We could add lzw support if someone added it to
// hare-compress
switch (te) {
const next = switch (te) {
case "chunked" =>
stream = new_chunked_reader(stream, buffer);
buffer = [];
yield new_chunked_reader(stream, buffer);
case "deflate" =>
abort(); // TODO
case "gzip" =>
@ -101,6 +101,9 @@ fn new_reader(
case =>
return errors::unsupported;
};
stream = next;
buffer = [];
};
if (!(stream is *io::stream)) {
@ -177,13 +180,20 @@ fn identity_read(
return n;
};
// Parser state for a chunked transfer-coding reader: which part of a
// chunk the next bytes from the connection belong to.
type chunk_state = enum {
	// Expecting a chunk-size line ("<hex-size>[;chunk-ext]\r\n")
	HEADER,
	// Expecting up to `length` bytes of chunk payload
	DATA,
	// Expecting the CRLF that terminates the chunk payload
	FOOTER,
};
// An [[io::stream]] which decodes an HTTP/1.1 "chunked" transfer coding
// read from conn. The vtable field must be first so that *chunked_reader
// can be used where *io::stream is expected.
type chunked_reader = struct {
	vtable: io::stream,
	// Underlying connection the chunked body is read from
	conn: io::handle,
	// Read-ahead buffer for header, data, and footer parsing
	buffer: [os::BUFSIZ]u8,
	// Which part of a chunk is currently being parsed
	state: chunk_state,
	// Amount of read-ahead data in buffer
	pending: size,
	// Length of current chunk
	length: size,
};
// Implements the read operation for [[chunked_reader]]: decodes the
// chunked transfer coding from rd.conn into buf. Returns the number of
// payload bytes produced, io::EOF at the zero-length final chunk, or an
// error (errors::overflow for an oversized chunk header, errors::invalid
// for malformed framing or a truncated stream).
fn chunked_read(
	s: *io::stream,
	buf: []u8,
) (size | io::EOF | io::error) = {
	// XXX: I am not satisfied with this code
	let rd = s: *chunked_reader;
	assert(rd.vtable == &chunked_reader_vtable);
	for (true) switch (rd.state) {
	case chunk_state::HEADER =>
		// Accumulate read-ahead data until a full "size[;ext]\r\n"
		// line is buffered.
		let crlf = 0z;
		for (true) {
			const n = rd.pending;
			match (bytes::index(rd.buffer[..n], ['\r', '\n'])) {
			case let z: size =>
				crlf = z;
				break;
			case void =>
				yield;
			};
			if (rd.pending >= len(rd.buffer)) {
				// Chunk header exceeds buffer size
				return errors::overflow;
			};
			match (io::read(rd.conn, rd.buffer[rd.pending..])?) {
			case let n: size =>
				rd.pending += n;
			case io::EOF =>
				// EOF mid-header is a truncated stream
				if (rd.pending > 0) {
					return errors::invalid;
				};
				return io::EOF;
			};
		};
		// XXX: Should we do anything with chunk-ext?
		const header = rd.buffer[..crlf];
		const (ln, _) = bytes::cut(header, ';');
		const ln = match (strings::fromutf8(ln)) {
		case let s: str =>
			yield s;
		case =>
			return errors::invalid;
		};
		match (strconv::stozb(ln, strconv::base::HEX)) {
		case let z: size =>
			rd.length = z;
		case =>
			return errors::invalid;
		};
		if (rd.length == 0) {
			// Last chunk. NOTE(review): the trailing CRLF (and any
			// trailer section) is left unconsumed in the buffer —
			// confirm this is acceptable for connection reuse.
			return io::EOF;
		};
		// Shift the header (including its CRLF) out of the buffer
		const n = crlf + 2;
		rd.buffer[..rd.pending - n] = rd.buffer[n..rd.pending];
		rd.pending -= n;
		rd.state = chunk_state::DATA;
	case chunk_state::DATA =>
		if (rd.pending == 0) {
			match (io::read(rd.conn, rd.buffer)?) {
			case let n: size =>
				rd.pending += n;
			case io::EOF =>
				// EOF mid-chunk: stream was truncated, but
				// preserve the original behavior of signaling
				// EOF to the caller here.
				return io::EOF;
			};
		};
		// Copy out at most: what the caller asked for, what is
		// buffered, and what remains of the current chunk.
		let n = len(buf);
		if (n > rd.pending) {
			n = rd.pending;
		};
		if (n > rd.length) {
			n = rd.length;
		};
		buf[..n] = rd.buffer[..n];
		rd.buffer[..rd.pending - n] = rd.buffer[n..rd.pending];
		rd.pending -= n;
		rd.length -= n;
		// BUGFIX: only advance to FOOTER once the chunk is fully
		// consumed. The previous code switched states after every
		// read, so a partially-consumed chunk caused the next call
		// to misread payload bytes as the terminating CRLF and
		// return errors::invalid.
		if (rd.length == 0) {
			rd.state = chunk_state::FOOTER;
		};
		return n;
	case chunk_state::FOOTER =>
		// Consume the CRLF that terminates the chunk payload
		for (rd.pending < 2) {
			match (io::read(rd.conn, rd.buffer[rd.pending..])?) {
			case let n: size =>
				rd.pending += n;
			case io::EOF =>
				return io::EOF;
			};
		};
		if (!bytes::equal(rd.buffer[..2], ['\r', '\n'])) {
			return errors::invalid;
		};
		rd.buffer[..rd.pending - 2] = rd.buffer[2..rd.pending];
		rd.pending -= 2;
		rd.state = chunk_state::HEADER;
	};
	abort(); // Unreachable
};