// hare-http/net/http/transport.ha

use bufio;
use bytes;
use errors;
use io;
use os;
use strconv;
use strings;
use types;

// Configures the Transfer-Encoding behavior.
//
// If set to NONE, no transfer decoding or encoding is performed on the message
// body, irrespective of the value of the Transfer-Encoding header. The user
// must perform any required encoding or decoding themselves in this mode. If
// set to AUTO, the implementation will examine the Transfer-Encoding header
// and encode the message body appropriately.
//
// Most users will want this to be set to AUTO.
export type transport_mode = enum {
	AUTO = 0,
	NONE,
};

// Configures the Content-Encoding behavior.
//
// If set to NONE, no content decoding or encoding is performed on the message
// body, irrespective of the value of the Content-Encoding header. The user must
// perform any required encoding or decoding themselves in this mode. If set to
// AUTO, the implementation will examine the Content-Encoding header and encode
// the message body appropriately.
//
// Most users will want this to be set to AUTO.
export type content_mode = enum {
	AUTO = 0,
	NONE,
};

// Describes an HTTP [[client]]'s transport configuration for a given request.
//
// The default value of this type sets all parameters to "auto".
export type transport = struct {
	// Desired Transfer-Encoding configuration, see [[transport_mode]] for
	// details.
	request_transport: transport_mode,
	response_transport: transport_mode,

	// Desired Content-Encoding configuration, see [[content_mode]] for
	// details.
	request_content: content_mode,
	response_content: content_mode,
};
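
// Usage sketch (illustrative, not part of the upstream file): the zero value
// selects AUTO for every parameter, and callers can opt out selectively:
//
//	const xport = transport {
//		response_transport = transport_mode::NONE,
//		...
//	};
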
fn new_reader(
	conn: io::file,
	resp: *response,
	scan: *bufio::scanner,
) (*io::stream | errors::unsupported | protoerr) = {
	// TODO: Content-Encoding support
	const cl = header_get(&resp.header, "Content-Length");
	const te = header_get(&resp.header, "Transfer-Encoding");
	if (cl != "" || te == "") {
		let length = types::SIZE_MAX;
		if (cl != "") {
			length = match (strconv::stoz(cl)) {
			case let z: size =>
				yield z;
			case =>
				return protoerr;
			};
		};
		return new_identity_reader(conn, scan, length);
	};

	// TODO: Figure out the semantics for closing the stream
	// The caller should probably be required to close it
	// It should close/free any intermediate transport/content decoders
	// And it should not close the actual connection if it's still in the
	// connection pool
	// Unless it isn't in the pool, then it should!
	let stream: io::handle = conn;
	let buffer: []u8 = bufio::scan_buffer(scan);
	const iter = strings::tokenize(te, ",");
	for (const tok => strings::next_token(&iter)) {
		const te = strings::trim(tok);
		// XXX: We could add lzw support if someone added it to
		// hare-compress
		const next = switch (te) {
		case "chunked" =>
			yield new_chunked_reader(stream, buffer);
		case "deflate" =>
			abort(); // TODO
		case "gzip" =>
			abort(); // TODO
		case =>
			return errors::unsupported;
		};
		stream = next;
		buffer = [];
	};

	if (!(stream is *io::stream)) {
		// Empty Transfer-Encoding header
		return protoerr;
	};
	return stream as *io::stream;
};
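
// Note (added commentary, not upstream): new_reader wraps the connection in
// one decoder per coding listed in Transfer-Encoding. Since only "chunked" is
// implemented so far, a typical "Transfer-Encoding: chunked" response yields
// a single chunked reader layered over the connection and the scanner's
// read-ahead buffer.
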
type identity_reader = struct {
	vtable: io::stream,
	conn: io::file,
	scan: *bufio::scanner,
	src: io::limitstream,
};

const identity_reader_vtable = io::vtable {
	reader = &identity_read,
	closer = &identity_close,
	...
};

// Creates a new reader that reads data until the response's Content-Length is
// reached; i.e. the identity case, where no Transfer-Encoding is applied.
fn new_identity_reader(
	conn: io::file,
	scan: *bufio::scanner,
	content_length: size,
) *io::stream = {
	// Copy the scanner to the heap so the reader owns it for the
	// lifetime of the stream
	const scan = alloc(*scan);
	return alloc(identity_reader {
		vtable = &identity_reader_vtable,
		conn = conn,
		scan = scan,
		src = io::limitreader(scan, content_length),
		...
	});
};

fn identity_read(
	s: *io::stream,
	buf: []u8,
) (size | io::EOF | io::error) = {
	let rd = s: *identity_reader;
	assert(rd.vtable == &identity_reader_vtable);
	return io::read(&rd.src, buf)?;
};

fn identity_close(s: *io::stream) (void | io::error) = {
	let rd = s: *identity_reader;
	assert(rd.vtable == &identity_reader_vtable);
	// Flush the remainder of the response in case the caller did not read
	// it out entirely
	io::copy(io::empty, &rd.src)?;
	bufio::finish(rd.scan);
	free(rd.scan);
	io::close(rd.conn)?;
};
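
// Usage sketch (illustrative, not upstream): the stream returned by
// [[new_reader]] is read like any other stream; closing an identity reader
// drains whatever remains of the body, then closes the underlying connection:
//
//	const body = new_reader(conn, &resp, &scan)!;
//	io::copy(os::stdout, body)!;
//	io::close(body)!;
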
type chunk_state = enum {
	HEADER,
	DATA,
	FOOTER,
};
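
// Added commentary: these states mirror the pieces of the chunked wire format
// (RFC 9112, §7.1). For example, the body "Wiki" sent as two chunks:
//
//	3\r\n		HEADER: chunk size in hex, optional chunk-ext after ";"
//	Wik\r\n		DATA, then FOOTER (the CRLF terminating the chunk)
//	1\r\n
//	i\r\n
//	0\r\n\r\n	last chunk: size zero ends the body
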
type chunked_reader = struct {
	vtable: io::stream,
	conn: io::handle,
	buffer: [os::BUFSZ]u8,
	state: chunk_state,
	// Amount of read-ahead data in buffer
	pending: size,
	// Length of current chunk
	length: size,
};
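
// Creates a new reader which decodes the chunked Transfer-Encoding. The
// buffer parameter carries any bytes already read past the end of the
// response headers (e.g. by the header scanner); those bytes are replayed
// before any further reads from the connection.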
fn new_chunked_reader(
	conn: io::handle,
	buffer: []u8,
) *io::stream = {
	let rd = alloc(chunked_reader {
		vtable = &chunked_reader_vtable,
		conn = conn,
		...
	});
	rd.buffer[..len(buffer)] = buffer[..];
	rd.pending = len(buffer);
	return rd;
};

const chunked_reader_vtable = io::vtable {
	reader = &chunked_read,
	...
};

fn chunked_read(
	s: *io::stream,
	buf: []u8,
) (size | io::EOF | io::error) = {
	// XXX: I am not satisfied with this code
	let rd = s: *chunked_reader;
	assert(rd.vtable == &chunked_reader_vtable);
	for (true) switch (rd.state) {
	case chunk_state::HEADER =>
		let crlf = 0z;
		for (true) {
			const n = rd.pending;
			match (bytes::index(rd.buffer[..n], ['\r', '\n'])) {
			case let z: size =>
				crlf = z;
				break;
			case void =>
				yield;
			};
			if (rd.pending >= len(rd.buffer)) {
				// Chunk header exceeds buffer size
				return errors::overflow;
			};
			match (io::read(rd.conn, rd.buffer[rd.pending..])?) {
			case let n: size =>
				rd.pending += n;
			case io::EOF =>
				if (rd.pending > 0) {
					return errors::invalid;
				};
				return io::EOF;
			};
		};
		// XXX: Should we do anything with chunk-ext?
		const header = rd.buffer[..crlf];
		const (ln, _) = bytes::cut(header, ';');
		const ln = match (strings::fromutf8(ln)) {
		case let s: str =>
			yield s;
		case =>
			return errors::invalid;
		};
		match (strconv::stoz(ln, strconv::base::HEX)) {
		case let z: size =>
			rd.length = z;
		case =>
			return errors::invalid;
		};
		if (rd.length == 0) {
			return io::EOF;
		};
		const n = crlf + 2;
		rd.buffer[..rd.pending - n] = rd.buffer[n..rd.pending];
		rd.pending -= n;
		rd.state = chunk_state::DATA;
	case chunk_state::DATA =>
		if (rd.pending == 0) {
			match (io::read(rd.conn, rd.buffer)?) {
			case let n: size =>
				rd.pending += n;
			case io::EOF =>
				return io::EOF;
			};
		};
		let n = len(buf);
		if (n > rd.pending) {
			n = rd.pending;
		};
		if (n > rd.length) {
			n = rd.length;
		};
		buf[..n] = rd.buffer[..n];
		rd.buffer[..rd.pending - n] = rd.buffer[n..rd.pending];
		rd.pending -= n;
		rd.length -= n;
		// Only advance to the chunk footer once the entire chunk has
		// been consumed; a short read leaves us in DATA for the next
		// call.
		if (rd.length == 0) {
			rd.state = chunk_state::FOOTER;
		};
		return n;
	case chunk_state::FOOTER =>
		for (rd.pending < 2) {
			match (io::read(rd.conn, rd.buffer[rd.pending..])?) {
			case let n: size =>
				rd.pending += n;
			case io::EOF =>
				return io::EOF;
			};
		};
		if (!bytes::equal(rd.buffer[..2], ['\r', '\n'])) {
			return errors::invalid;
		};
		rd.buffer[..rd.pending - 2] = rd.buffer[2..rd.pending];
		rd.pending -= 2;
		rd.state = chunk_state::HEADER;
	};
};
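
// Test-style sketch (illustrative, not upstream; assumes the standard
// library's memio module):
//
//	let src = memio::fixed(strings::toutf8("3\r\nWik\r\n1\r\ni\r\n0\r\n\r\n"));
//	let pre: []u8 = [];
//	const body = new_chunked_reader(&src, pre);
//	let sink = memio::dynamic();
//	io::copy(&sink, body)!;
//	assert(bytes::equal(memio::buffer(&sink), strings::toutf8("Wiki")));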