use bufio;
use errors;
use io;
use os;
use strconv;
use strings;
use types;

// Configures the Transfer-Encoding behavior.
//
// If set to NONE, no transport decoding or encoding is performed on the message
// body, irrespective of the value of the Transfer-Encoding header. The user
// must perform any required encoding or decoding themselves in this mode. If
// set to AUTO, the implementation will examine the Transfer-Encoding header
// and encode or decode the message body appropriately.
//
// Most users will want this to be set to AUTO.
export type transport_mode = enum {
	AUTO = 0,
	NONE,
};

// Configures the Content-Encoding behavior.
//
// If set to NONE, no content decoding or encoding is performed on the message
// body, irrespective of the value of the Content-Encoding header. The user must
// perform any required encoding or decoding themselves in this mode. If set to
// AUTO, the implementation will examine the Content-Encoding header and encode
// or decode the message body appropriately.
//
// Most users will want this to be set to AUTO.
export type content_mode = enum {
	AUTO = 0,
	NONE,
};

// Describes an HTTP [[client]]'s transport configuration for a given request.
//
// The default value of this type sets all parameters to AUTO.
export type transport = struct {
	// Desired Transfer-Encoding configuration, see [[transport_mode]] for
	// details.
	request_transport: transport_mode,
	response_transport: transport_mode,
	// Desired Content-Encoding configuration, see [[content_mode]] for
	// details.
	request_content: content_mode,
	response_content: content_mode,
};

fn new_reader(
	conn: io::handle,
	resp: *response,
	scan: *bufio::scanner,
) (*io::stream | errors::unsupported | protoerr) = {
	// TODO: Content-Encoding support
	const cl = header_get(&resp.header, "Content-Length");
	const te = header_get(&resp.header, "Transfer-Encoding");

	if (cl != "" || te == "") {
		let length = types::SIZE_MAX;
		if (cl != "") {
			length = match (strconv::stoz(cl)) {
			case let z: size =>
				yield z;
			case =>
				return protoerr;
			};
		};
		const remain = bufio::scan_buffer(scan);
		return new_identity_reader(conn, remain, length);
	};

	// TODO: Figure out the semantics for closing the stream.
	// The caller should probably be required to close it. Closing should
	// free any intermediate transport/content decoders, and it should not
	// close the actual connection if it is still in the connection pool;
	// if it is not in the pool, it should.
	let stream: io::handle = conn;
	let buffer: []u8 = bufio::scan_buffer(scan);
	const iter = strings::tokenize(te, ",");
	for (true) {
		const te = match (strings::next_token(&iter)) {
		case let tok: str =>
			yield strings::trim(tok);
		case void =>
			break;
		};

		// XXX: We could add lzw support if someone added it to
		// hare-compress
		switch (te) {
		case "chunked" =>
			stream = new_chunked_reader(stream, buffer);
			buffer = [];
		case "deflate" =>
			abort(); // TODO
		case "gzip" =>
			abort(); // TODO
		case =>
			return errors::unsupported;
		};
	};

	if (!(stream is *io::stream)) {
		// The Transfer-Encoding header yielded no codings
		return protoerr;
	};

	return stream as *io::stream;
};
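
// NOTE: The following test is an illustrative sketch added alongside this
// module (it is not part of the upstream API or test suite). It demonstrates
// the tokenization new_reader applies to a comma-separated Transfer-Encoding
// value: the header is split on "," and each token is trimmed before it is
// matched, which is why e.g. "gzip, chunked" is seen as two codings,
// processed in order.
@test fn transfer_encoding_tokens() void = {
	const iter = strings::tokenize("gzip, chunked", ",");
	const first = strings::next_token(&iter) as str;
	assert(strings::trim(first) == "gzip");
	const second = strings::next_token(&iter) as str;
	assert(strings::trim(second) == "chunked");
	assert(strings::next_token(&iter) is void);
};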

type identity_reader = struct {
	vtable: io::stream,
	conn: io::handle,
	buffer: [os::BUFSIZ]u8,
	// Amount of read-ahead data in buffer
	pending: size,
	// Number of body bytes not yet returned to the caller
	length: size,
};

const identity_reader_vtable = io::vtable {
	reader = &identity_read,
	...
};

// Creates a new reader that reads data until the response's Content-Length is
// reached; i.e. with no Transfer-Encoding applied.
fn new_identity_reader(
	conn: io::handle,
	buffer: []u8,
	content_length: size,
) *io::stream = {
	let rd = alloc(identity_reader {
		vtable = &identity_reader_vtable,
		conn = conn,
		length = content_length,
		...
	});
	rd.buffer[..len(buffer)] = buffer[..];
	rd.pending = len(buffer);
	return rd;
};

fn identity_read(
	s: *io::stream,
	buf: []u8,
) (size | io::EOF | io::error) = {
	let rd = s: *identity_reader;
	assert(rd.vtable == &identity_reader_vtable);

	if (rd.length == 0) {
		return io::EOF;
	};

	if (rd.pending == 0) {
		let nread = rd.length;
		if (nread > len(rd.buffer)) {
			nread = len(rd.buffer);
		};
		match (io::read(rd.conn, rd.buffer[..nread])?) {
		case let n: size =>
			rd.pending = n;
		case io::EOF =>
			return io::EOF;
		};
	};

	let n = len(buf);
	if (n > rd.pending) {
		n = rd.pending;
	};
	buf[..n] = rd.buffer[..n];
	rd.buffer[..len(rd.buffer) - n] = rd.buffer[n..];
	rd.pending -= n;
	rd.length -= n;
	return n;
};

type chunked_reader = struct {
	vtable: io::stream,
	conn: io::handle,
	buffer: [os::BUFSIZ]u8,
	// Amount of read-ahead data in buffer
	pending: size,
	// Length of current chunk, or zero
	length: size,
};

fn new_chunked_reader(
	conn: io::handle,
	buffer: []u8,
) *io::stream = {
	let rd = alloc(chunked_reader {
		vtable = &chunked_reader_vtable,
		conn = conn,
		...
	});
	rd.buffer[..len(buffer)] = buffer[..];
	rd.pending = len(buffer);
	return rd;
};

const chunked_reader_vtable = io::vtable {
	reader = &chunked_read,
	...
};

fn chunked_read(
	s: *io::stream,
	buf: []u8,
) (size | io::EOF | io::error) = {
	abort(); // TODO
};
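
// NOTE: Another illustrative test added for this sketch (not from upstream):
// it checks the documented default of [[transport]], namely that the zero
// value selects AUTO for every field, since AUTO is the zero member of both
// enums.
@test fn transport_defaults() void = {
	const xport = transport { ... };
	assert(xport.request_transport == transport_mode::AUTO);
	assert(xport.response_transport == transport_mode::AUTO);
	assert(xport.request_content == content_mode::AUTO);
	assert(xport.response_content == content_mode::AUTO);
};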