Compare commits

..

No commits in common. "24f91280d9a57357f216a2e535b1d0f60e536ea8" and "367eb9b9959e731e7e48416408b24095128bed0d" have entirely different histories.

4 changed files with 39 additions and 212 deletions

View file

@@ -1,95 +0,0 @@
import log
import veb
import time
import net
import x.json2
import net.http
const port = 31228
// Per-request context for the test server; embeds veb.Context to inherit
// the framework's request/response helpers (get_header, json, no_content).
pub struct Context {
veb.Context
}
// App is the veb application state for the test server.
pub struct App {
pub mut:
// receives `true` just before the server's accept loop starts,
// so the test can wait for server readiness (see before_accept_loop)
started chan bool
}
// before_accept_loop is a veb hook, called just before the server begins
// accepting connections; it signals readiness on the `started` channel.
pub fn (mut app App) before_accept_loop() {
app.started <- true
}
// data handles `POST /data/:filename`: it echoes the uploaded body back to
// the client as a JSON-serialized http.FileData (filename, content type and
// raw body). Responds with 204 No Content when the Content-Type header is
// missing.
@['/data/:filename'; post]
fn (mut app App) data(mut ctx Context, filename string) veb.Result {
	content_type := ctx.get_header(http.CommonHeader.content_type) or { return ctx.no_content() }
	f := http.FileData{
		filename: filename
		content_type: content_type
		data: ctx.req.data
	}
	// Fix: the original interpolation was garbled (`$(unknown)`); V string
	// interpolation uses `${...}`, so log the received filename instead.
	log.info('Received ${filename} with content_type ${content_type} and length ${f.data.len}')
	return ctx.json(f)
}
// SVG payload used as the request body in test_make_request; its byte length
// presumably matches the hard-coded `Content-Length: 618` header — TODO confirm.
const svg_image_content = '<svg xmlns="http://www.w3.org/2000/svg" viewBox="-8 -308 316 316" width="316" height="316"><g fill-opacity="0" stroke="#000" xmlns="http://www.w3.org/2000/svg"><path xmlns="http://www.w3.org/2000/svg" d="M0 0 l -1.8369701987210297e-14 -100 m -1.8369701987210297e-14 -100 l -1.8369701987210297e-14 -100 l 100 -2.4492935982947064e-14 m 100 -2.4492935982947064e-14 l 100 -2.4492935982947064e-14 l 3.061616997868383e-14 100 m 3.061616997868383e-14 100 l 3.061616997868383e-14 100 l -100 3.6739403974420595e-14 m -100 3.6739403974420595e-14 l -100 3.6739403974420595e-14" stroke="#000000" stroke-width="5"></path></g></svg>'
// Starts the veb application under test and blocks until its accept loop is
// about to run. A detached watchdog thread hard-exits the whole test process
// after 10 seconds, so a hung server cannot stall the test run forever.
fn test_veb_app_start() {
	log.info('starting watchdog ...')
	// watchdog: force-exit the process if the tests take too long
	spawn fn () {
		log.info('watchdog running')
		time.sleep(10 * time.second)
		log.info('exiting...')
		exit(0)
	}()
	mut server := &App{}
	spawn veb.run_at[App, Context](mut server, port: port)
	// block here until before_accept_loop signals readiness
	_ := <-server.started
	log.info('app started')
}
// test_make_request sends a hand-crafted HTTP POST over a raw TCP socket,
// deliberately splitting the headers and the body into two writes (with a
// small delay between them) to exercise the server's partial-read handling,
// then verifies the JSON echo produced by the /data/:filename endpoint.
fn test_make_request() {
mut client := net.dial_tcp('127.0.0.1:${port}')!
defer { client.close() or {} }
// first write: headers only; Content-Length (618) is expected to equal
// svg_image_content.len — TODO confirm the byte count stays in sync
client.write_string('POST /data/Seeker.svg HTTP/1.1\r
Host: localhost:8090\r
User-Agent: Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:136.0) Gecko/20100101 Firefox/136.0\r
Accept: */*\r
Accept-Language: en-US,en;q=0.5\r
Accept-Encoding: gzip, deflate, br, zstd\r
Content-Type: image/svg+xml\r
Content-Length: 618\r
Origin: null\r
Connection: close\r
Sec-Fetch-Dest: empty\r
Sec-Fetch-Mode: cors\r
Sec-Fetch-Site: cross-site\r
Priority: u=4\r
\r
')! // "
// give the server time to read and parse the headers first, so the body
// arrives in a separate read cycle
time.sleep(25 * time.millisecond)
// second write: the body
client.write_string(svg_image_content)!
// read the whole response until the server closes the connection
mut res := []u8{}
mut buf := []u8{len: 512}
for {
read_len := client.read(mut buf) or { break }
if read_len == 0 {
break
}
res << buf[0..read_len]
}
response := res.bytestr()
assert response.starts_with('HTTP/1.1 200 OK')
assert response.contains('Content-Length: 706')
assert response.contains('Content-Type: application/json')
// the payload starts after the blank line separating headers from body
payload := response.all_after('\r\n\r\n')
r := json2.decode[http.FileData](payload)!
dump(r.filename)
dump(r.content_type)
assert r.filename == 'Seeker.svg'
assert r.content_type == 'image/svg+xml'
assert r.data.starts_with('<svg xmlns=')
assert r.data.ends_with('</svg>')
assert r.data == svg_image_content
}

View file

@@ -134,9 +134,6 @@ mut:
pub fn (mut params RequestParams) request_done(fd int) {
params.incomplete_requests[fd] = http.Request{}
params.idx[fd] = 0
$if trace_handle_read ? {
eprintln('>>>>> fd: ${fd} | request_done.')
}
}
interface BeforeAcceptApp {
@@ -243,8 +240,10 @@ fn handle_timeout(mut pv picoev.Picoev, mut params RequestParams, fd int) {
handle: fd
is_blocking: false
}
fast_send_resp(mut conn, http_408) or {}
pv.close_conn(fd)
params.request_done(fd)
}
@@ -264,16 +263,19 @@ fn handle_write_file(mut pv picoev.Picoev, mut params RequestParams, fd int) {
if bytes_to_write > max_write {
bytes_to_write = max_write
}
data := unsafe { malloc(bytes_to_write) }
defer {
unsafe { free(data) }
}
mut conn := &net.TcpConn{
sock: net.tcp_socket_from_handle_raw(fd)
handle: fd
is_blocking: false
write_timeout: params.timeout_in_seconds * time.second
}
params.file_responses[fd].file.read_into_ptr(data, bytes_to_write) or {
params.file_responses[fd].done()
pv.close_conn(fd)
@@ -299,14 +301,17 @@ fn handle_write_file(mut pv picoev.Picoev, mut params RequestParams, fd int) {
@[direct_array_access]
fn handle_write_string(mut pv picoev.Picoev, mut params RequestParams, fd int) {
mut bytes_to_write := int(params.string_responses[fd].str.len - params.string_responses[fd].pos)
if bytes_to_write > max_write {
bytes_to_write = max_write
}
mut conn := &net.TcpConn{
sock: net.tcp_socket_from_handle_raw(fd)
handle: fd
is_blocking: false
}
// pointer magic to start at the correct position in the buffer
data := unsafe { params.string_responses[fd].str.str + params.string_responses[fd].pos }
actual_written := send_string_ptr(mut conn, data, bytes_to_write) or {
@@ -337,6 +342,7 @@ fn handle_read[A, X](mut pv picoev.Picoev, mut params RequestParams, fd int) {
handle: fd
is_blocking: false
}
// cap the max_read to 8KB
mut reader := io.new_buffered_reader(reader: conn, cap: max_read)
defer {
@@ -344,14 +350,12 @@ fn handle_read[A, X](mut pv picoev.Picoev, mut params RequestParams, fd int) {
reader.free()
}
}
// take the previous incomplete request
mut req := params.incomplete_requests[fd]
// check if there is an incomplete request for this file descriptor
if params.idx[fd] == 0 {
$if trace_handle_read ? {
eprintln('>>>>> fd: ${fd} | start of request parsing')
}
// this is the start of a new request, setup the connection, and read the headers:
// set the read and write timeout according to picoev settings when the
// connection is first encountered
conn.set_read_timeout(params.timeout_in_seconds)
@@ -366,7 +370,7 @@ fn handle_read[A, X](mut pv picoev.Picoev, mut params RequestParams, fd int) {
// the buffered reader was empty meaning that the client probably
// closed the connection.
pv.close_conn(fd)
params.request_done(fd)
params.incomplete_requests[fd] = http.Request{}
return
}
if reader.total_read >= max_read {
@@ -375,45 +379,37 @@ fn handle_read[A, X](mut pv picoev.Picoev, mut params RequestParams, fd int) {
eprintln('[veb] error parsing request: too large')
fast_send_resp(mut conn, http_413) or {}
pv.close_conn(fd)
params.request_done(fd)
params.incomplete_requests[fd] = http.Request{}
return
}
}
if params.idx[fd] == -1 {
// this is for sure a continuation of a previous request, where the first part contained only headers;
// make sure that we are ready to accept the body and account for every byte in it, by setting the counter to 0:
params.idx[fd] = 0
$if trace_handle_read ? {
eprintln('>>>>> fd: ${fd} | continuation of request, where the first part contained headers')
}
}
// check if the request has a body
content_length := req.header.get(.content_length) or { '0' }
content_length_i := content_length.int()
if content_length_i > 0 {
if content_length.int() > 0 {
mut max_bytes_to_read := max_read - reader.total_read
mut bytes_to_read := content_length_i - params.idx[fd]
mut bytes_to_read := content_length.int() - params.idx[fd]
// cap the bytes to read to 8KB for the body, including the request headers if any
if bytes_to_read > max_read - reader.total_read {
bytes_to_read = max_read - reader.total_read
}
mut buf_ptr := params.buf
unsafe {
buf_ptr += fd * max_read // pointer magic
}
// convert to []u8 for BufferedReader
mut buf := unsafe { buf_ptr.vbytes(max_bytes_to_read) }
n := reader.read(mut buf) or {
if reader.total_read > 0 {
// The headers were parsed in this cycle, but the body has not been sent yet. No need to error.
params.idx[fd] = -1 // avoid reparsing the headers on the next call.
params.incomplete_requests[fd] = req
$if trace_handle_read ? {
eprintln('>>>>> fd: ${fd} | request headers were parsed, but the body has not been parsed yet | params.idx[fd]: ${params.idx[fd]} | content_length_i: ${content_length_i}')
}
// the headers were parsed in this cycle, but the body has not been
// sent yet. No need to error
return
}
eprintln('[veb] error reading request body: ${err}')
if err is io.Eof {
// we expect more data to be send, but an Eof error occurred, meaning
// that there is no more data to be read from the socket.
@@ -427,14 +423,17 @@ fn handle_read[A, X](mut pv picoev.Picoev, mut params RequestParams, fd int) {
).join(headers_close)
)) or {}
}
pv.close_conn(fd)
params.request_done(fd)
params.incomplete_requests[fd] = http.Request{}
params.idx[fd] = 0
return
}
// there is no more data to be sent, but it is less than the Content-Length header
// so it is a mismatch of body length and content length.
// Or if there is more data received then the Content-Length header specified
if (n == 0 && params.idx[fd] != 0) || params.idx[fd] + n > content_length_i {
if (n == 0 && params.idx[fd] != 0) || params.idx[fd] + n > content_length.int() {
fast_send_resp(mut conn, http.new_response(
status: .bad_request
body: 'Mismatch of body length and Content-Length header'
@@ -443,31 +442,29 @@ fn handle_read[A, X](mut pv picoev.Picoev, mut params RequestParams, fd int) {
value: 'text/plain'
).join(headers_close)
)) or {}
pv.close_conn(fd)
params.request_done(fd)
params.incomplete_requests[fd] = http.Request{}
params.idx[fd] = 0
return
} else if n < bytes_to_read || params.idx[fd] + n < content_length_i {
} else if n < bytes_to_read || params.idx[fd] + n < content_length.int() {
// request is incomplete wait until the socket becomes ready to read again
params.idx[fd] += n
// TODO: change this to a memcpy function?
req.data += buf[0..n].bytestr()
params.incomplete_requests[fd] = req
params.idx[fd] += n
$if trace_handle_read ? {
eprintln('>>>>> request is NOT complete, fd: ${fd} | n: ${n} | req.data.len: ${req.data.len} | params.idx[fd]: ${params.idx[fd]}')
}
return
} else {
// request is complete: n = bytes_to_read
req.data += buf[0..n].bytestr()
params.idx[fd] += n
$if trace_handle_read ? {
eprintln('>>>>> request is NOW COMPLETE, fd: ${fd} | n: ${n} | req.data.len: ${req.data.len}')
}
req.data += buf[0..n].bytestr()
}
}
defer {
params.request_done(fd)
}
if completed_context := handle_request[A, X](mut conn, req, params) {
if completed_context.takeover {
// the connection should be kept open, but removed from the picoev loop.
@@ -476,11 +473,13 @@ fn handle_read[A, X](mut pv picoev.Picoev, mut params RequestParams, fd int) {
pv.delete(fd)
return
}
// TODO: At this point the Context can safely be freed when this function returns.
// The user will have to clone the context if the context object should be kept.
// defer {
// completed_context.free()
// }
match completed_context.return_type {
.normal {
// small optimization: if the response is small write it immediately

View file

@@ -412,41 +412,11 @@ fn (mut decoder Decoder) decode_value[T](mut val T) ! {
string_buffer << `\t`
}
`u` {
unicode_point := rune(strconv.parse_uint(decoder.json[
string_buffer << rune(strconv.parse_uint(decoder.json[
string_info.position + string_index..string_info.position +
string_index + 4], 16, 32)!)
string_index + 4], 16, 32)!).bytes()
string_index += 4
if unicode_point < 0xD800 { // normal utf-8
string_buffer << unicode_point.bytes()
} else if unicode_point >= 0xDC00 { // trail surrogate -> invalid
decoder.decode_error('Got trail surrogate: ${u32(unicode_point):04X} before head surrogate.')!
} else { // head surrogate -> treat as utf-16
if string_index > string_info.length - 6 {
decoder.decode_error('Expected a trail surrogate after a head surrogate, but got no valid escape sequence.')!
}
if decoder.json[string_info.position + string_index..
string_info.position + string_index + 2] != '\\u' {
decoder.decode_error('Expected a trail surrogate after a head surrogate, but got no valid escape sequence.')!
}
string_index += 2
unicode_point2 := rune(strconv.parse_uint(decoder.json[
string_info.position + string_index..string_info.position +
string_index + 4], 16, 32)!)
string_index += 4
if unicode_point2 < 0xDC00 {
decoder.decode_error('Expected a trail surrogate after a head surrogate, but got ${u32(unicode_point):04X}.')!
}
final_unicode_point := (unicode_point2 & 0x3FF) +
((unicode_point & 0x3FF) << 10) + 0x10000
string_buffer << final_unicode_point.bytes()
}
}
else {} // has already been checked
}

View file

@@ -10,50 +10,3 @@ fn test_decode_escaped_string() {
assert escaped_strings == decoded_strings
}
// test_surrogate checks that an escaped UTF-16 surrogate pair in JSON
// (\ud83d\ude00) is decoded into a single code point (U+1F600, 😀),
// both alone and followed by ordinary text.
fn test_surrogate() {
assert decoder2.decode[string](r'"\ud83d\ude00"')! == '😀'
assert decoder2.decode[string](r'"\ud83d\ude00 text"')! == '😀 text'
}
// test_invalid_surrogate verifies that malformed UTF-16 surrogate escape
// sequences are rejected with a JsonDecodeError carrying the expected
// position (line/character) and message.
fn test_invalid_surrogate() {
// head surrogate at the end of the string: no trail surrogate can follow
if x := decoder2.decode[string](r'"\ud83d"') {
assert false
} else {
if err is decoder2.JsonDecodeError {
assert err.line == 1
assert err.character == 1
assert err.message == 'Data: Expected a trail surrogate after a head surrogate, but got no valid escape sequence.'
}
}
// head surrogate followed by non-\u escapes instead of a trail surrogate
if x := decoder2.decode[string](r'"\ud83d\n\n\n\n"') {
assert false
} else {
if err is decoder2.JsonDecodeError {
assert err.line == 1
assert err.character == 1
assert err.message == 'Data: Expected a trail surrogate after a head surrogate, but got no valid escape sequence.'
}
}
// head surrogate followed by another head surrogate
if x := decoder2.decode[string](r'"\ud83d\ud83d"') {
assert false
} else {
if err is decoder2.JsonDecodeError {
assert err.line == 1
assert err.character == 1
assert err.message == 'Data: Expected a trail surrogate after a head surrogate, but got D83D.'
}
}
// trail surrogate appearing before any head surrogate
if x := decoder2.decode[string](r'"\ude00\ud83d"') {
assert false
} else {
if err is decoder2.JsonDecodeError {
assert err.line == 1
assert err.character == 1
assert err.message == 'Data: Got trail surrogate: DE00 before head surrogate.'
}
}
}