Squashed 'src/beast/beast/http/impl/http-parser/' changes from cba704c..fd65b0f

fd65b0f src: refactor method parsing
678a9e2 test: Assert against correct error messages
e2e467b Update http-parser to 2.6.1
4e382f9 readme: fix build status badge
bee4817 Bump version to 2.6.0
777ba4e src: introduce `http_parser_url_init`
483eca7 doc: updated README.md to include multi-threading example
e557b62 src: support LINK/UNLINK (RFC 2068, draft-snell-link-method)
e01811e src: fixed compile error C2143 for vs2012
b36c2a9 header: treat Wine like MinGW
eb5e992 src: support ACL (WebDAV, RFC3744, Section 8.1).
4f69be2 readme: update WebSocket link to RFC6455
b5bcca8 test: `SEARCH`, `PURGE` and `MKCALENDAR`
8b1d652 src: support BIND/REBIND/UNBIND (WebDAV, RFC5842)
7d75dd7 src: support IPv6 Zone ID as per RFC 6874
ab0b162 src: use ARRAY_SIZE instead of sizeof()
39ff097 src: remove double check
f6f436a src: fix invalid memory access in http_parse_host
2896229 make: fix dynamic library extension for OS X
39c2c1e Bump version to 2.5.0
dff604d src: support body in Upgrade requests
d767545 src: callbacks chunk boundaries: header/complete
2872cb7 test: regression test for incomplete/corrupted hdr
5d414fc makefile: add un/install targets
d547f3b url_parser: remove mixed declarations
7ecf775 src: partially revert 959f4cb to fix nread value
7ba3123 header: fix field sizes
53063b7 Add function to initialize http_parser_settings
1b31580 Bump version to 2.4.2
59569f2 src: skip lws between `connection` values
36f107f Bump version to 2.4.1
280af69 src: fix build on MSVC
956c8a0 Bump version to 2.4.0
167dcdf readme: fix typo
3f7ef50 src: annotate with likely/unlikely
265f9d0 bench: add chunked bytes
091ebb8 src: simple Connection header multi-value parsing
959f4cb src: remove reexecute goto
0097de5 src: use memchr() in h_general header value
c6097e1 src: faster general header value loop
2630060 src: less loads in header_value loop
0cb0ee6 src: tighten header field/value loops
6132d1f src: save progress
3f1a05a benchmark: initial
94a55d1 send travis irc notifications to #node-ci
5fd51fd Fix warning on test suite found by Clang Analyzer
0b43367 http_parser: Follow RFC-7230 Sec 3.2.4
11ecb42 Docs fix
7bbb774 doc: add very basic docs for `http_parser_execute`
17ed7de header: typo fix in a comment
5b951d7 src: fix clang warning
1317eec Added support for MKCALENDAR
08a2cc3 very minor spelling/grammar changes in README.md
158dd3b signing the CLA is no longer a requirement
8d9e5db fix typo in README comment
d19e129 contrib: fixed resource leak in parsertrace
24e2d2d Allow HTTP_MAX_HEADER_SIZE to be defined externally
56f7ad0 Bump version to 2.3.0
76f0f16 Fix issues around multi-line headers
5d9c382 Include separating ws when folding header values

git-subtree-dir: src/beast/beast/http/impl/http-parser
git-subtree-split: fd65b0fbbdb405425a14d0e49f5366667550b1c2
Author: Vinnie Falco
Date: 2016-03-04 12:26:41 -05:00
parent 6c0edd2190
commit 404d58d77c
13 changed files with 1504 additions and 394 deletions
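A minimal sketch, not part of the diff below, of how two of the API additions pulled in by this update fit together: `http_parser_settings_init` (53063b7) and the `on_chunk_header`/`on_chunk_complete` callbacks (d767545). The chunked request literal and the printf reporting are illustrative assumptions; the parser calls themselves match the http_parser.h changes further down.

```
/* Sketch: zero the settings struct, hook the new chunk callbacks, and feed
 * a small chunked request through the parser. */
#include <stdio.h>
#include "http_parser.h"

static int on_chunk_header(http_parser *p) {
  /* While on_chunk_header runs, content_length holds the current chunk size. */
  printf("chunk of %llu bytes\n", (unsigned long long)p->content_length);
  return 0;
}

static int on_chunk_complete(http_parser *p) {
  (void)p;
  printf("chunk complete\n");
  return 0;
}

int main(void) {
  static const char req[] =
    "POST / HTTP/1.1\r\n"
    "Transfer-Encoding: chunked\r\n"
    "\r\n"
    "5\r\nhello\r\n0\r\n\r\n";
  http_parser parser;
  http_parser_settings settings;
  size_t nparsed;

  http_parser_settings_init(&settings);        /* zero every callback first */
  settings.on_chunk_header = on_chunk_header;
  settings.on_chunk_complete = on_chunk_complete;

  http_parser_init(&parser, HTTP_REQUEST);
  nparsed = http_parser_execute(&parser, &settings, req, sizeof(req) - 1);
  if (nparsed != sizeof(req) - 1)
    fprintf(stderr, "error: %s\n",
            http_errno_description(HTTP_PARSER_ERRNO(&parser)));
  return 0;
}
```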

.gitignore

@@ -5,12 +5,15 @@ tags
test test
test_g test_g
test_fast test_fast
bench
url_parser url_parser
parsertrace parsertrace
parsertrace_g parsertrace_g
*.mk *.mk
*.Makefile *.Makefile
*.so.* *.so.*
*.exe.*
*.exe
*.a *.a

.mailmap

@@ -5,3 +5,4 @@ Salman Haq <salman.haq@asti-usa.com>
Simon Zimmermann <simonz05@gmail.com> Simon Zimmermann <simonz05@gmail.com>
Thomas LE ROUX <thomas@november-eleven.fr> LE ROUX Thomas <thomas@procheo.fr> Thomas LE ROUX <thomas@november-eleven.fr> LE ROUX Thomas <thomas@procheo.fr>
Thomas LE ROUX <thomas@november-eleven.fr> Thomas LE ROUX <thomas@procheo.fr> Thomas LE ROUX <thomas@november-eleven.fr> Thomas LE ROUX <thomas@procheo.fr>
Fedor Indutny <fedor@indutny.com>

.travis.yml

@@ -10,4 +10,4 @@ script:
notifications: notifications:
email: false email: false
irc: irc:
- "irc.freenode.net#libuv" - "irc.freenode.net#node-ci"

AUTHORS

@@ -39,11 +39,30 @@ BogDan Vatra <bogdan@kde.org>
Peter Faiman <peter@thepicard.org> Peter Faiman <peter@thepicard.org>
Corey Richardson <corey@octayn.net> Corey Richardson <corey@octayn.net>
Tóth Tamás <tomika_nospam@freemail.hu> Tóth Tamás <tomika_nospam@freemail.hu>
Patrik Stutz <patrik.stutz@gmail.com>
Cam Swords <cam.swords@gmail.com> Cam Swords <cam.swords@gmail.com>
Chris Dickinson <christopher.s.dickinson@gmail.com> Chris Dickinson <christopher.s.dickinson@gmail.com>
Uli Köhler <ukoehler@btronik.de> Uli Köhler <ukoehler@btronik.de>
Charlie Somerville <charlie@charliesomerville.com> Charlie Somerville <charlie@charliesomerville.com>
Patrik Stutz <patrik.stutz@gmail.com>
Fedor Indutny <fedor.indutny@gmail.com> Fedor Indutny <fedor.indutny@gmail.com>
runner <runner.mei@gmail.com> runner <runner.mei@gmail.com>
Alexis Campailla <alexis@janeasystems.com> Alexis Campailla <alexis@janeasystems.com>
David Wragg <david@wragg.org>
Vinnie Falco <vinnie.falco@gmail.com>
Alex Butum <alexbutum@linux.com>
Rex Feng <rexfeng@gmail.com>
Alex Kocharin <alex@kocharin.ru>
Mark Koopman <markmontymark@yahoo.com>
Helge Heß <me@helgehess.eu>
Alexis La Goutte <alexis.lagoutte@gmail.com>
George Miroshnykov <george.miroshnykov@gmail.com>
Maciej Małecki <me@mmalecki.com>
Marc O'Morain <github.com@marcomorain.com>
Jeff Pinner <jpinner@twitter.com>
Timothy J Fontaine <tjfontaine@gmail.com>
Akagi201 <akagi201@gmail.com>
Romain Giraud <giraud.romain@gmail.com>
Jay Satiro <raysatiro@yahoo.com>
Arne Steen <Arne.Steen@gmx.de>
Kjell Schubert <kjell.schubert@gmail.com>
Olivier Mengué <dolmen@cpan.org>

CONTRIBUTIONS

@@ -1,4 +0,0 @@
Contributors must agree to the Contributor License Agreement before patches
can be accepted.
http://spreadsheets2.google.com/viewform?hl=en&formkey=dDJXOGUwbzlYaWM4cHN1MERwQS1CSnc6MQ

Makefile

@@ -19,32 +19,54 @@
# IN THE SOFTWARE. # IN THE SOFTWARE.
PLATFORM ?= $(shell sh -c 'uname -s | tr "[A-Z]" "[a-z]"') PLATFORM ?= $(shell sh -c 'uname -s | tr "[A-Z]" "[a-z]"')
SONAME ?= libhttp_parser.so.2.2.1 HELPER ?=
BINEXT ?=
ifeq (darwin,$(PLATFORM))
SONAME ?= libhttp_parser.2.6.1.dylib
SOEXT ?= dylib
else ifeq (wine,$(PLATFORM))
CC = winegcc
BINEXT = .exe.so
HELPER = wine
else
SONAME ?= libhttp_parser.so.2.6.1
SOEXT ?= so
endif
CC?=gcc CC?=gcc
AR?=ar AR?=ar
CPPFLAGS ?=
LDFLAGS ?=
CPPFLAGS += -I. CPPFLAGS += -I.
CPPFLAGS_DEBUG = $(CPPFLAGS) -DHTTP_PARSER_STRICT=1 CPPFLAGS_DEBUG = $(CPPFLAGS) -DHTTP_PARSER_STRICT=1
CPPFLAGS_DEBUG += $(CPPFLAGS_DEBUG_EXTRA) CPPFLAGS_DEBUG += $(CPPFLAGS_DEBUG_EXTRA)
CPPFLAGS_FAST = $(CPPFLAGS) -DHTTP_PARSER_STRICT=0 CPPFLAGS_FAST = $(CPPFLAGS) -DHTTP_PARSER_STRICT=0
CPPFLAGS_FAST += $(CPPFLAGS_FAST_EXTRA) CPPFLAGS_FAST += $(CPPFLAGS_FAST_EXTRA)
CPPFLAGS_BENCH = $(CPPFLAGS_FAST)
CFLAGS += -Wall -Wextra -Werror CFLAGS += -Wall -Wextra -Werror
CFLAGS_DEBUG = $(CFLAGS) -O0 -g $(CFLAGS_DEBUG_EXTRA) CFLAGS_DEBUG = $(CFLAGS) -O0 -g $(CFLAGS_DEBUG_EXTRA)
CFLAGS_FAST = $(CFLAGS) -O3 $(CFLAGS_FAST_EXTRA) CFLAGS_FAST = $(CFLAGS) -O3 $(CFLAGS_FAST_EXTRA)
CFLAGS_BENCH = $(CFLAGS_FAST) -Wno-unused-parameter
CFLAGS_LIB = $(CFLAGS_FAST) -fPIC CFLAGS_LIB = $(CFLAGS_FAST) -fPIC
LDFLAGS_LIB = $(LDFLAGS) -shared LDFLAGS_LIB = $(LDFLAGS) -shared
INSTALL ?= install
PREFIX ?= $(DESTDIR)/usr/local
LIBDIR = $(PREFIX)/lib
INCLUDEDIR = $(PREFIX)/include
ifneq (darwin,$(PLATFORM)) ifneq (darwin,$(PLATFORM))
# TODO(bnoordhuis) The native SunOS linker expects -h rather than -soname... # TODO(bnoordhuis) The native SunOS linker expects -h rather than -soname...
LDFLAGS_LIB += -Wl,-soname=$(SONAME) LDFLAGS_LIB += -Wl,-soname=$(SONAME)
endif endif
test: test_g test_fast test: test_g test_fast
./test_g $(HELPER) ./test_g$(BINEXT)
./test_fast $(HELPER) ./test_fast$(BINEXT)
test_g: http_parser_g.o test_g.o test_g: http_parser_g.o test_g.o
$(CC) $(CFLAGS_DEBUG) $(LDFLAGS) http_parser_g.o test_g.o -o $@ $(CC) $(CFLAGS_DEBUG) $(LDFLAGS) http_parser_g.o test_g.o -o $@
@@ -61,11 +83,17 @@ test_fast: http_parser.o test.o http_parser.h
test.o: test.c http_parser.h Makefile test.o: test.c http_parser.h Makefile
$(CC) $(CPPFLAGS_FAST) $(CFLAGS_FAST) -c test.c -o $@ $(CC) $(CPPFLAGS_FAST) $(CFLAGS_FAST) -c test.c -o $@
bench: http_parser.o bench.o
$(CC) $(CFLAGS_BENCH) $(LDFLAGS) http_parser.o bench.o -o $@
bench.o: bench.c http_parser.h Makefile
$(CC) $(CPPFLAGS_BENCH) $(CFLAGS_BENCH) -c bench.c -o $@
http_parser.o: http_parser.c http_parser.h Makefile http_parser.o: http_parser.c http_parser.h Makefile
$(CC) $(CPPFLAGS_FAST) $(CFLAGS_FAST) -c http_parser.c $(CC) $(CPPFLAGS_FAST) $(CFLAGS_FAST) -c http_parser.c
test-run-timed: test_fast test-run-timed: test_fast
while(true) do time ./test_fast > /dev/null; done while(true) do time $(HELPER) ./test_fast$(BINEXT) > /dev/null; done
test-valgrind: test_g test-valgrind: test_g
valgrind ./test_g valgrind ./test_g
@@ -86,20 +114,36 @@ url_parser_g: http_parser_g.o contrib/url_parser.c
$(CC) $(CPPFLAGS_DEBUG) $(CFLAGS_DEBUG) $^ -o $@ $(CC) $(CPPFLAGS_DEBUG) $(CFLAGS_DEBUG) $^ -o $@
parsertrace: http_parser.o contrib/parsertrace.c parsertrace: http_parser.o contrib/parsertrace.c
$(CC) $(CPPFLAGS_FAST) $(CFLAGS_FAST) $^ -o parsertrace $(CC) $(CPPFLAGS_FAST) $(CFLAGS_FAST) $^ -o parsertrace$(BINEXT)
parsertrace_g: http_parser_g.o contrib/parsertrace.c parsertrace_g: http_parser_g.o contrib/parsertrace.c
$(CC) $(CPPFLAGS_DEBUG) $(CFLAGS_DEBUG) $^ -o parsertrace_g $(CC) $(CPPFLAGS_DEBUG) $(CFLAGS_DEBUG) $^ -o parsertrace_g$(BINEXT)
tags: http_parser.c http_parser.h test.c tags: http_parser.c http_parser.h test.c
ctags $^ ctags $^
install: library
$(INSTALL) -D http_parser.h $(INCLUDEDIR)/http_parser.h
$(INSTALL) -D $(SONAME) $(LIBDIR)/$(SONAME)
ln -s $(LIBDIR)/$(SONAME) $(LIBDIR)/libhttp_parser.$(SOEXT)
install-strip: library
$(INSTALL) -D http_parser.h $(INCLUDEDIR)/http_parser.h
$(INSTALL) -D -s $(SONAME) $(LIBDIR)/$(SONAME)
ln -s $(LIBDIR)/$(SONAME) $(LIBDIR)/libhttp_parser.$(SOEXT)
uninstall:
rm $(INCLUDEDIR)/http_parser.h
rm $(LIBDIR)/$(SONAME)
rm $(LIBDIR)/libhttp_parser.so
clean: clean:
rm -f *.o *.a tags test test_fast test_g \ rm -f *.o *.a tags test test_fast test_g \
http_parser.tar libhttp_parser.so.* \ http_parser.tar libhttp_parser.so.* \
url_parser url_parser_g parsertrace parsertrace_g url_parser url_parser_g parsertrace parsertrace_g \
*.exe *.exe.so
contrib/url_parser.c: http_parser.h contrib/url_parser.c: http_parser.h
contrib/parsertrace.c: http_parser.h contrib/parsertrace.c: http_parser.h
.PHONY: clean package test-run test-run-timed test-valgrind .PHONY: clean package test-run test-run-timed test-valgrind install install-strip uninstall

README.md

@@ -1,7 +1,7 @@
HTTP Parser HTTP Parser
=========== ===========
[![Build Status](https://travis-ci.org/joyent/http-parser.png?branch=master)](https://travis-ci.org/joyent/http-parser) [![Build Status](https://api.travis-ci.org/nodejs/http-parser.svg?branch=master)](https://travis-ci.org/nodejs/http-parser)
This is a parser for HTTP messages written in C. It parses both requests and This is a parser for HTTP messages written in C. It parses both requests and
responses. The parser is designed to be used in performance HTTP responses. The parser is designed to be used in performance HTTP
@@ -61,7 +61,7 @@ if (recved < 0) {
} }
/* Start up / continue the parser. /* Start up / continue the parser.
* Note we pass recved==0 to signal that EOF has been recieved. * Note we pass recved==0 to signal that EOF has been received.
*/ */
nparsed = http_parser_execute(parser, &settings, buf, recved); nparsed = http_parser_execute(parser, &settings, buf, recved);
@@ -75,7 +75,7 @@ if (parser->upgrade) {
HTTP needs to know where the end of the stream is. For example, sometimes HTTP needs to know where the end of the stream is. For example, sometimes
servers send responses without Content-Length and expect the client to servers send responses without Content-Length and expect the client to
consume input (for the body) until EOF. To tell http_parser about EOF, give consume input (for the body) until EOF. To tell http_parser about EOF, give
`0` as the forth parameter to `http_parser_execute()`. Callbacks and errors `0` as the fourth parameter to `http_parser_execute()`. Callbacks and errors
can still be encountered during an EOF, so one must still be prepared can still be encountered during an EOF, so one must still be prepared
to receive them. to receive them.
@@ -94,7 +94,7 @@ The Special Problem of Upgrade
------------------------------ ------------------------------
HTTP supports upgrading the connection to a different protocol. An HTTP supports upgrading the connection to a different protocol. An
increasingly common example of this is the Web Socket protocol which sends increasingly common example of this is the WebSocket protocol which sends
a request like a request like
GET /demo HTTP/1.1 GET /demo HTTP/1.1
@@ -106,11 +106,11 @@ a request like
followed by non-HTTP data. followed by non-HTTP data.
(See http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-75 for more (See [RFC6455](https://tools.ietf.org/html/rfc6455) for more information the
information the Web Socket protocol.) WebSocket protocol.)
To support this, the parser will treat this as a normal HTTP message without a To support this, the parser will treat this as a normal HTTP message without a
body. Issuing both on_headers_complete and on_message_complete callbacks. However body, issuing both on_headers_complete and on_message_complete callbacks. However
http_parser_execute() will stop parsing at the end of the headers and return. http_parser_execute() will stop parsing at the end of the headers and return.
The user is expected to check if `parser->upgrade` has been set to 1 after The user is expected to check if `parser->upgrade` has been set to 1 after
@@ -131,12 +131,75 @@ There are two types of callbacks:
* notification `typedef int (*http_cb) (http_parser*);` * notification `typedef int (*http_cb) (http_parser*);`
Callbacks: on_message_begin, on_headers_complete, on_message_complete. Callbacks: on_message_begin, on_headers_complete, on_message_complete.
* data `typedef int (*http_data_cb) (http_parser*, const char *at, size_t length);` * data `typedef int (*http_data_cb) (http_parser*, const char *at, size_t length);`
Callbacks: (requests only) on_uri, Callbacks: (requests only) on_url,
(common) on_header_field, on_header_value, on_body; (common) on_header_field, on_header_value, on_body;
Callbacks must return 0 on success. Returning a non-zero value indicates Callbacks must return 0 on success. Returning a non-zero value indicates
error to the parser, making it exit immediately. error to the parser, making it exit immediately.
For cases where it is necessary to pass local information to/from a callback,
the `http_parser` object's `data` field can be used.
An example of such a case is when using threads to handle a socket connection,
parse a request, and then give a response over that socket. By instantiating parse a request, and then give a response over that socket. By instantiating
a thread-local struct containing relevant data (e.g. accepted socket, a thread-local struct containing relevant data (e.g. accepted socket,
allocated memory for callbacks to write into, etc), a parser's callbacks are
able to communicate data between the scope of the thread and the scope of the
callback in a threadsafe manner. This allows http-parser to be used in
multi-threaded contexts.
Example:
```
typedef struct {
socket_t sock;
void* buffer;
int buf_len;
} custom_data_t;
int my_url_callback(http_parser* parser, const char *at, size_t length) {
/* Access the thread-local custom_data_t struct.
Use this access to save parsed data for later use in the thread-local
buffer, or to communicate over the socket
*/
parser->data;
...
return 0;
}
...
void http_parser_thread(socket_t sock) {
int nparsed = 0;
/* allocate memory for user data */
custom_data_t *my_data = malloc(sizeof(custom_data_t));
/* some information for use by callbacks.
* achieves thread -> callback information flow */
my_data->sock = sock;
/* instantiate a thread-local parser */
http_parser *parser = malloc(sizeof(http_parser));
http_parser_init(parser, HTTP_REQUEST); /* initialise parser */
/* this custom data reference is accessible through the reference to the
parser supplied to callback functions */
parser->data = my_data;
http_parser_settings settings; /* set up callbacks */
settings.on_url = my_url_callback;
/* execute parser */
nparsed = http_parser_execute(parser, &settings, buf, recved);
...
/* parsed information copied from callback.
can now perform action on data copied into thread-local memory from callbacks.
achieves callback -> thread information flow */
my_data->buffer;
...
}
```
In case you parse HTTP message in chunks (i.e. `read()` request line In case you parse HTTP message in chunks (i.e. `read()` request line
from socket, parse, read half headers, parse, etc) your data callbacks from socket, parse, read half headers, parse, etc) your data callbacks
may be called more than once. Http-parser guarantees that data pointer is only may be called more than once. Http-parser guarantees that data pointer is only
@@ -145,7 +208,7 @@ buffer to avoid copying memory around if this fits your application.
Reading headers may be a tricky task if you read/parse headers partially. Reading headers may be a tricky task if you read/parse headers partially.
Basically, you need to remember whether last header callback was field or value Basically, you need to remember whether last header callback was field or value
and apply following logic: and apply the following logic:
(on_header_field and on_header_value shortened to on_h_*) (on_header_field and on_header_value shortened to on_h_*)
------------------------ ------------ -------------------------------------------- ------------------------ ------------ --------------------------------------------
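The README hunk above stops right where the field/value bookkeeping table begins. As a hedged illustration of that bookkeeping (the fixed-size buffers and the `last_was_value` flag are invented for the sketch, not library API), the callbacks might look like:

```
/* Sketch: remember whether the last header callback was a field or a value,
 * start a new header whenever a field follows a value, and append partial
 * data otherwise (callbacks may fire several times per string).
 * Buffer-length checks are omitted for brevity. */
#include <string.h>
#include "http_parser.h"

#define MAX_HDRS 64

static char fields[MAX_HDRS][256];
static char values[MAX_HDRS][1024];
static int  nheaders;
static int  last_was_value = 1;   /* so the very first field opens header 0 */

static int on_header_field(http_parser *p, const char *at, size_t len) {
  (void)p;
  if (last_was_value) {           /* previous header finished: start a new one */
    if (nheaders == MAX_HDRS) return 1;   /* non-zero aborts the parser */
    nheaders++;
    last_was_value = 0;
  }
  strncat(fields[nheaders - 1], at, len);
  return 0;
}

static int on_header_value(http_parser *p, const char *at, size_t len) {
  (void)p;
  last_was_value = 1;
  strncat(values[nheaders - 1], at, len);
  return 0;
}
```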

bench.c (new file)

@@ -0,0 +1,111 @@
/* Copyright Fedor Indutny. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "http_parser.h"
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <sys/time.h>
static const char data[] =
"POST /joyent/http-parser HTTP/1.1\r\n"
"Host: github.com\r\n"
"DNT: 1\r\n"
"Accept-Encoding: gzip, deflate, sdch\r\n"
"Accept-Language: ru-RU,ru;q=0.8,en-US;q=0.6,en;q=0.4\r\n"
"User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) "
"AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/39.0.2171.65 Safari/537.36\r\n"
"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,"
"image/webp,*/*;q=0.8\r\n"
"Referer: https://github.com/joyent/http-parser\r\n"
"Connection: keep-alive\r\n"
"Transfer-Encoding: chunked\r\n"
"Cache-Control: max-age=0\r\n\r\nb\r\nhello world\r\n0\r\n\r\n";
static const size_t data_len = sizeof(data) - 1;
static int on_info(http_parser* p) {
return 0;
}
static int on_data(http_parser* p, const char *at, size_t length) {
return 0;
}
static http_parser_settings settings = {
.on_message_begin = on_info,
.on_headers_complete = on_info,
.on_message_complete = on_info,
.on_header_field = on_data,
.on_header_value = on_data,
.on_url = on_data,
.on_status = on_data,
.on_body = on_data
};
int bench(int iter_count, int silent) {
struct http_parser parser;
int i;
int err;
struct timeval start;
struct timeval end;
float rps;
if (!silent) {
err = gettimeofday(&start, NULL);
assert(err == 0);
}
for (i = 0; i < iter_count; i++) {
size_t parsed;
http_parser_init(&parser, HTTP_REQUEST);
parsed = http_parser_execute(&parser, &settings, data, data_len);
assert(parsed == data_len);
}
if (!silent) {
err = gettimeofday(&end, NULL);
assert(err == 0);
fprintf(stdout, "Benchmark result:\n");
rps = (float) (end.tv_sec - start.tv_sec) +
(end.tv_usec - start.tv_usec) * 1e-6f;
fprintf(stdout, "Took %f seconds to run\n", rps);
rps = (float) iter_count / rps;
fprintf(stdout, "%f req/sec\n", rps);
fflush(stdout);
}
return 0;
}
int main(int argc, char** argv) {
if (argc == 2 && strcmp(argv[1], "infinite") == 0) {
for (;;)
bench(5000000, 1);
return 0;
} else {
return bench(5000000, 0);
}
}

contrib/parsertrace.c

@@ -111,14 +111,14 @@ int main(int argc, char* argv[]) {
FILE* file = fopen(filename, "r"); FILE* file = fopen(filename, "r");
if (file == NULL) { if (file == NULL) {
perror("fopen"); perror("fopen");
return EXIT_FAILURE; goto fail;
} }
fseek(file, 0, SEEK_END); fseek(file, 0, SEEK_END);
long file_length = ftell(file); long file_length = ftell(file);
if (file_length == -1) { if (file_length == -1) {
perror("ftell"); perror("ftell");
return EXIT_FAILURE; goto fail;
} }
fseek(file, 0, SEEK_SET); fseek(file, 0, SEEK_SET);
@@ -126,7 +126,7 @@ int main(int argc, char* argv[]) {
if (fread(data, 1, file_length, file) != (size_t)file_length) { if (fread(data, 1, file_length, file) != (size_t)file_length) {
fprintf(stderr, "couldn't read entire file\n"); fprintf(stderr, "couldn't read entire file\n");
free(data); free(data);
return EXIT_FAILURE; goto fail;
} }
http_parser_settings settings; http_parser_settings settings;
@@ -149,8 +149,12 @@ int main(int argc, char* argv[]) {
"Error: %s (%s)\n", "Error: %s (%s)\n",
http_errno_description(HTTP_PARSER_ERRNO(&parser)), http_errno_description(HTTP_PARSER_ERRNO(&parser)),
http_errno_name(HTTP_PARSER_ERRNO(&parser))); http_errno_name(HTTP_PARSER_ERRNO(&parser)));
return EXIT_FAILURE; goto fail;
} }
return EXIT_SUCCESS; return EXIT_SUCCESS;
fail:
fclose(file);
return EXIT_FAILURE;
} }

contrib/url_parser.c

@@ -14,7 +14,7 @@ dump_url (const char *url, const struct http_parser_url *u)
continue; continue;
} }
printf("\tfield_data[%u]: off: %u len: %u part: \"%.*s\n", printf("\tfield_data[%u]: off: %u, len: %u, part: %.*s\n",
i, i,
u->field_data[i].off, u->field_data[i].off,
u->field_data[i].len, u->field_data[i].len,
@@ -24,16 +24,19 @@ dump_url (const char *url, const struct http_parser_url *u)
} }
int main(int argc, char ** argv) { int main(int argc, char ** argv) {
struct http_parser_url u;
int len, connect, result;
if (argc != 3) { if (argc != 3) {
printf("Syntax : %s connect|get url\n", argv[0]); printf("Syntax : %s connect|get url\n", argv[0]);
return 1; return 1;
} }
struct http_parser_url u; len = strlen(argv[2]);
int len = strlen(argv[2]); connect = strcmp("connect", argv[1]) == 0 ? 1 : 0;
int connect = strcmp("connect", argv[1]) == 0 ? 1 : 0;
printf("Parsing %s, connect %d\n", argv[2], connect); printf("Parsing %s, connect %d\n", argv[2], connect);
int result = http_parser_parse_url(argv[2], len, connect, &u); http_parser_url_init(&u);
result = http_parser_parse_url(argv[2], len, connect, &u);
if (result != 0) { if (result != 0) {
printf("Parse error : %d\n", result); printf("Parse error : %d\n", result);
return result; return result;
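Building on the `http_parser_url_init` usage above, a hedged sketch of pulling individual components out of a parsed URL; the example URL with an RFC 6874 Zone ID is an illustrative assumption, while the struct fields match the http_parser.h declarations below.

```
/* Sketch: initialize the url struct, parse, then read host and port via
 * field_set / field_data offsets into the original buffer. */
#include <stdio.h>
#include <string.h>
#include "http_parser.h"

int main(void) {
  const char *url = "http://[fe80::a%25eth0]:8080/index.html";
  struct http_parser_url u;

  http_parser_url_init(&u);                  /* zero all members first */
  if (http_parser_parse_url(url, strlen(url), 0, &u) != 0) {
    fprintf(stderr, "parse error\n");
    return 1;
  }
  if (u.field_set & (1 << UF_HOST))
    printf("host: %.*s\n", (int)u.field_data[UF_HOST].len,
           url + u.field_data[UF_HOST].off);  /* -> fe80::a%25eth0 */
  if (u.field_set & (1 << UF_PORT))
    printf("port: %u\n", (unsigned)u.port);
  return 0;
}
```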

http_parser.c: diff suppressed because it is too large

http_parser.h

@@ -26,11 +26,12 @@ extern "C" {
/* Also update SONAME in the Makefile whenever you change these. */ /* Also update SONAME in the Makefile whenever you change these. */
#define HTTP_PARSER_VERSION_MAJOR 2 #define HTTP_PARSER_VERSION_MAJOR 2
#define HTTP_PARSER_VERSION_MINOR 2 #define HTTP_PARSER_VERSION_MINOR 6
#define HTTP_PARSER_VERSION_PATCH 1 #define HTTP_PARSER_VERSION_PATCH 1
#include <sys/types.h> #include <sys/types.h>
#if defined(_WIN32) && !defined(__MINGW32__) && (!defined(_MSC_VER) || _MSC_VER<1600) #if defined(_WIN32) && !defined(__MINGW32__) && \
(!defined(_MSC_VER) || _MSC_VER<1600) && !defined(__WINE__)
#include <BaseTsd.h> #include <BaseTsd.h>
#include <stddef.h> #include <stddef.h>
typedef __int8 int8_t; typedef __int8 int8_t;
@@ -52,9 +53,16 @@ typedef unsigned __int64 uint64_t;
# define HTTP_PARSER_STRICT 1 # define HTTP_PARSER_STRICT 1
#endif #endif
/* Maximium header size allowed */ /* Maximium header size allowed. If the macro is not defined
#define HTTP_MAX_HEADER_SIZE (80*1024) * before including this header then the default is used. To
* change the maximum header size, define the macro in the build
* environment (e.g. -DHTTP_MAX_HEADER_SIZE=<value>). To remove
* the effective limit on the size of the header, define the macro
* to a very large number (e.g. -DHTTP_MAX_HEADER_SIZE=0x7fffffff)
*/
#ifndef HTTP_MAX_HEADER_SIZE
# define HTTP_MAX_HEADER_SIZE (80*1024)
#endif
typedef struct http_parser http_parser; typedef struct http_parser http_parser;
typedef struct http_parser_settings http_parser_settings; typedef struct http_parser_settings http_parser_settings;
@@ -69,7 +77,7 @@ typedef struct http_parser_settings http_parser_settings;
* HEAD request which may contain 'Content-Length' or 'Transfer-Encoding: * HEAD request which may contain 'Content-Length' or 'Transfer-Encoding:
* chunked' headers that indicate the presence of a body. * chunked' headers that indicate the presence of a body.
* *
* http_data_cb does not return data chunks. It will be call arbitrarally * http_data_cb does not return data chunks. It will be called arbitrarily
* many times for each string. E.G. you might get 10 callbacks for "on_url" * many times for each string. E.G. you might get 10 callbacks for "on_url"
* each providing just a few characters more data. * each providing just a few characters more data.
*/ */
@@ -88,7 +96,7 @@ typedef int (*http_cb) (http_parser*);
XX(5, CONNECT, CONNECT) \ XX(5, CONNECT, CONNECT) \
XX(6, OPTIONS, OPTIONS) \ XX(6, OPTIONS, OPTIONS) \
XX(7, TRACE, TRACE) \ XX(7, TRACE, TRACE) \
/* webdav */ \ /* WebDAV */ \
XX(8, COPY, COPY) \ XX(8, COPY, COPY) \
XX(9, LOCK, LOCK) \ XX(9, LOCK, LOCK) \
XX(10, MKCOL, MKCOL) \ XX(10, MKCOL, MKCOL) \
@@ -97,19 +105,28 @@ typedef int (*http_cb) (http_parser*);
XX(13, PROPPATCH, PROPPATCH) \ XX(13, PROPPATCH, PROPPATCH) \
XX(14, SEARCH, SEARCH) \ XX(14, SEARCH, SEARCH) \
XX(15, UNLOCK, UNLOCK) \ XX(15, UNLOCK, UNLOCK) \
XX(16, BIND, BIND) \
XX(17, REBIND, REBIND) \
XX(18, UNBIND, UNBIND) \
XX(19, ACL, ACL) \
/* subversion */ \ /* subversion */ \
XX(16, REPORT, REPORT) \ XX(20, REPORT, REPORT) \
XX(17, MKACTIVITY, MKACTIVITY) \ XX(21, MKACTIVITY, MKACTIVITY) \
XX(18, CHECKOUT, CHECKOUT) \ XX(22, CHECKOUT, CHECKOUT) \
XX(19, MERGE, MERGE) \ XX(23, MERGE, MERGE) \
/* upnp */ \ /* upnp */ \
XX(20, MSEARCH, M-SEARCH) \ XX(24, MSEARCH, M-SEARCH) \
XX(21, NOTIFY, NOTIFY) \ XX(25, NOTIFY, NOTIFY) \
XX(22, SUBSCRIBE, SUBSCRIBE) \ XX(26, SUBSCRIBE, SUBSCRIBE) \
XX(23, UNSUBSCRIBE, UNSUBSCRIBE) \ XX(27, UNSUBSCRIBE, UNSUBSCRIBE) \
/* RFC-5789 */ \ /* RFC-5789 */ \
XX(24, PATCH, PATCH) \ XX(28, PATCH, PATCH) \
XX(25, PURGE, PURGE) \ XX(29, PURGE, PURGE) \
/* CalDAV */ \
XX(30, MKCALENDAR, MKCALENDAR) \
/* RFC-2068, section 19.6.1.2 */ \
XX(31, LINK, LINK) \
XX(32, UNLINK, UNLINK) \
enum http_method enum http_method
{ {
@@ -127,9 +144,11 @@ enum flags
{ F_CHUNKED = 1 << 0 { F_CHUNKED = 1 << 0
, F_CONNECTION_KEEP_ALIVE = 1 << 1 , F_CONNECTION_KEEP_ALIVE = 1 << 1
, F_CONNECTION_CLOSE = 1 << 2 , F_CONNECTION_CLOSE = 1 << 2
, F_TRAILING = 1 << 3 , F_CONNECTION_UPGRADE = 1 << 3
, F_UPGRADE = 1 << 4 , F_TRAILING = 1 << 4
, F_SKIPBODY = 1 << 5 , F_UPGRADE = 1 << 5
, F_SKIPBODY = 1 << 6
, F_CONTENTLENGTH = 1 << 7
}; };
@@ -150,6 +169,8 @@ enum flags
XX(CB_body, "the on_body callback failed") \ XX(CB_body, "the on_body callback failed") \
XX(CB_message_complete, "the on_message_complete callback failed") \ XX(CB_message_complete, "the on_message_complete callback failed") \
XX(CB_status, "the on_status callback failed") \ XX(CB_status, "the on_status callback failed") \
XX(CB_chunk_header, "the on_chunk_header callback failed") \
XX(CB_chunk_complete, "the on_chunk_complete callback failed") \
\ \
/* Parsing-related errors */ \ /* Parsing-related errors */ \
XX(INVALID_EOF_STATE, "stream ended at an unexpected time") \ XX(INVALID_EOF_STATE, "stream ended at an unexpected time") \
@@ -170,6 +191,8 @@ enum flags
XX(INVALID_HEADER_TOKEN, "invalid character in header") \ XX(INVALID_HEADER_TOKEN, "invalid character in header") \
XX(INVALID_CONTENT_LENGTH, \ XX(INVALID_CONTENT_LENGTH, \
"invalid character in content-length header") \ "invalid character in content-length header") \
XX(UNEXPECTED_CONTENT_LENGTH, \
"unexpected content-length header") \
XX(INVALID_CHUNK_SIZE, \ XX(INVALID_CHUNK_SIZE, \
"invalid character in chunk size header") \ "invalid character in chunk size header") \
XX(INVALID_CONSTANT, "invalid constant string") \ XX(INVALID_CONSTANT, "invalid constant string") \
@@ -194,10 +217,11 @@ enum http_errno {
struct http_parser { struct http_parser {
/** PRIVATE **/ /** PRIVATE **/
unsigned int type : 2; /* enum http_parser_type */ unsigned int type : 2; /* enum http_parser_type */
unsigned int flags : 6; /* F_* values from 'flags' enum; semi-public */ unsigned int flags : 8; /* F_* values from 'flags' enum; semi-public */
unsigned int state : 8; /* enum state from http_parser.c */ unsigned int state : 7; /* enum state from http_parser.c */
unsigned int header_state : 8; /* enum header_state from http_parser.c */ unsigned int header_state : 7; /* enum header_state from http_parser.c */
unsigned int index : 8; /* index into current matcher */ unsigned int index : 7; /* index into current matcher */
unsigned int lenient_http_headers : 1;
uint32_t nread; /* # bytes read in various scenarios */ uint32_t nread; /* # bytes read in various scenarios */
uint64_t content_length; /* # bytes in body (0 if no Content-Length header) */ uint64_t content_length; /* # bytes in body (0 if no Content-Length header) */
@@ -230,6 +254,11 @@ struct http_parser_settings {
http_cb on_headers_complete; http_cb on_headers_complete;
http_data_cb on_body; http_data_cb on_body;
http_cb on_message_complete; http_cb on_message_complete;
/* When on_chunk_header is called, the current chunk length is stored
* in parser->content_length.
*/
http_cb on_chunk_header;
http_cb on_chunk_complete;
}; };
@@ -271,13 +300,20 @@ struct http_parser_url {
* unsigned major = (version >> 16) & 255; * unsigned major = (version >> 16) & 255;
* unsigned minor = (version >> 8) & 255; * unsigned minor = (version >> 8) & 255;
* unsigned patch = version & 255; * unsigned patch = version & 255;
* printf("http_parser v%u.%u.%u\n", major, minor, version); * printf("http_parser v%u.%u.%u\n", major, minor, patch);
*/ */
unsigned long http_parser_version(void); unsigned long http_parser_version(void);
void http_parser_init(http_parser *parser, enum http_parser_type type); void http_parser_init(http_parser *parser, enum http_parser_type type);
/* Initialize http_parser_settings members to 0
*/
void http_parser_settings_init(http_parser_settings *settings);
/* Executes the parser. Returns number of parsed bytes. Sets
* `parser->http_errno` on error. */
size_t http_parser_execute(http_parser *parser, size_t http_parser_execute(http_parser *parser,
const http_parser_settings *settings, const http_parser_settings *settings,
const char *data, const char *data,
@@ -301,6 +337,9 @@ const char *http_errno_name(enum http_errno err);
/* Return a string description of the given error */ /* Return a string description of the given error */
const char *http_errno_description(enum http_errno err); const char *http_errno_description(enum http_errno err);
/* Initialize all http_parser_url members to 0 */
void http_parser_url_init(struct http_parser_url *u);
/* Parse a URL; return nonzero on failure */ /* Parse a URL; return nonzero on failure */
int http_parser_parse_url(const char *buf, size_t buflen, int http_parser_parse_url(const char *buf, size_t buflen,
int is_connect, int is_connect,

test.c

@@ -39,6 +39,7 @@
#define MAX_HEADERS 13 #define MAX_HEADERS 13
#define MAX_ELEMENT_SIZE 2048 #define MAX_ELEMENT_SIZE 2048
#define MAX_CHUNKS 16
#define MIN(a,b) ((a) < (b) ? (a) : (b)) #define MIN(a,b) ((a) < (b) ? (a) : (b))
@@ -65,6 +66,10 @@ struct message {
char headers [MAX_HEADERS][2][MAX_ELEMENT_SIZE]; char headers [MAX_HEADERS][2][MAX_ELEMENT_SIZE];
int should_keep_alive; int should_keep_alive;
int num_chunks;
int num_chunks_complete;
int chunk_lengths[MAX_CHUNKS];
const char *upgrade; // upgraded body const char *upgrade; // upgraded body
unsigned short http_major; unsigned short http_major;
@@ -301,6 +306,8 @@ const struct message requests[] =
{ { "Transfer-Encoding" , "chunked" } { { "Transfer-Encoding" , "chunked" }
} }
,.body= "all your base are belong to us" ,.body= "all your base are belong to us"
,.num_chunks_complete= 2
,.chunk_lengths= { 0x1e }
} }
#define TWO_CHUNKS_MULT_ZERO_END 9 #define TWO_CHUNKS_MULT_ZERO_END 9
@@ -327,6 +334,8 @@ const struct message requests[] =
{ { "Transfer-Encoding", "chunked" } { { "Transfer-Encoding", "chunked" }
} }
,.body= "hello world" ,.body= "hello world"
,.num_chunks_complete= 3
,.chunk_lengths= { 5, 6 }
} }
#define CHUNKED_W_TRAILING_HEADERS 10 #define CHUNKED_W_TRAILING_HEADERS 10
@@ -357,6 +366,8 @@ const struct message requests[] =
, { "Content-Type", "text/plain" } , { "Content-Type", "text/plain" }
} }
,.body= "hello world" ,.body= "hello world"
,.num_chunks_complete= 3
,.chunk_lengths= { 5, 6 }
} }
#define CHUNKED_W_BULLSHIT_AFTER_LENGTH 11 #define CHUNKED_W_BULLSHIT_AFTER_LENGTH 11
@@ -383,6 +394,8 @@ const struct message requests[] =
{ { "Transfer-Encoding", "chunked" } { { "Transfer-Encoding", "chunked" }
} }
,.body= "hello world" ,.body= "hello world"
,.num_chunks_complete= 3
,.chunk_lengths= { 5, 6 }
} }
#define WITH_QUOTES 12 #define WITH_QUOTES 12
@@ -608,8 +621,14 @@ const struct message requests[] =
" mno \r\n" " mno \r\n"
"\t \tqrs\r\n" "\t \tqrs\r\n"
"Line2: \t line2\t\r\n" "Line2: \t line2\t\r\n"
"Line3:\r\n"
" line3\r\n"
"Line4: \r\n"
" \r\n"
"Connection:\r\n"
" close\r\n"
"\r\n" "\r\n"
,.should_keep_alive= TRUE ,.should_keep_alive= FALSE
,.message_complete_on_eof= FALSE ,.message_complete_on_eof= FALSE
,.http_major= 1 ,.http_major= 1
,.http_minor= 1 ,.http_minor= 1
@@ -618,9 +637,12 @@ const struct message requests[] =
,.fragment= "" ,.fragment= ""
,.request_path= "/" ,.request_path= "/"
,.request_url= "/" ,.request_url= "/"
,.num_headers= 2 ,.num_headers= 5
,.headers= { { "Line1", "abcdefghijklmno qrs" } ,.headers= { { "Line1", "abc\tdef ghi\t\tjkl mno \t \tqrs" }
, { "Line2", "line2\t" } , { "Line2", "line2\t" }
, { "Line3", "line3" }
, { "Line4", "" }
, { "Connection", "close" },
} }
,.body= "" ,.body= ""
} }
@@ -904,6 +926,232 @@ const struct message requests[] =
,.body= "" ,.body= ""
} }
#define LINE_FOLDING_IN_HEADER_WITH_LF 34
, {.name= "line folding in header value"
,.type= HTTP_REQUEST
,.raw= "GET / HTTP/1.1\n"
"Line1: abc\n"
"\tdef\n"
" ghi\n"
"\t\tjkl\n"
" mno \n"
"\t \tqrs\n"
"Line2: \t line2\t\n"
"Line3:\n"
" line3\n"
"Line4: \n"
" \n"
"Connection:\n"
" close\n"
"\n"
,.should_keep_alive= FALSE
,.message_complete_on_eof= FALSE
,.http_major= 1
,.http_minor= 1
,.method= HTTP_GET
,.query_string= ""
,.fragment= ""
,.request_path= "/"
,.request_url= "/"
,.num_headers= 5
,.headers= { { "Line1", "abc\tdef ghi\t\tjkl mno \t \tqrs" }
, { "Line2", "line2\t" }
, { "Line3", "line3" }
, { "Line4", "" }
, { "Connection", "close" },
}
,.body= ""
}
#define CONNECTION_MULTI 35
, {.name = "multiple connection header values with folding"
,.type= HTTP_REQUEST
,.raw= "GET /demo HTTP/1.1\r\n"
"Host: example.com\r\n"
"Connection: Something,\r\n"
" Upgrade, ,Keep-Alive\r\n"
"Sec-WebSocket-Key2: 12998 5 Y3 1 .P00\r\n"
"Sec-WebSocket-Protocol: sample\r\n"
"Upgrade: WebSocket\r\n"
"Sec-WebSocket-Key1: 4 @1 46546xW%0l 1 5\r\n"
"Origin: http://example.com\r\n"
"\r\n"
"Hot diggity dogg"
,.should_keep_alive= TRUE
,.message_complete_on_eof= FALSE
,.http_major= 1
,.http_minor= 1
,.method= HTTP_GET
,.query_string= ""
,.fragment= ""
,.request_path= "/demo"
,.request_url= "/demo"
,.num_headers= 7
,.upgrade="Hot diggity dogg"
,.headers= { { "Host", "example.com" }
, { "Connection", "Something, Upgrade, ,Keep-Alive" }
, { "Sec-WebSocket-Key2", "12998 5 Y3 1 .P00" }
, { "Sec-WebSocket-Protocol", "sample" }
, { "Upgrade", "WebSocket" }
, { "Sec-WebSocket-Key1", "4 @1 46546xW%0l 1 5" }
, { "Origin", "http://example.com" }
}
,.body= ""
}
#define CONNECTION_MULTI_LWS 36
, {.name = "multiple connection header values with folding and lws"
,.type= HTTP_REQUEST
,.raw= "GET /demo HTTP/1.1\r\n"
"Connection: keep-alive, upgrade\r\n"
"Upgrade: WebSocket\r\n"
"\r\n"
"Hot diggity dogg"
,.should_keep_alive= TRUE
,.message_complete_on_eof= FALSE
,.http_major= 1
,.http_minor= 1
,.method= HTTP_GET
,.query_string= ""
,.fragment= ""
,.request_path= "/demo"
,.request_url= "/demo"
,.num_headers= 2
,.upgrade="Hot diggity dogg"
,.headers= { { "Connection", "keep-alive, upgrade" }
, { "Upgrade", "WebSocket" }
}
,.body= ""
}
#define CONNECTION_MULTI_LWS_CRLF 37
, {.name = "multiple connection header values with folding and lws"
,.type= HTTP_REQUEST
,.raw= "GET /demo HTTP/1.1\r\n"
"Connection: keep-alive, \r\n upgrade\r\n"
"Upgrade: WebSocket\r\n"
"\r\n"
"Hot diggity dogg"
,.should_keep_alive= TRUE
,.message_complete_on_eof= FALSE
,.http_major= 1
,.http_minor= 1
,.method= HTTP_GET
,.query_string= ""
,.fragment= ""
,.request_path= "/demo"
,.request_url= "/demo"
,.num_headers= 2
,.upgrade="Hot diggity dogg"
,.headers= { { "Connection", "keep-alive, upgrade" }
, { "Upgrade", "WebSocket" }
}
,.body= ""
}
#define UPGRADE_POST_REQUEST 38
, {.name = "upgrade post request"
,.type= HTTP_REQUEST
,.raw= "POST /demo HTTP/1.1\r\n"
"Host: example.com\r\n"
"Connection: Upgrade\r\n"
"Upgrade: HTTP/2.0\r\n"
"Content-Length: 15\r\n"
"\r\n"
"sweet post body"
"Hot diggity dogg"
,.should_keep_alive= TRUE
,.message_complete_on_eof= FALSE
,.http_major= 1
,.http_minor= 1
,.method= HTTP_POST
,.request_path= "/demo"
,.request_url= "/demo"
,.num_headers= 4
,.upgrade="Hot diggity dogg"
,.headers= { { "Host", "example.com" }
, { "Connection", "Upgrade" }
, { "Upgrade", "HTTP/2.0" }
, { "Content-Length", "15" }
}
,.body= "sweet post body"
}
#define CONNECT_WITH_BODY_REQUEST 39
, {.name = "connect with body request"
,.type= HTTP_REQUEST
,.raw= "CONNECT foo.bar.com:443 HTTP/1.0\r\n"
"User-agent: Mozilla/1.1N\r\n"
"Proxy-authorization: basic aGVsbG86d29ybGQ=\r\n"
"Content-Length: 10\r\n"
"\r\n"
"blarfcicle"
,.should_keep_alive= FALSE
,.message_complete_on_eof= FALSE
,.http_major= 1
,.http_minor= 0
,.method= HTTP_CONNECT
,.request_url= "foo.bar.com:443"
,.num_headers= 3
,.upgrade="blarfcicle"
,.headers= { { "User-agent", "Mozilla/1.1N" }
, { "Proxy-authorization", "basic aGVsbG86d29ybGQ=" }
, { "Content-Length", "10" }
}
,.body= ""
}
/* Examples from the Internet draft for LINK/UNLINK methods:
* https://tools.ietf.org/id/draft-snell-link-method-01.html#rfc.section.5
*/
#define LINK_REQUEST 40
, {.name = "link request"
,.type= HTTP_REQUEST
,.raw= "LINK /images/my_dog.jpg HTTP/1.1\r\n"
"Host: example.com\r\n"
"Link: <http://example.com/profiles/joe>; rel=\"tag\"\r\n"
"Link: <http://example.com/profiles/sally>; rel=\"tag\"\r\n"
"\r\n"
,.should_keep_alive= TRUE
,.message_complete_on_eof= FALSE
,.http_major= 1
,.http_minor= 1
,.method= HTTP_LINK
,.request_path= "/images/my_dog.jpg"
,.request_url= "/images/my_dog.jpg"
,.query_string= ""
,.fragment= ""
,.num_headers= 3
,.headers= { { "Host", "example.com" }
, { "Link", "<http://example.com/profiles/joe>; rel=\"tag\"" }
, { "Link", "<http://example.com/profiles/sally>; rel=\"tag\"" }
}
,.body= ""
}
#define UNLINK_REQUEST 41
, {.name = "link request"
,.type= HTTP_REQUEST
,.raw= "UNLINK /images/my_dog.jpg HTTP/1.1\r\n"
"Host: example.com\r\n"
"Link: <http://example.com/profiles/sally>; rel=\"tag\"\r\n"
"\r\n"
,.should_keep_alive= TRUE
,.message_complete_on_eof= FALSE
,.http_major= 1
,.http_minor= 1
,.method= HTTP_UNLINK
,.request_path= "/images/my_dog.jpg"
,.request_url= "/images/my_dog.jpg"
,.query_string= ""
,.fragment= ""
,.num_headers= 2
,.headers= { { "Host", "example.com" }
, { "Link", "<http://example.com/profiles/sally>; rel=\"tag\"" }
}
,.body= ""
}
, {.name= NULL } /* sentinel */ , {.name= NULL } /* sentinel */
}; };
@@ -1064,7 +1312,8 @@ const struct message responses[] =
,.body = ,.body =
"This is the data in the first chunk\r\n" "This is the data in the first chunk\r\n"
"and this is the second one\r\n" "and this is the second one\r\n"
,.num_chunks_complete= 3
,.chunk_lengths= { 0x25, 0x1c }
} }
#define NO_CARRIAGE_RET 5 #define NO_CARRIAGE_RET 5
@@ -1218,6 +1467,8 @@ const struct message responses[] =
, { "Connection", "close" } , { "Connection", "close" }
} }
,.body= "" ,.body= ""
,.num_chunks_complete= 1
,.chunk_lengths= {}
} }
#define NON_ASCII_IN_STATUS_LINE 10 #define NON_ASCII_IN_STATUS_LINE 10
@@ -1400,6 +1651,7 @@ const struct message responses[] =
} }
,.body_size= 0 ,.body_size= 0
,.body= "" ,.body= ""
,.num_chunks_complete= 1
} }
#if !HTTP_PARSER_STRICT #if !HTTP_PARSER_STRICT
@@ -1473,6 +1725,8 @@ const struct message responses[] =
, { "Transfer-Encoding", "chunked" } , { "Transfer-Encoding", "chunked" }
} }
,.body= "\n" ,.body= "\n"
,.num_chunks_complete= 2
,.chunk_lengths= { 1 }
} }
#define EMPTY_REASON_PHRASE_AFTER_SPACE 20 #define EMPTY_REASON_PHRASE_AFTER_SPACE 20
@@ -1708,6 +1962,35 @@ response_status_cb (http_parser *p, const char *buf, size_t len)
return 0; return 0;
} }
int
chunk_header_cb (http_parser *p)
{
assert(p == parser);
int chunk_idx = messages[num_messages].num_chunks;
messages[num_messages].num_chunks++;
if (chunk_idx < MAX_CHUNKS) {
messages[num_messages].chunk_lengths[chunk_idx] = p->content_length;
}
return 0;
}
int
chunk_complete_cb (http_parser *p)
{
assert(p == parser);
/* Here we want to verify that each chunk_header_cb is matched by a
* chunk_complete_cb, so not only should the total number of calls to
* both callbacks be the same, but they also should be interleaved
* properly */
assert(messages[num_messages].num_chunks ==
messages[num_messages].num_chunks_complete + 1);
messages[num_messages].num_chunks_complete++;
return 0;
}
/* These dontcall_* callbacks exist so that we can verify that when we're /* These dontcall_* callbacks exist so that we can verify that when we're
* paused, no additional callbacks are invoked */ * paused, no additional callbacks are invoked */
int int
@@ -1776,6 +2059,23 @@ dontcall_response_status_cb (http_parser *p, const char *buf, size_t len)
abort(); abort();
} }
int
dontcall_chunk_header_cb (http_parser *p)
{
if (p) { } // gcc
fprintf(stderr, "\n\n*** on_chunk_header() called on paused parser ***\n\n");
exit(1);
}
int
dontcall_chunk_complete_cb (http_parser *p)
{
if (p) { } // gcc
fprintf(stderr, "\n\n*** on_chunk_complete() "
"called on paused parser ***\n\n");
exit(1);
}
static http_parser_settings settings_dontcall = static http_parser_settings settings_dontcall =
{.on_message_begin = dontcall_message_begin_cb {.on_message_begin = dontcall_message_begin_cb
,.on_header_field = dontcall_header_field_cb ,.on_header_field = dontcall_header_field_cb
@@ -1785,6 +2085,8 @@ static http_parser_settings settings_dontcall =
,.on_body = dontcall_body_cb ,.on_body = dontcall_body_cb
,.on_headers_complete = dontcall_headers_complete_cb ,.on_headers_complete = dontcall_headers_complete_cb
,.on_message_complete = dontcall_message_complete_cb ,.on_message_complete = dontcall_message_complete_cb
,.on_chunk_header = dontcall_chunk_header_cb
,.on_chunk_complete = dontcall_chunk_complete_cb
}; };
/* These pause_* callbacks always pause the parser and just invoke the regular /* These pause_* callbacks always pause the parser and just invoke the regular
@@ -1855,6 +2157,22 @@ pause_response_status_cb (http_parser *p, const char *buf, size_t len)
return response_status_cb(p, buf, len); return response_status_cb(p, buf, len);
} }
int
pause_chunk_header_cb (http_parser *p)
{
http_parser_pause(p, 1);
*current_pause_parser = settings_dontcall;
return chunk_header_cb(p);
}
int
pause_chunk_complete_cb (http_parser *p)
{
http_parser_pause(p, 1);
*current_pause_parser = settings_dontcall;
return chunk_complete_cb(p);
}
static http_parser_settings settings_pause = static http_parser_settings settings_pause =
{.on_message_begin = pause_message_begin_cb {.on_message_begin = pause_message_begin_cb
,.on_header_field = pause_header_field_cb ,.on_header_field = pause_header_field_cb
@@ -1864,6 +2182,8 @@ static http_parser_settings settings_pause =
,.on_body = pause_body_cb ,.on_body = pause_body_cb
,.on_headers_complete = pause_headers_complete_cb ,.on_headers_complete = pause_headers_complete_cb
,.on_message_complete = pause_message_complete_cb ,.on_message_complete = pause_message_complete_cb
,.on_chunk_header = pause_chunk_header_cb
,.on_chunk_complete = pause_chunk_complete_cb
}; };
static http_parser_settings settings = static http_parser_settings settings =
@@ -1875,6 +2195,8 @@ static http_parser_settings settings =
,.on_body = body_cb ,.on_body = body_cb
,.on_headers_complete = headers_complete_cb ,.on_headers_complete = headers_complete_cb
,.on_message_complete = message_complete_cb ,.on_message_complete = message_complete_cb
,.on_chunk_header = chunk_header_cb
,.on_chunk_complete = chunk_complete_cb
}; };
static http_parser_settings settings_count_body = static http_parser_settings settings_count_body =
@@ -1886,6 +2208,8 @@ static http_parser_settings settings_count_body =
,.on_body = count_body_cb ,.on_body = count_body_cb
,.on_headers_complete = headers_complete_cb ,.on_headers_complete = headers_complete_cb
,.on_message_complete = message_complete_cb ,.on_message_complete = message_complete_cb
,.on_chunk_header = chunk_header_cb
,.on_chunk_complete = chunk_complete_cb
}; };
static http_parser_settings settings_null = static http_parser_settings settings_null =
@@ -1897,6 +2221,8 @@ static http_parser_settings settings_null =
,.on_body = 0 ,.on_body = 0
,.on_headers_complete = 0 ,.on_headers_complete = 0
,.on_message_complete = 0 ,.on_message_complete = 0
,.on_chunk_header = 0
,.on_chunk_complete = 0
}; };
void void
@@ -2065,6 +2391,12 @@ message_eq (int index, const struct message *expected)
MESSAGE_CHECK_STR_EQ(expected, m, body); MESSAGE_CHECK_STR_EQ(expected, m, body);
} }
assert(m->num_chunks == m->num_chunks_complete);
MESSAGE_CHECK_NUM_EQ(expected, m, num_chunks_complete);
for (i = 0; i < m->num_chunks && i < MAX_CHUNKS; i++) {
MESSAGE_CHECK_NUM_EQ(expected, m, chunk_lengths[i]);
}
MESSAGE_CHECK_NUM_EQ(expected, m, num_headers); MESSAGE_CHECK_NUM_EQ(expected, m, num_headers);
int r; int r;
@@ -2161,7 +2493,6 @@ print_error (const char *raw, size_t error_location)
break; break;
case '\n': case '\n':
char_len = 2;
fprintf(stderr, "\\n\n"); fprintf(stderr, "\\n\n");
if (this_line) goto print; if (this_line) goto print;
@@ -2639,6 +2970,59 @@ const struct url_test url_tests[] =
,.rv=1 /* s_dead */ ,.rv=1 /* s_dead */
} }
, {.name="ipv6 address with Zone ID"
,.url="http://[fe80::a%25eth0]/"
,.is_connect=0
,.u=
{.field_set= (1<<UF_SCHEMA) | (1<<UF_HOST) | (1<<UF_PATH)
,.port=0
,.field_data=
{{ 0, 4 } /* UF_SCHEMA */
,{ 8, 14 } /* UF_HOST */
,{ 0, 0 } /* UF_PORT */
,{ 23, 1 } /* UF_PATH */
,{ 0, 0 } /* UF_QUERY */
,{ 0, 0 } /* UF_FRAGMENT */
,{ 0, 0 } /* UF_USERINFO */
}
}
,.rv=0
}
, {.name="ipv6 address with Zone ID, but '%' is not percent-encoded"
,.url="http://[fe80::a%eth0]/"
,.is_connect=0
,.u=
{.field_set= (1<<UF_SCHEMA) | (1<<UF_HOST) | (1<<UF_PATH)
,.port=0
,.field_data=
{{ 0, 4 } /* UF_SCHEMA */
,{ 8, 12 } /* UF_HOST */
,{ 0, 0 } /* UF_PORT */
,{ 21, 1 } /* UF_PATH */
,{ 0, 0 } /* UF_QUERY */
,{ 0, 0 } /* UF_FRAGMENT */
,{ 0, 0 } /* UF_USERINFO */
}
}
,.rv=0
}
, {.name="ipv6 address ending with '%'"
,.url="http://[fe80::a%]/"
,.rv=1 /* s_dead */
}
, {.name="ipv6 address with Zone ID including bad character"
,.url="http://[fe80::a%$HOME]/"
,.rv=1 /* s_dead */
}
, {.name="just ipv6 Zone ID"
,.url="http://[%eth0]/"
,.rv=1 /* s_dead */
}
#if HTTP_PARSER_STRICT #if HTTP_PARSER_STRICT
, {.name="tab in URL" , {.name="tab in URL"
@@ -2779,7 +3163,7 @@ test_message (const struct message *message)
if (msg1len) { if (msg1len) {
read = parse(msg1, msg1len); read = parse(msg1, msg1len);
if (message->upgrade && parser->upgrade) { if (message->upgrade && parser->upgrade && num_messages > 0) {
messages[num_messages - 1].upgrade = msg1 + read; messages[num_messages - 1].upgrade = msg1 + read;
goto test; goto test;
} }
@@ -2864,15 +3248,11 @@ test_simple (const char *buf, enum http_errno err_expected)
{ {
parser_init(HTTP_REQUEST); parser_init(HTTP_REQUEST);
size_t parsed;
int pass;
enum http_errno err; enum http_errno err;
parsed = parse(buf, strlen(buf)); parse(buf, strlen(buf));
pass = (parsed == strlen(buf));
err = HTTP_PARSER_ERRNO(parser); err = HTTP_PARSER_ERRNO(parser);
parsed = parse(NULL, 0); parse(NULL, 0);
pass &= (parsed == 0);
parser_free(); parser_free();
@@ -2890,6 +3270,155 @@ test_simple (const char *buf, enum http_errno err_expected)
} }
} }
void
test_invalid_header_content (int req, const char* str)
{
http_parser parser;
http_parser_init(&parser, req ? HTTP_REQUEST : HTTP_RESPONSE);
size_t parsed;
const char *buf;
buf = req ?
"GET / HTTP/1.1\r\n" :
"HTTP/1.1 200 OK\r\n";
parsed = http_parser_execute(&parser, &settings_null, buf, strlen(buf));
assert(parsed == strlen(buf));
buf = str;
size_t buflen = strlen(buf);
parsed = http_parser_execute(&parser, &settings_null, buf, buflen);
if (parsed != buflen) {
assert(HTTP_PARSER_ERRNO(&parser) == HPE_INVALID_HEADER_TOKEN);
return;
}
fprintf(stderr,
"\n*** Error expected but none in invalid header content test ***\n");
abort();
}
void
test_invalid_header_field_content_error (int req)
{
test_invalid_header_content(req, "Foo: F\01ailure");
test_invalid_header_content(req, "Foo: B\02ar");
}
void
test_invalid_header_field (int req, const char* str)
{
http_parser parser;
http_parser_init(&parser, req ? HTTP_REQUEST : HTTP_RESPONSE);
size_t parsed;
const char *buf;
buf = req ?
"GET / HTTP/1.1\r\n" :
"HTTP/1.1 200 OK\r\n";
parsed = http_parser_execute(&parser, &settings_null, buf, strlen(buf));
assert(parsed == strlen(buf));
buf = str;
size_t buflen = strlen(buf);
parsed = http_parser_execute(&parser, &settings_null, buf, buflen);
if (parsed != buflen) {
assert(HTTP_PARSER_ERRNO(&parser) == HPE_INVALID_HEADER_TOKEN);
return;
}
fprintf(stderr,
"\n*** Error expected but none in invalid header token test ***\n");
abort();
}
void
test_invalid_header_field_token_error (int req)
{
test_invalid_header_field(req, "Fo@: Failure");
test_invalid_header_field(req, "Foo\01\test: Bar");
}
void
test_double_content_length_error (int req)
{
http_parser parser;
http_parser_init(&parser, req ? HTTP_REQUEST : HTTP_RESPONSE);
size_t parsed;
const char *buf;
buf = req ?
"GET / HTTP/1.1\r\n" :
"HTTP/1.1 200 OK\r\n";
parsed = http_parser_execute(&parser, &settings_null, buf, strlen(buf));
assert(parsed == strlen(buf));
buf = "Content-Length: 0\r\nContent-Length: 1\r\n\r\n";
size_t buflen = strlen(buf);
parsed = http_parser_execute(&parser, &settings_null, buf, buflen);
if (parsed != buflen) {
assert(HTTP_PARSER_ERRNO(&parser) == HPE_UNEXPECTED_CONTENT_LENGTH);
return;
}
fprintf(stderr,
"\n*** Error expected but none in double content-length test ***\n");
abort();
}
void
test_chunked_content_length_error (int req)
{
http_parser parser;
http_parser_init(&parser, req ? HTTP_REQUEST : HTTP_RESPONSE);
size_t parsed;
const char *buf;
buf = req ?
"GET / HTTP/1.1\r\n" :
"HTTP/1.1 200 OK\r\n";
parsed = http_parser_execute(&parser, &settings_null, buf, strlen(buf));
assert(parsed == strlen(buf));
buf = "Transfer-Encoding: chunked\r\nContent-Length: 1\r\n\r\n";
size_t buflen = strlen(buf);
parsed = http_parser_execute(&parser, &settings_null, buf, buflen);
if (parsed != buflen) {
assert(HTTP_PARSER_ERRNO(&parser) == HPE_UNEXPECTED_CONTENT_LENGTH);
return;
}
fprintf(stderr,
"\n*** Error expected but none in chunked content-length test ***\n");
abort();
}
void
test_header_cr_no_lf_error (int req)
{
http_parser parser;
http_parser_init(&parser, req ? HTTP_REQUEST : HTTP_RESPONSE);
size_t parsed;
const char *buf;
buf = req ?
"GET / HTTP/1.1\r\n" :
"HTTP/1.1 200 OK\r\n";
parsed = http_parser_execute(&parser, &settings_null, buf, strlen(buf));
assert(parsed == strlen(buf));
buf = "Foo: 1\rBar: 1\r\n\r\n";
size_t buflen = strlen(buf);
parsed = http_parser_execute(&parser, &settings_null, buf, buflen);
if (parsed != buflen) {
assert(HTTP_PARSER_ERRNO(&parser) == HPE_LF_EXPECTED);
return;
}
fprintf(stderr,
"\n*** Error expected but none in header whitespace test ***\n");
abort();
}
void void
test_header_overflow_error (int req) test_header_overflow_error (int req)
{ {
@@ -2918,6 +3447,22 @@ test_header_overflow_error (int req)
abort(); abort();
} }
void
test_header_nread_value ()
{
http_parser parser;
http_parser_init(&parser, HTTP_REQUEST);
size_t parsed;
const char *buf;
buf = "GET / HTTP/1.1\r\nheader: value\nhdr: value\r\n";
parsed = http_parser_execute(&parser, &settings_null, buf, strlen(buf));
assert(parsed == strlen(buf));
assert(parser.nread == strlen(buf));
}
static void static void
test_content_length_overflow (const char *buf, size_t buflen, int expect_ok) test_content_length_overflow (const char *buf, size_t buflen, int expect_ok)
{ {
@@ -3284,6 +3829,9 @@ main (void)
test_parse_url(); test_parse_url();
test_method_str(); test_method_str();
//// NREAD
test_header_nread_value();
//// OVERFLOW CONDITIONS //// OVERFLOW CONDITIONS
test_header_overflow_error(HTTP_REQUEST); test_header_overflow_error(HTTP_REQUEST);
@@ -3297,6 +3845,18 @@ main (void)
test_header_content_length_overflow_error(); test_header_content_length_overflow_error();
test_chunk_content_length_overflow_error(); test_chunk_content_length_overflow_error();
//// HEADER FIELD CONDITIONS
test_double_content_length_error(HTTP_REQUEST);
test_chunked_content_length_error(HTTP_REQUEST);
test_header_cr_no_lf_error(HTTP_REQUEST);
test_invalid_header_field_token_error(HTTP_REQUEST);
test_invalid_header_field_content_error(HTTP_REQUEST);
test_double_content_length_error(HTTP_RESPONSE);
test_chunked_content_length_error(HTTP_RESPONSE);
test_header_cr_no_lf_error(HTTP_RESPONSE);
test_invalid_header_field_token_error(HTTP_RESPONSE);
test_invalid_header_field_content_error(HTTP_RESPONSE);
//// RESPONSES //// RESPONSES
for (i = 0; i < response_count; i++) { for (i = 0; i < response_count; i++) {
@@ -3343,7 +3903,11 @@ main (void)
, { "Content-Type", "text/plain" } , { "Content-Type", "text/plain" }
} }
,.body_size= 31337*1024 ,.body_size= 31337*1024
,.num_chunks_complete= 31338
}; };
for (i = 0; i < MAX_CHUNKS; i++) {
large_chunked.chunk_lengths[i] = 1024;
}
test_message_count_body(&large_chunked); test_message_count_body(&large_chunked);
free(msg); free(msg);
} }
@@ -3392,7 +3956,12 @@ main (void)
"MOVE", "MOVE",
"PROPFIND", "PROPFIND",
"PROPPATCH", "PROPPATCH",
"SEARCH",
"UNLOCK", "UNLOCK",
"BIND",
"REBIND",
"UNBIND",
"ACL",
"REPORT", "REPORT",
"MKACTIVITY", "MKACTIVITY",
"CHECKOUT", "CHECKOUT",
@@ -3402,6 +3971,10 @@ main (void)
"SUBSCRIBE", "SUBSCRIBE",
"UNSUBSCRIBE", "UNSUBSCRIBE",
"PATCH", "PATCH",
"PURGE",
"MKCALENDAR",
"LINK",
"UNLINK",
0 }; 0 };
const char **this_method; const char **this_method;
for (this_method = all_methods; *this_method; this_method++) { for (this_method = all_methods; *this_method; this_method++) {
@@ -3430,6 +4003,13 @@ main (void)
test_simple(buf, HPE_INVALID_METHOD); test_simple(buf, HPE_INVALID_METHOD);
} }
// illegal header field name line folding
test_simple("GET / HTTP/1.1\r\n"
"name\r\n"
" : value\r\n"
"\r\n",
HPE_INVALID_HEADER_TOKEN);
const char *dumbfuck2 = const char *dumbfuck2 =
"GET / HTTP/1.1\r\n" "GET / HTTP/1.1\r\n"
"X-SSL-Bullshit: -----BEGIN CERTIFICATE-----\r\n" "X-SSL-Bullshit: -----BEGIN CERTIFICATE-----\r\n"
@@ -3467,6 +4047,22 @@ main (void)
"\r\n"; "\r\n";
test_simple(dumbfuck2, HPE_OK); test_simple(dumbfuck2, HPE_OK);
const char *corrupted_connection =
"GET / HTTP/1.1\r\n"
"Host: www.example.com\r\n"
"Connection\r\033\065\325eep-Alive\r\n"
"Accept-Encoding: gzip\r\n"
"\r\n";
test_simple(corrupted_connection, HPE_INVALID_HEADER_TOKEN);
const char *corrupted_header_name =
"GET / HTTP/1.1\r\n"
"Host: www.example.com\r\n"
"X-Some-Header\r\033\065\325eep-Alive\r\n"
"Accept-Encoding: gzip\r\n"
"\r\n";
test_simple(corrupted_header_name, HPE_INVALID_HEADER_TOKEN);
#if 0 #if 0
// NOTE(Wed Nov 18 11:57:27 CET 2009) this seems okay. we just read body // NOTE(Wed Nov 18 11:57:27 CET 2009) this seems okay. we just read body
// until EOF. // until EOF.