mirror of https://github.com/oven-sh/bun
synced 2026-02-02 15:08:46 +00:00
compat(node:http) more (#19527)
Co-authored-by: cirospaciari <6379399+cirospaciari@users.noreply.github.com>
Co-authored-by: 190n <ben@bun.sh>
@@ -249,6 +249,7 @@ public:
     }

     static TemplatedApp<SSL>* create(SocketContextOptions options = {}) {
         auto* httpContext = HttpContext<SSL>::create(Loop::get(), options);
         if (!httpContext) {
             return nullptr;
@@ -628,8 +629,14 @@ public:
         return std::move(*this);
     }

-    TemplatedApp &&setRequireHostHeader(bool value) {
-        httpContext->getSocketContextData()->flags.requireHostHeader = value;
+    TemplatedApp &&setFlags(bool requireHostHeader, bool useStrictMethodValidation) {
+        httpContext->getSocketContextData()->flags.requireHostHeader = requireHostHeader;
+        httpContext->getSocketContextData()->flags.useStrictMethodValidation = useStrictMethodValidation;
         return std::move(*this);
     }

+    TemplatedApp &&setMaxHTTPHeaderSize(uint64_t maxHeaderSize) {
+        httpContext->getSocketContextData()->maxHeaderSize = maxHeaderSize;
+        return std::move(*this);
+    }
+
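
The two setters above follow TemplatedApp's existing move-chaining builder style. A minimal usage sketch (hypothetical call site, not part of this commit; the option values are illustrative):

    // Hypothetical wiring of the new options on a plain uWS::App.
    // setFlags() supersedes setRequireHostHeader() and sets both parser flags at once;
    // setMaxHTTPHeaderSize(0) would keep the default "no limit" behavior.
    uWS::App()
        .setFlags(/* requireHostHeader */ true, /* useStrictMethodValidation */ false)
        .setMaxHTTPHeaderSize(16 * 1024)
        .get("/*", [](auto *res, auto *req) {
            res->end("ok");
        })
        .listen(3000, [](auto *listenSocket) {})
        .run();
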
@@ -29,53 +29,110 @@

 namespace uWS {

-constexpr uint64_t STATE_HAS_SIZE = 1ull << (sizeof(uint64_t) * 8 - 1);//0x80000000;
-constexpr uint64_t STATE_IS_CHUNKED = 1ull << (sizeof(uint64_t) * 8 - 2);//0x40000000;
-constexpr uint64_t STATE_SIZE_MASK = ~(3ull << (sizeof(uint64_t) * 8 - 2));//0x3FFFFFFF;
-constexpr uint64_t STATE_IS_ERROR = ~0ull;//0xFFFFFFFF;
-constexpr uint64_t STATE_SIZE_OVERFLOW = 0x0Full << (sizeof(uint64_t) * 8 - 8);//0x0F000000;
+constexpr uint64_t STATE_HAS_SIZE = 1ull << (sizeof(uint64_t) * 8 - 1);//0x8000000000000000;
+constexpr uint64_t STATE_IS_CHUNKED = 1ull << (sizeof(uint64_t) * 8 - 2);//0x4000000000000000;
+constexpr uint64_t STATE_IS_CHUNKED_EXTENSION = 1ull << (sizeof(uint64_t) * 8 - 3);//0x2000000000000000;
+constexpr uint64_t STATE_SIZE_MASK = ~(STATE_HAS_SIZE | STATE_IS_CHUNKED | STATE_IS_CHUNKED_EXTENSION);//0x1FFFFFFFFFFFFFFF;
+constexpr uint64_t STATE_IS_ERROR = ~0ull;//0xFFFFFFFFFFFFFFFF;
+constexpr uint64_t STATE_SIZE_OVERFLOW = 0x0Full << (sizeof(uint64_t) * 8 - 8);//0x0F00000000000000;

 inline unsigned int chunkSize(uint64_t state) {
     return state & STATE_SIZE_MASK;
 }

+inline bool isParsingChunkedExtension(uint64_t state) {
+    return (state & STATE_IS_CHUNKED_EXTENSION) != 0;
+}
+
 /* Reads hex number until CR or out of data to consume. Updates state. Returns bytes consumed. */
 inline void consumeHexNumber(std::string_view &data, uint64_t &state) {
-    /* Consume everything higher than 32 */
-    while (data.length() && data[0] > 32) {
-
-        unsigned char digit = (unsigned char) data[0];
-        if (digit >= 'a') {
-            digit = (unsigned char) (digit - ('a' - ':'));
-        } else if (digit >= 'A') {
-            digit = (unsigned char) (digit - ('A' - ':'));
-        }
-
-        unsigned int number = ((unsigned int) digit - (unsigned int) '0');
-
-        if (number > 16 || (chunkSize(state) & STATE_SIZE_OVERFLOW)) {
-            state = STATE_IS_ERROR;
-            return;
-        }
-
-        // extract state bits
-        uint64_t bits = /*state &*/ STATE_IS_CHUNKED;
-
-        state = (state & STATE_SIZE_MASK) * 16ull + number;
-
-        state |= bits;
-        data.remove_prefix(1);
-    }
-    /* Consume everything not \n */
-    while (data.length() && data[0] != '\n') {
-        data.remove_prefix(1);
-    }
-    /* Now we stand on \n so consume it and enable size */
-    if (data.length()) {
-        state += 2; // include the two last \r\n
-        state |= STATE_HAS_SIZE | STATE_IS_CHUNKED;
-        data.remove_prefix(1);
-    }
+    /* RFC 9110: 5.5 Field Values (TLDR; anything above 31 is allowed; \r and \n depending on context) */
+    if (!isParsingChunkedExtension(state)) {
+        /* Consume everything higher than 32 and not ; (extension) */
+        while (data.length() && data[0] > 32 && data[0] != ';') {
+
+            unsigned char digit = (unsigned char) data[0];
+            if (digit >= 'a') {
+                digit = (unsigned char) (digit - ('a' - ':'));
+            } else if (digit >= 'A') {
+                digit = (unsigned char) (digit - ('A' - ':'));
+            }
+
+            unsigned int number = ((unsigned int) digit - (unsigned int) '0');
+
+            if (number > 16 || (chunkSize(state) & STATE_SIZE_OVERFLOW)) {
+                state = STATE_IS_ERROR;
+                return;
+            }
+
+            // extract state bits
+            uint64_t bits = /*state &*/ STATE_IS_CHUNKED;
+
+            state = (state & STATE_SIZE_MASK) * 16ull + number;
+
+            state |= bits;
+            data.remove_prefix(1);
+        }
+    }
+
+    auto len = data.length();
+    if (len) {
+        // consume extension
+        if (data[0] == ';' || isParsingChunkedExtension(state)) {
+            // mark that we are parsing a chunked extension
+            state |= STATE_IS_CHUNKED_EXTENSION;
+            /* we got a chunk extension, let's remove it */
+            while (data.length()) {
+                if (data[0] == '\r') {
+                    // we are done parsing the extension
+                    state &= ~STATE_IS_CHUNKED_EXTENSION;
+                    break;
+                }
+                /* RFC 9110: Token format (TLDR; anything below 32 is not allowed)
+                 * TODO: add support for quoted-string values (RFC 9110: 3.2.6. Quoted-String)
+                 * Example of chunked encoding with extensions:
+                 *
+                 * 4;key=value\r\n
+                 * Wiki\r\n
+                 * 5;foo=bar;baz=quux\r\n
+                 * pedia\r\n
+                 * 0\r\n
+                 * \r\n
+                 *
+                 * The chunk size is in hex (4, 5, 0), followed by optional
+                 * semicolon-separated extensions. Extensions consist of a key
+                 * (token) and an optional value. The value may be a token or a
+                 * quoted string. The chunk data follows the CRLF after the
+                 * extensions and must be exactly the size specified.
+                 *
+                 * RFC 7230 Section 4.1.1 defines chunk extensions as:
+                 * chunk-ext = *( ";" chunk-ext-name [ "=" chunk-ext-val ] )
+                 * chunk-ext-name = token
+                 * chunk-ext-val = token / quoted-string
+                 */
+                if (data[0] <= 32) {
+                    state = STATE_IS_ERROR;
+                    return;
+                }
+
+                data.remove_prefix(1);
+            }
+        }
+        if (data.length() >= 2) {
+            /* Consume \r\n */
+            if (data[0] != '\r' || data[1] != '\n') {
+                state = STATE_IS_ERROR;
+                return;
+            }
+            state += 2; // include the two last \r\n
+            state |= STATE_HAS_SIZE | STATE_IS_CHUNKED;
+            data.remove_prefix(2);
+        }
+    }
+    // short read
 }

 inline void decChunkSize(uint64_t &state, unsigned int by) {
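
To see what the new consumeHexNumber accepts, here is a self-contained sketch that mirrors just its two phases (hex accumulation, then extension skipping) outside of the uWS state machine; the state-bit bookkeeping and error paths are omitted:

    #include <cstdint>
    #include <cstdio>
    #include <string_view>

    // Mirrors the scan above on a sample chunk header: read hex digits until
    // ';' or CR, then skip the extension bytes up to the CR of the CRLF.
    int main() {
        std::string_view data = "5;foo=bar;baz=quux\r\n";
        uint64_t size = 0;
        while (!data.empty() && data[0] > 32 && data[0] != ';') {
            unsigned char d = (unsigned char) data[0];
            if (d >= 'a') d = (unsigned char) (d - ('a' - ':'));      // 'a'..'f' -> 10..15 after - '0'
            else if (d >= 'A') d = (unsigned char) (d - ('A' - ':')); // 'A'..'F' -> 10..15 after - '0'
            size = size * 16 + (d - '0');
            data.remove_prefix(1);
        }
        while (!data.empty() && data[0] != '\r') {
            data.remove_prefix(1); // skip ";foo=bar;baz=quux" (bytes <= 32 here would be an error)
        }
        std::printf("chunk size = %llu\n", (unsigned long long) size); // prints: chunk size = 5
    }
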
@@ -71,7 +71,7 @@ private:
        // if we are SSL we need to handle the handshake properly
        us_socket_context_on_handshake(SSL, getSocketContext(), [](us_socket_t *s, int success, struct us_bun_verify_error_t verify_error, void* custom_data) {
            // if we are closing or already closed, we don't need to do anything
-           if (!us_socket_is_closed(SSL, s) && !us_socket_is_shut_down(SSL, s)) {
+           if (!us_socket_is_closed(SSL, s)) {
                HttpContextData<SSL> *httpContextData = getSocketContextDataS(s);
                httpContextData->flags.isAuthorized = success;
                if(httpContextData->flags.rejectUnauthorized) {
@@ -123,11 +123,8 @@ private:

            /* Call filter */
            HttpContextData<SSL> *httpContextData = getSocketContextDataS(s);
-           if(httpContextData->flags.isParsingHttp) {
-               if(httpContextData->onClientError) {
-                   httpContextData->onClientError(SSL, s, uWS::HTTP_PARSER_ERROR_INVALID_EOF, nullptr, 0);
-               }
-           }

            for (auto &f : httpContextData->filterHandlers) {
                f((HttpResponse<SSL> *) s, -1);
            }
@@ -149,6 +146,7 @@ private:

        /* Handle HTTP data streams */
        us_socket_context_on_data(SSL, getSocketContext(), [](us_socket_t *s, char *data, int length) {

+           // ref the socket to make sure we process it entirely before it is closed
+           us_socket_ref(s);
@@ -172,7 +170,6 @@ private:

            /* Mark that we are inside the parser now */
            httpContextData->flags.isParsingHttp = true;

            // clients need to know the cursor after http parse, not servers!
            // how far did we read then? we need to know to continue with websocket parsing data? or?
@@ -182,7 +179,8 @@ private:
#endif

            /* The return value is entirely up to us to interpret. The HttpParser cares only for whether the returned value is DIFFERENT from passed user */
-           auto result = httpResponseData->consumePostPadded(httpContextData->flags.requireHostHeader, data, (unsigned int) length, s, proxyParser, [httpContextData](void *s, HttpRequest *httpRequest) -> void * {
+           auto result = httpResponseData->consumePostPadded(httpContextData->maxHeaderSize, httpContextData->flags.requireHostHeader, httpContextData->flags.useStrictMethodValidation, data, (unsigned int) length, s, proxyParser, [httpContextData](void *s, HttpRequest *httpRequest) -> void * {
                /* For every request we reset the timeout and hang until user makes action */
                /* Warning: if we are in shutdown state, resetting the timer is a security issue! */
                us_socket_timeout(SSL, (us_socket_t *) s, 0);
@@ -201,6 +199,7 @@ private:

                /* Mark pending request and emit it */
                httpResponseData->state = HttpResponseData<SSL>::HTTP_RESPONSE_PENDING;

                /* Mark this response as connectionClose if ancient or connection: close */
                if (httpRequest->isAncient() || httpRequest->getHeader("connection").length() == 5) {
@@ -209,7 +208,6 @@ private:

                httpResponseData->fromAncientRequest = httpRequest->isAncient();

                /* Select the router based on SNI (only possible for SSL) */
                auto *selectedRouter = &httpContextData->router;
                if constexpr (SSL) {
@@ -261,7 +259,7 @@ private:
            }, [httpResponseData](void *user, std::string_view data, bool fin) -> void * {
                /* We always get an empty chunk even if there is no data */
                if (httpResponseData->inStream) {

                    /* Todo: can this handle timeout for non-post as well? */
                    if (fin) {
                        /* If we just got the last chunk (or empty chunk), disable timeout */
@@ -299,7 +297,7 @@ private:
            });

            auto httpErrorStatusCode = result.httpErrorStatusCode();

            /* Mark that we are no longer parsing Http */
            httpContextData->flags.isParsingHttp = false;
            /* If we got fullptr that means the parser wants us to close the socket from error (same as calling the errorHandler) */
@@ -33,6 +33,7 @@ struct HttpFlags {
    bool usingCustomExpectHandler: 1 = false;
    bool requireHostHeader: 1 = true;
    bool isAuthorized: 1 = false;
+   bool useStrictMethodValidation: 1 = false;
};

template <bool SSL>
@@ -63,6 +64,7 @@ private:
    OnClientErrorCallback onClientError = nullptr;

    HttpFlags flags;
+   uint64_t maxHeaderSize = 0; // 0 means no limit

    // TODO: SNI
    void clearRoutes() {
@@ -39,6 +39,7 @@
#include "QueryParser.h"
#include "HttpErrors.h"
extern "C" size_t BUN_DEFAULT_MAX_HTTP_HEADER_SIZE;
+extern "C" int16_t Bun__HTTPMethod__from(const char *str, size_t len);

namespace uWS
{
@@ -57,6 +58,7 @@ namespace uWS
        HTTP_PARSER_ERROR_INVALID_HTTP_VERSION = 7,
        HTTP_PARSER_ERROR_INVALID_EOF = 8,
        HTTP_PARSER_ERROR_INVALID_METHOD = 9,
+       HTTP_PARSER_ERROR_INVALID_HEADER_TOKEN = 10,
    };

@@ -65,6 +67,7 @@ namespace uWS
        HTTP_HEADER_PARSER_ERROR_INVALID_HTTP_VERSION = 1,
        HTTP_HEADER_PARSER_ERROR_INVALID_REQUEST = 2,
        HTTP_HEADER_PARSER_ERROR_INVALID_METHOD = 3,
+       HTTP_HEADER_PARSER_ERROR_REQUEST_HEADER_FIELDS_TOO_LARGE = 4,
    };

    struct HttpParserResult {
@@ -100,6 +103,11 @@ namespace uWS
            return 0;
        }

+       bool isShortRead() {
+           return parserError == HTTP_PARSER_ERROR_NONE && errorStatusCodeOrConsumedBytes == 0;
+       }
+
        /* Returns true if there was an error */
        bool isError() {
            return parserError != HTTP_PARSER_ERROR_NONE;
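
isShortRead() completes the result API: a caller can now distinguish a hard parser error from an incomplete buffer. A sketch of the intended three-way dispatch (the method names are those shown in this file; the surrounding logic is illustrative):

    // Illustrative dispatch on an HttpParserResult 'result'.
    if (result.isError()) {
        // parserError is set; the status to send is result.httpErrorStatusCode() (e.g. 400, 431)
    } else if (result.isShortRead()) {
        // no error and nothing consumed: keep the bytes buffered and wait for more data
    } else {
        // success: errorStatusCodeOrConsumedBytes holds the number of consumed bytes
    }
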
@@ -365,7 +373,7 @@ namespace uWS
            return false;
        }

-       static inline void *consumeFieldName(char *p) {
+       static inline char *consumeFieldName(char *p) {
            /* Best case fast path (particularly useful with clang) */
            while (true) {
                while ((*p >= 65) & (*p <= 90)) [[likely]] {
@@ -376,7 +384,7 @@ namespace uWS
                    p++;
                }
                if (*p == ':') {
-                   return (void *)p;
+                   return p;
                }
                if (*p == '-') {
                    p++;
@@ -390,11 +398,15 @@ namespace uWS
            while (isFieldNameByteFastLowercased(*(unsigned char *)p)) {
                p++;
            }
-           return (void *)p;
+           return p;
        }

-       static bool isValidMethod(std::string_view str) {
+       static bool isValidMethod(std::string_view str, bool useStrictMethodValidation) {
            if (str.empty()) return false;

+           if (useStrictMethodValidation) {
+               return Bun__HTTPMethod__from(str.data(), str.length()) != -1;
+           }
+
            for (char c : str) {
                if (!isValidMethodChar(c))
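
The two modes differ for non-standard tokens: the lenient loop accepts any method made of valid token characters (so a custom method like "FOO" passes), while strict mode only accepts methods Bun recognizes, since Bun__HTTPMethod__from (implemented outside this file) returns -1 for unknown names. A sketch of the contrast, with the external lookup stubbed so it runs standalone:

    #include <cstdint>
    #include <string_view>

    // Stub of the external lookup (the real Bun__HTTPMethod__from lives on the Bun side).
    static int16_t stub_HTTPMethod_from(const char *str, size_t len) {
        std::string_view m{str, len};
        return (m == "GET" || m == "POST" || m == "DELETE") ? 0 : -1; // -1 == unknown
    }

    // Simplified stand-in for the lenient token check, illustrative only.
    static bool looksLikeToken(std::string_view str) {
        for (char c : str) {
            if (c <= 32 || c == ':') return false;
        }
        return !str.empty();
    }

    // looksLikeToken("FOO") == true            -> lenient mode would accept it
    // stub_HTTPMethod_from("FOO", 3) == -1     -> strict mode rejects it
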
@@ -449,7 +461,7 @@ namespace uWS

        /* Puts method as key, target as value and returns non-null (or nullptr on error). */
-       static inline ConsumeRequestLineResult consumeRequestLine(char *data, char *end, HttpRequest::Header &header) {
+       static inline ConsumeRequestLineResult consumeRequestLine(char *data, char *end, HttpRequest::Header &header, bool useStrictMethodValidation, uint64_t maxHeaderSize) {
            /* Scan until single SP, assume next is / (origin request) */
            char *start = data;
            /* This catches the post padded CR and fails */
@@ -460,14 +472,17 @@ namespace uWS
                data++;

            }
-           if (&data[1] == end) [[unlikely]] {
+           if(start == data) [[unlikely]] {
                return ConsumeRequestLineResult::error(HTTP_HEADER_PARSER_ERROR_INVALID_METHOD);
            }
+           if (data - start < 2) [[unlikely]] {
+               return ConsumeRequestLineResult::shortRead();
+           }

            if (data[0] == 32 && (__builtin_expect(data[1] == '/', 1) || isHTTPorHTTPSPrefixForProxies(data + 1, end) == 1)) [[likely]] {
                header.key = {start, (size_t) (data - start)};
                data++;
-               if(!isValidMethod(header.key)) {
+               if(!isValidMethod(header.key, useStrictMethodValidation)) {
                    return ConsumeRequestLineResult::error(HTTP_HEADER_PARSER_ERROR_INVALID_METHOD);
                }
                /* Scan for less than 33 (catches post padded CR and fails) */
@@ -475,8 +490,14 @@ namespace uWS
                for (; true; data += 8) {
                    uint64_t word;
                    memcpy(&word, data, sizeof(uint64_t));
+                   if(maxHeaderSize && (uintptr_t)(data - start) > maxHeaderSize) {
+                       return ConsumeRequestLineResult::error(HTTP_HEADER_PARSER_ERROR_REQUEST_HEADER_FIELDS_TOO_LARGE);
+                   }
                    if (hasLess(word, 33)) {
                        while (*(unsigned char *)data > 32) data++;
+                       if(maxHeaderSize && (uintptr_t)(data - start) > maxHeaderSize) {
+                           return ConsumeRequestLineResult::error(HTTP_HEADER_PARSER_ERROR_REQUEST_HEADER_FIELDS_TOO_LARGE);
+                       }
                        /* Now we stand on space */
                        header.value = {start, (size_t) (data - start)};
                        auto nextPosition = data + 11;
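
The guard added here (and repeated below for field names and values) measures the cumulative distance from the start of the request head, so maxHeaderSize bounds the whole head rather than a single token, and 0 disables the check, matching the HttpContextData default. Extracted as a standalone helper for clarity (an illustrative refactor, not code from this commit):

    #include <cstdint>

    // The recurring limit check: 'headStart' anchors the beginning of the request
    // head, so the comparison bounds the cumulative bytes scanned, not one token.
    inline bool exceedsHeaderLimit(const char *cursor, const char *headStart, uint64_t maxHeaderSize) {
        return maxHeaderSize && (uintptr_t)(cursor - headStart) > maxHeaderSize; // 0 means "no limit"
    }
    // On true, the parser maps this to HTTP_ERROR_431_REQUEST_HEADER_FIELDS_TOO_LARGE.
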
@@ -530,21 +551,20 @@ namespace uWS
         * Field values are usually constrained to the range of US-ASCII characters [...]
         * Field values containing CR, LF, or NUL characters are invalid and dangerous [...]
         * Field values containing other CTL characters are also invalid. */
-       static inline void *tryConsumeFieldValue(char *p) {
+       static inline char * tryConsumeFieldValue(char *p) {
            for (; true; p += 8) {
                uint64_t word;
                memcpy(&word, p, sizeof(uint64_t));
                if (hasLess(word, 32)) {
                    while (*(unsigned char *)p > 31) p++;
-                   return (void *)p;
+                   return p;
                }
            }
        }

        /* End is only used for the proxy parser. The HTTP parser recognizes "\ra" as invalid "\r\n" scan and breaks. */
-       static HttpParserResult getHeaders(char *postPaddedBuffer, char *end, struct HttpRequest::Header *headers, void *reserved, bool &isAncientHTTP) {
+       static HttpParserResult getHeaders(char *postPaddedBuffer, char *end, struct HttpRequest::Header *headers, void *reserved, bool &isAncientHTTP, bool useStrictMethodValidation, uint64_t maxHeaderSize) {
            char *preliminaryKey, *preliminaryValue, *start = postPaddedBuffer;

#ifdef UWS_WITH_PROXY
            /* ProxyParser is passed as reserved parameter */
            ProxyParser *pp = (ProxyParser *) reserved;
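
Both tryConsumeFieldValue and the request-line scanner lean on hasLess(word, n), the classic SWAR trick for asking "does any byte of this 8-byte word compare below n?" in a handful of ALU ops. For reference, the standard formulation of that idiom (uWS ships an equivalent; this version is the well-known bit-twiddling form, valid for n <= 128):

    #include <cstdint>

    // Non-zero iff some byte of x is strictly less than n.
    // Subtracting n from each byte borrows into that byte's high bit exactly
    // when the byte was below n; masking with ~x filters out bytes whose
    // high bit was already set. ~0ULL / 255 is 0x0101010101010101.
    static inline uint64_t hasLessSketch(uint64_t x, uint64_t n) {
        return (x - ~0ULL / 255 * n) & ~x & (~0ULL / 255 * 128);
    }
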
@@ -572,7 +592,8 @@ namespace uWS
             * which is then removed, and our counters to flip due to overflow and we end up with a crash */

            /* The request line is different from the field names / field values */
-           auto requestLineResult = consumeRequestLine(postPaddedBuffer, end, headers[0]);
+           auto requestLineResult = consumeRequestLine(postPaddedBuffer, end, headers[0], useStrictMethodValidation, maxHeaderSize);
+
            if (requestLineResult.isErrorOrShortRead()) {
                /* Error - invalid request line */
                /* Assuming it is 505 HTTP Version Not Supported */
@@ -583,6 +604,8 @@ namespace uWS
                        return HttpParserResult::error(HTTP_ERROR_400_BAD_REQUEST, HTTP_PARSER_ERROR_INVALID_REQUEST);
                    case HTTP_HEADER_PARSER_ERROR_INVALID_METHOD:
                        return HttpParserResult::error(HTTP_ERROR_400_BAD_REQUEST, HTTP_PARSER_ERROR_INVALID_METHOD);
+                   case HTTP_HEADER_PARSER_ERROR_REQUEST_HEADER_FIELDS_TOO_LARGE:
+                       return HttpParserResult::error(HTTP_ERROR_431_REQUEST_HEADER_FIELDS_TOO_LARGE, HTTP_PARSER_ERROR_REQUEST_HEADER_FIELDS_TOO_LARGE);
                    default: {
                        /* Short read */
                    }
@@ -596,6 +619,8 @@ namespace uWS
            }
            /* No request headers found */
            size_t buffer_size = end - postPaddedBuffer;
+           const char * headerStart = (headers[0].key.length() > 0) ? headers[0].key.data() : end;
+
            if(buffer_size < 2) {
                /* Fragmented request */
                return HttpParserResult::error(HTTP_ERROR_400_BAD_REQUEST, HTTP_PARSER_ERROR_INVALID_REQUEST);
@@ -609,9 +634,11 @@ namespace uWS
            for (unsigned int i = 1; i < UWS_HTTP_MAX_HEADERS_COUNT - 1; i++) {
                /* Lower case and consume the field name */
                preliminaryKey = postPaddedBuffer;
-               postPaddedBuffer = (char *) consumeFieldName(postPaddedBuffer);
+               postPaddedBuffer = consumeFieldName(postPaddedBuffer);
                headers->key = std::string_view(preliminaryKey, (size_t) (postPaddedBuffer - preliminaryKey));

+               if(maxHeaderSize && (uintptr_t)(postPaddedBuffer - headerStart) > maxHeaderSize) {
+                   return HttpParserResult::error(HTTP_ERROR_431_REQUEST_HEADER_FIELDS_TOO_LARGE, HTTP_PARSER_ERROR_REQUEST_HEADER_FIELDS_TOO_LARGE);
+               }
                /* We should not accept whitespace between key and colon, so colon must follow immediately */
                if (postPaddedBuffer[0] != ':') {
                    /* If we stand at the end, we are fragmented */
@@ -619,14 +646,14 @@ namespace uWS
                        return HttpParserResult::shortRead();
                    }
                    /* Error: invalid chars in field name */
-                   return HttpParserResult::error(HTTP_ERROR_400_BAD_REQUEST, HTTP_PARSER_ERROR_INVALID_REQUEST);
+                   return HttpParserResult::error(HTTP_ERROR_400_BAD_REQUEST, HTTP_PARSER_ERROR_INVALID_HEADER_TOKEN);
                }
                postPaddedBuffer++;

                preliminaryValue = postPaddedBuffer;
                /* The goal of this call is to find next "\r\n", or any invalid field value chars, fast */
                while (true) {
-                   postPaddedBuffer = (char *) tryConsumeFieldValue(postPaddedBuffer);
+                   postPaddedBuffer = tryConsumeFieldValue(postPaddedBuffer);
                    /* If this is not CR then we caught some stinky invalid char on the way */
                    if (postPaddedBuffer[0] != '\r') {
                        /* If TAB then keep searching */
@@ -635,17 +662,22 @@ namespace uWS
                            continue;
                        }
                        /* Error - invalid chars in field value */
-                       return HttpParserResult::error(HTTP_ERROR_400_BAD_REQUEST, HTTP_PARSER_ERROR_INVALID_REQUEST);
+                       return HttpParserResult::error(HTTP_ERROR_400_BAD_REQUEST, HTTP_PARSER_ERROR_INVALID_HEADER_TOKEN);
                    }
                    break;
                }
+               if(maxHeaderSize && (uintptr_t)(postPaddedBuffer - headerStart) > maxHeaderSize) {
+                   return HttpParserResult::error(HTTP_ERROR_431_REQUEST_HEADER_FIELDS_TOO_LARGE, HTTP_PARSER_ERROR_REQUEST_HEADER_FIELDS_TOO_LARGE);
+               }
+               if (end - postPaddedBuffer < 2) {
+                   return HttpParserResult::shortRead();
+               }
                /* We fence end[0] with \r, followed by end[1] being something that is "not \n", to signify "not found".
                 * This way we can have this one single check to see if we found \r\n WITHIN our allowed search space. */
                if (postPaddedBuffer[1] == '\n') {
                    /* Store this header, it is valid */
                    headers->value = std::string_view(preliminaryValue, (size_t) (postPaddedBuffer - preliminaryValue));
                    postPaddedBuffer += 2;

                    /* Trim trailing whitespace (SP, HTAB) */
                    while (headers->value.length() && headers->value.back() < 33) {
                        headers->value.remove_suffix(1);
@@ -656,6 +688,9 @@ namespace uWS
                        headers->value.remove_prefix(1);
                    }

+                   if(maxHeaderSize && (uintptr_t)(postPaddedBuffer - headerStart) > maxHeaderSize) {
+                       return HttpParserResult::error(HTTP_ERROR_431_REQUEST_HEADER_FIELDS_TOO_LARGE, HTTP_PARSER_ERROR_REQUEST_HEADER_FIELDS_TOO_LARGE);
+                   }
                    headers++;

                    /* We definitely have at least one header (or request line), so check if we are done */
@@ -673,6 +708,11 @@ namespace uWS
                        }
                    }
                } else {
+
+                   if(postPaddedBuffer[0] == '\r') {
+                       // invalid char after \r
+                       return HttpParserResult::error(HTTP_ERROR_400_BAD_REQUEST, HTTP_PARSER_ERROR_INVALID_REQUEST);
+                   }
                    /* We are either out of search space or this is a malformed request */
                    return HttpParserResult::shortRead();
                }
@@ -683,7 +723,7 @@ namespace uWS

        /* This is the only caller of getHeaders and is thus the deepest part of the parser. */
        template <bool ConsumeMinimally>
-       HttpParserResult fenceAndConsumePostPadded(bool requireHostHeader, char *data, unsigned int length, void *user, void *reserved, HttpRequest *req, MoveOnlyFunction<void *(void *, HttpRequest *)> &requestHandler, MoveOnlyFunction<void *(void *, std::string_view, bool)> &dataHandler) {
+       HttpParserResult fenceAndConsumePostPadded(uint64_t maxHeaderSize, bool requireHostHeader, bool useStrictMethodValidation, char *data, unsigned int length, void *user, void *reserved, HttpRequest *req, MoveOnlyFunction<void *(void *, HttpRequest *)> &requestHandler, MoveOnlyFunction<void *(void *, std::string_view, bool)> &dataHandler) {

            /* How much data we CONSUMED (to throw away) */
            unsigned int consumedTotal = 0;
@@ -694,7 +734,7 @@ namespace uWS
            data[length + 1] = 'a'; /* Anything that is not \n, to trigger "invalid request" */
            req->ancientHttp = false;
            for (;length;) {
-               auto result = getHeaders(data, data + length, req->headers, reserved, req->ancientHttp);
+               auto result = getHeaders(data, data + length, req->headers, reserved, req->ancientHttp, useStrictMethodValidation, maxHeaderSize);
                if(result.isError()) {
                    return result;
                }
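
For context on the fence this loop depends on: the parser writes data[length] = '\r' (just above, outside this hunk) and data[length + 1] = 'a' before scanning, so every \r\n search terminates inside owned memory; a scan that runs into the fence sees \r followed by a non-\n byte and reports "not found" (short read or invalid request) instead of reading past the buffer. In miniature:

    // Miniature of the post-padding fence (the buffer must own length + 2 bytes).
    void fencePostPadded(char *data, unsigned int length) {
        data[length] = '\r';     // every "find \r" scan is guaranteed to stop here
        data[length + 1] = 'a';  // anything that is not '\n', so \r\n is never falsely matched
    }
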
@@ -826,7 +866,7 @@ namespace uWS
        }

    public:
-       HttpParserResult consumePostPadded(bool requireHostHeader, char *data, unsigned int length, void *user, void *reserved, MoveOnlyFunction<void *(void *, HttpRequest *)> &&requestHandler, MoveOnlyFunction<void *(void *, std::string_view, bool)> &&dataHandler) {
+       HttpParserResult consumePostPadded(uint64_t maxHeaderSize, bool requireHostHeader, bool useStrictMethodValidation, char *data, unsigned int length, void *user, void *reserved, MoveOnlyFunction<void *(void *, HttpRequest *)> &&requestHandler, MoveOnlyFunction<void *(void *, std::string_view, bool)> &&dataHandler) {

            /* This resets BloomFilter by construction, but later we also reset it again.
             * Optimize this to skip resetting twice (req could be made global) */
@@ -875,7 +915,7 @@ public:
                fallback.append(data, maxCopyDistance);

                // break here on break
-               HttpParserResult consumed = fenceAndConsumePostPadded<true>(requireHostHeader, fallback.data(), (unsigned int) fallback.length(), user, reserved, &req, requestHandler, dataHandler);
+               HttpParserResult consumed = fenceAndConsumePostPadded<true>(maxHeaderSize, requireHostHeader, useStrictMethodValidation, fallback.data(), (unsigned int) fallback.length(), user, reserved, &req, requestHandler, dataHandler);
                /* Return data will be different than user if we are upgraded to WebSocket or have an error */
                if (consumed.returnedData != user) {
                    return consumed;
@@ -932,7 +972,7 @@ public:
                }
            }

-           HttpParserResult consumed = fenceAndConsumePostPadded<false>(requireHostHeader, data, length, user, reserved, &req, requestHandler, dataHandler);
+           HttpParserResult consumed = fenceAndConsumePostPadded<false>(maxHeaderSize, requireHostHeader, useStrictMethodValidation, data, length, user, reserved, &req, requestHandler, dataHandler);
            /* Return data will be different than user if we are upgraded to WebSocket or have an error */
            if (consumed.returnedData != user) {
                return consumed;
@@ -102,6 +102,7 @@ struct HttpResponseData : AsyncSocketData<SSL>, HttpParser {
    uint8_t idleTimeout = 10; // default HTTP_TIMEOUT 10 seconds
    bool fromAncientRequest = false;

#ifdef UWS_WITH_PROXY
    ProxyParser proxyParser;
#endif