diff --git a/internal/chunked.go b/internal/chunked.go
new file mode 100644
index 0000000..dd207bd
--- /dev/null
+++ b/internal/chunked.go
@@ -0,0 +1,282 @@
+package internal
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+)
+
+const maxLineLength = 4096 // assumed <= bufio.defaultBufSize
+
+var ErrLineTooLong = errors.New("header line too long")
+
+// NewChunkedReader returns a new chunkedReader that translates the data read from r
+// out of HTTP "chunked" format before returning it.
+// The chunkedReader returns io.EOF when the final 0-length chunk is read.
+//
+// NewChunkedReader is not needed by normal applications. The http package
+// automatically decodes chunking when reading response bodies.
+func NewChunkedReader(r io.Reader) io.Reader {
+	br, ok := r.(*bufio.Reader)
+	if !ok {
+		br = bufio.NewReader(r)
+	}
+
+	return &chunkedReader{r: br}
+}
+
+type chunkedReader struct {
+	r        *bufio.Reader
+	n        uint64 // unread bytes in chunk
+	err      error
+	buf      [2]byte
+	checkEnd bool // whether need to check for \r\n chunk footer
+}
+
+func (cr *chunkedReader) beginChunk() {
+	// chunk-size CRLF
+	var line []byte
+	line, cr.err = readChunkLine(cr.r)
+
+	if cr.err != nil {
+		return
+	}
+
+	cr.n, cr.err = parseHexUint(line)
+	if cr.err != nil {
+		return
+	}
+
+	if cr.n == 0 {
+		cr.err = io.EOF
+	}
+}
+
+func (cr *chunkedReader) chunkHeaderAvailable() bool {
+	n := cr.r.Buffered()
+	if n > 0 {
+		peek, _ := cr.r.Peek(n)
+		return bytes.IndexByte(peek, '\n') >= 0
+	}
+
+	return false
+}
+
+func (cr *chunkedReader) Read(b []uint8) (n int, err error) {
+	for cr.err == nil {
+		if cr.checkEnd {
+			if n > 0 && cr.r.Buffered() < 2 {
+				// We have some data. Return early (per the io.Reader
+				// contract) instead of potentially blocking while
+				// reading more.
+				break
+			}
+
+			if _, cr.err = io.ReadFull(cr.r, cr.buf[:2]); cr.err == nil {
+				if string(cr.buf[:]) != "\r\n" {
+					cr.err = errors.New("malformed chunked encoding")
+					break
+				}
+			} else {
+				if errors.Is(cr.err, io.EOF) {
+					cr.err = io.ErrUnexpectedEOF
+				}
+
+				break
+			}
+
+			cr.checkEnd = false
+		}
+
+		if cr.n == 0 {
+			if n > 0 && !cr.chunkHeaderAvailable() {
+				// We've read enough. Don't potentially block
+				// reading a new chunk header.
+				break
+			}
+
+			cr.beginChunk()
+
+			continue
+		}
+
+		if len(b) == 0 {
+			break
+		}
+
+		rbuf := b
+		if uint64(len(rbuf)) > cr.n {
+			rbuf = rbuf[:cr.n]
+		}
+
+		var n0 int
+		n0, cr.err = cr.r.Read(rbuf)
+		n += n0
+		b = b[n0:]
+		cr.n -= uint64(n0)
+		// If we're at the end of a chunk, read the next two
+		// bytes to verify they are "\r\n".
+		if cr.n == 0 && cr.err == nil {
+			cr.checkEnd = true
+		} else if errors.Is(cr.err, io.EOF) {
+			cr.err = io.ErrUnexpectedEOF
+		}
+	}
+
+	return n, cr.err
+}
+
+// Read a line of bytes (up to \n) from b.
+// Give up if the line exceeds maxLineLength.
+// The returned bytes are owned by the bufio.Reader
+// so they are only valid until the next bufio read.
+func readChunkLine(b *bufio.Reader) ([]byte, error) {
+	p, err := b.ReadSlice('\n')
+	if err != nil {
+		// We always know when EOF is coming.
+		// If the caller asked for a line, there should be a line.
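+		// To illustrate the framing this guards (a sketch of the wire
+		// format, not tied to this reader's state): a well-formed
+		// chunked body such as
+		//
+		//	"5\r\nhello\r\n0\r\n\r\n"
+		//
+		// always ends with a zero-length chunk line, so running out of
+		// input while a chunk-size line is still expected means the
+		// peer truncated the stream.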
+		if errors.Is(err, io.EOF) {
+			err = io.ErrUnexpectedEOF
+		} else if errors.Is(err, bufio.ErrBufferFull) {
+			err = ErrLineTooLong
+		}
+
+		return nil, err
+	}
+
+	if len(p) >= maxLineLength {
+		return nil, ErrLineTooLong
+	}
+
+	p = trimTrailingWhitespace(p)
+	p, err = removeChunkExtension(p)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return p, nil
+}
+
+func trimTrailingWhitespace(b []byte) []byte {
+	for len(b) > 0 && isASCIISpace(b[len(b)-1]) {
+		b = b[:len(b)-1]
+	}
+
+	return b
+}
+
+func isASCIISpace(b byte) bool {
+	return b == ' ' || b == '\t' || b == '\n' || b == '\r'
+}
+
+var semi = []byte(";")
+
+// removeChunkExtension removes any chunk-extension from p.
+// For example,
+//
+//	"0" => "0"
+//	"0;token" => "0"
+//	"0;token=val" => "0"
+//	`0;token="quoted string"` => "0"
+func removeChunkExtension(p []byte) ([]byte, error) {
+	p, _, _ = bytes.Cut(p, semi)
+	// TODO: care about exact syntax of chunk extensions? We're
+	// ignoring and stripping them anyway. For now just never
+	// return an error.
+	return p, nil
+}
+
+// NewChunkedWriter returns a new chunkedWriter that translates writes into HTTP
+// "chunked" format before writing them to w. Closing the returned chunkedWriter
+// sends the final 0-length chunk that marks the end of the stream but does
+// not send the final CRLF that appears after trailers; trailers and the last
+// CRLF must be written separately.
+//
+// NewChunkedWriter is not needed by normal applications. The http
+// package adds chunking automatically if handlers don't set a
+// Content-Length header. Using NewChunkedWriter inside a handler
+// would result in double chunking or chunking with a Content-Length,
+// both of which are wrong.
+func NewChunkedWriter(w io.Writer) io.WriteCloser {
+	return &chunkedWriter{w}
+}
+
+// Writing to chunkedWriter translates to writing in HTTP chunked Transfer
+// Encoding wire format to the underlying Wire writer.
+type chunkedWriter struct {
+	Wire io.Writer
+}
+
+// Write the contents of data as one chunk to Wire.
+// NOTE: the corresponding chunk-writing procedure in Conn.Write has
+// a bug since it does not check for success of io.WriteString
+func (cw *chunkedWriter) Write(data []byte) (n int, err error) {
+	// Don't send 0-length data. It looks like EOF for chunked encoding.
+	if len(data) == 0 {
+		return 0, nil
+	}
+
+	if _, err = fmt.Fprintf(cw.Wire, "%x\r\n", len(data)); err != nil {
+		return 0, err
+	}
+
+	if n, err = cw.Wire.Write(data); err != nil {
+		return
+	}
+
+	if n != len(data) {
+		err = io.ErrShortWrite
+		return
+	}
+
+	if _, err = io.WriteString(cw.Wire, "\r\n"); err != nil {
+		return
+	}
+
+	if bw, ok := cw.Wire.(*FlushAfterChunkWriter); ok {
+		err = bw.Flush()
+	}
+
+	return
+}
+
+func (cw *chunkedWriter) Close() error {
+	_, err := io.WriteString(cw.Wire, "0\r\n")
+	return err
+}
+
+// FlushAfterChunkWriter signals from the caller of NewChunkedWriter
+// that each chunk should be followed by a flush. It is used by the
+// http.Transport code to keep the buffering behavior for headers and
+// trailers, but flush out chunks aggressively in the middle for
+// request bodies which may be generated slowly. See Issue 6574.
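+//
+// A minimal usage sketch (assumed wiring, mirroring how net/http uses it;
+// conn is a hypothetical net.Conn):
+//
+//	bw := bufio.NewWriter(conn)
+//	cw := NewChunkedWriter(&FlushAfterChunkWriter{Writer: bw})
+//	cw.Write([]byte("hello")) // emits "5\r\nhello\r\n", then flushes bw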
+type FlushAfterChunkWriter struct {
+	*bufio.Writer
+}
+
+func parseHexUint(v []byte) (n uint64, err error) {
+	for i, b := range v {
+		switch {
+		case '0' <= b && b <= '9':
+			b = b - '0'
+		case 'a' <= b && b <= 'f':
+			b = b - 'a' + 10
+		case 'A' <= b && b <= 'F':
+			b = b - 'A' + 10
+		default:
+			return 0, errors.New("invalid byte in chunk length")
+		}
+
+		if i == 16 {
+			return 0, errors.New("http chunk length too large")
+		}
+
+		n <<= 4
+		n |= uint64(b)
+	}
+
+	return
+}
diff --git a/pkg/request.go b/pkg/request.go
new file mode 100644
index 0000000..b3de4cc
--- /dev/null
+++ b/pkg/request.go
@@ -0,0 +1,130 @@
+package pkg
+
+import (
+	"bufio"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strings"
+)
+
+// parseRequestLine parses "GET /foo HTTP/1.1" into its three parts.
+func parseRequestLine(line string) (method, requestURI, proto string, ok bool) {
+	method, rest, ok1 := strings.Cut(line, " ")
+	requestURI, proto, ok2 := strings.Cut(rest, " ")
+
+	if !ok1 || !ok2 {
+		return "", "", "", false
+	}
+
+	return method, requestURI, proto, true
+}
+
+func readRequest(b *bufio.Reader) (req *http.Request, err error) {
+	tp := newTextprotoReader(b)
+	defer putTextprotoReader(tp)
+
+	req = new(http.Request)
+
+	var s string
+
+	// First line: GET /index.html HTTP/1.0
+	if s, err = tp.ReadLine(); err != nil {
+		return nil, err
+	}
+
+	defer func() {
+		if errors.Is(err, io.EOF) {
+			err = io.ErrUnexpectedEOF
+		}
+	}()
+
+	var ok bool
+	req.Method, req.RequestURI, req.Proto, ok = parseRequestLine(s)
+
+	if !ok {
+		return nil, badStringError("malformed HTTP request", s)
+	}
+
+	if !ValidMethod(req.Method) {
+		return nil, badStringError("invalid method", req.Method)
+	}
+
+	rawurl := req.RequestURI
+
+	if req.ProtoMajor, req.ProtoMinor, ok = http.ParseHTTPVersion(req.Proto); !ok {
+		return nil, badStringError("malformed HTTP version", req.Proto)
+	}
+
+	// CONNECT requests are used two different ways, and neither uses a full URL:
+	// The standard use is to tunnel HTTPS through an HTTP proxy.
+	// It looks like "CONNECT www.google.com:443 HTTP/1.1", and the parameter is
+	// just the authority section of a URL. This information should go in req.URL.Host.
+	//
+	// The net/rpc package also uses CONNECT, but there the parameter is a path
+	// that starts with a slash. It can be parsed with the regular URL parser,
+	// and the path will end up in req.URL.Path, where it needs to be in order for
+	// RPC to work.
+	justAuthority := req.Method == http.MethodConnect && !strings.HasPrefix(rawurl, "/")
+	if justAuthority {
+		rawurl = "http://" + rawurl
+	}
+
+	if req.URL, err = url.ParseRequestURI(rawurl); err != nil {
+		return nil, err
+	}
+
+	if justAuthority {
+		// Strip the bogus "http://" back off.
+		req.URL.Scheme = ""
+	}
+
+	// Subsequent lines: Key: value.
+	mimeHeader, err := tp.ReadMIMEHeader()
+
+	if err != nil {
+		return nil, err
+	}
+
+	req.Header = http.Header(mimeHeader)
+	if len(req.Header["Host"]) > 1 {
+		return nil, fmt.Errorf("too many Host headers")
+	}
+
+	// RFC 7230, section 5.3: Must treat
+	//	GET /index.html HTTP/1.1
+	//	Host: www.google.com
+	// and
+	//	GET http://www.google.com/index.html HTTP/1.1
+	//	Host: doesntmatter
+	// the same. In the second case, any Host line is ignored.
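+	// Concretely (an illustrative trace of the precedence below): for
+	// "GET http://www.google.com/index.html HTTP/1.1" the URL parser
+	// leaves req.URL.Host == "www.google.com", so the assignment below
+	// wins; for an origin-form request like "GET /index.html HTTP/1.1"
+	// req.URL.Host is empty and the Host header is consulted instead.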
+	req.Host = req.URL.Host
+	if req.Host == "" {
+		req.Host = GetHeader(req.Header, "Host")
+	}
+
+	fixPragmaCacheControl(req.Header)
+
+	req.Close = shouldClose(req.ProtoMajor, req.ProtoMinor, req.Header, false)
+
+	err = readTransfer(req, b)
+
+	if err != nil {
+		return nil, err
+	}
+
+	if isH2Upgrade(req) {
+		// Because it's neither chunked, nor declared:
+		req.ContentLength = -1
+
+		// We want to give handlers a chance to hijack the
+		// connection, but we need to prevent the Server from
+		// dealing with the connection further if it's not
+		// hijacked. Set Close to ensure that:
+		req.Close = true
+	}
+
+	return req, nil
+}
diff --git a/pkg/response.go b/pkg/response.go
index 2a04ba6..60dea77 100644
--- a/pkg/response.go
+++ b/pkg/response.go
@@ -18,3 +18,11 @@ func isProtocolSwitchHeader(h http.Header) bool {
 func isProtocolSwitchResponse(code int, h http.Header) bool {
 	return code == http.StatusSwitchingProtocols && isProtocolSwitchHeader(h)
 }
+
+func fixPragmaCacheControl(header http.Header) {
+	if hp, ok := header["Pragma"]; ok && len(hp) > 0 && hp[0] == "no-cache" {
+		if _, presentcc := header["Cache-Control"]; !presentcc {
+			header["Cache-Control"] = []string{"no-cache"}
+		}
+	}
+}
diff --git a/pkg/server.go b/pkg/server.go
index 61ca33e..d551cef 100644
--- a/pkg/server.go
+++ b/pkg/server.go
@@ -67,6 +67,8 @@ var (
 	headerDate = []byte("Date: ")
 )
 
+var errTooLarge = errors.New("http: request too large")
+
 const closeStr = "close"
 
 // rstAvoidanceDelay is the amount of time we sleep after closing the
@@ -221,21 +223,6 @@ type chunkWriter struct {
 	chunking bool // using chunked transfer encoding for reply body
 }
 
-// bodyAllowedForStatus reports whether a given response status code
-// permits a body. See RFC 7230, section 3.3.
-func bodyAllowedForStatus(status int) bool {
-	switch {
-	case status >= 100 && status <= 199:
-		return false
-	case status == 204:
-		return false
-	case status == 304:
-		return false
-	}
-
-	return true
-}
-
 // foreachHeaderElement splits v according to the "#rule" construction
 // in RFC 7230 section 7 and calls fn for each non-empty element.
 func foreachHeaderElement(v string, fn func(string)) {
@@ -909,6 +896,14 @@ func (s *MiniServer) idleTimeout() time.Duration {
 	return s.ReadTimeout
 }
 
+func (s *MiniServer) readHeaderTimeout() time.Duration {
+	if s.ReadHeaderTimeout != 0 {
+		return s.ReadHeaderTimeout
+	}
+
+	return s.ReadTimeout
+}
+
 // A ConnState represents the state of a client connection to a server.
 // It's used by the optional Server.ConnState hook.
 type ConnState int
@@ -1588,12 +1583,74 @@ func (c *conn) serve(ctx context.Context) {
 	}
 }
 
-var errTooLarge = errors.New("http: request too large")
+var textprotoReaderPool sync.Pool
+
+func newTextprotoReader(br *bufio.Reader) *textproto.Reader {
+	if v := textprotoReaderPool.Get(); v != nil {
+		tr := v.(*textproto.Reader)
+		tr.R = br
+
+		return tr
+	}
+
+	return textproto.NewReader(br)
+}
+
+func putTextprotoReader(r *textproto.Reader) {
+	r.R = nil
+
+	textprotoReaderPool.Put(r)
+}
 
 // Read next request from connection.
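+// (Uses the textproto.Reader pool above via pkg/request.go's readRequest,
+// which pairs the calls as:
+//
+//	tp := newTextprotoReader(b)
+//	defer putTextprotoReader(tp)
+//
+// so header parsing reuses pooled readers across requests.)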
 // TODO readRequest is incomplete
 func (c *conn) readRequest(ctx context.Context) (w *response, err error) {
-	return nil, err
+	var (
+		wholeReqDeadline time.Time // or zero if none
+		hdrDeadline      time.Time // or zero if none
+	)
+
+	t0 := time.Now()
+
+	if d := c.server.readHeaderTimeout(); d > 0 {
+		hdrDeadline = t0.Add(d)
+	}
+
+	if d := c.server.ReadTimeout; d > 0 {
+		wholeReqDeadline = t0.Add(d)
+	}
+
+	//nolint:errcheck
+	c.rwc.SetReadDeadline(hdrDeadline)
+
+	if d := c.server.WriteTimeout; d > 0 {
+		defer func() {
+			//nolint:errcheck
+			c.rwc.SetWriteDeadline(time.Now().Add(d))
+		}()
+	}
+
+	c.r.setReadLimit(c.server.initialReadLimitSize())
+
+	if c.lastMethod == http.MethodPost {
+		// RFC 7230 section 3 tolerance for old buggy clients.
+		peek, _ := c.bufr.Peek(4) // ReadRequest will get err below
+		//nolint:errcheck
+		c.bufr.Discard(NumLeadingCRorLF(peek))
+	}
+
+	req, err := readRequest(c.bufr)
+	if err != nil {
+		if c.r.hitReadLimit() {
+			return nil, errTooLarge
+		}
+
+		return nil, err
+	}
+
+	// TODO: wrap req in a response and enforce wholeReqDeadline.
+	_ = req
+	_ = wholeReqDeadline
+
+	return nil, nil
 }
 
 func (c *conn) close() {
diff --git a/pkg/transfer.go b/pkg/transfer.go
new file mode 100644
index 0000000..b120e08
--- /dev/null
+++ b/pkg/transfer.go
@@ -0,0 +1,389 @@
+package pkg
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"net/http"
+	"net/textproto"
+	"strconv"
+
+	"golang.org/x/net/http/httpguts"
+	"myHttpServer/internal"
+)
+
+// Determine whether to hang up after sending a request and body, or
+// receiving a response and body.
+// 'header' is the request headers.
+func shouldClose(major, minor int, header http.Header, removeCloseHeader bool) bool {
+	if major < 1 {
+		return true
+	}
+
+	conv := header["Connection"]
+	hasClose := httpguts.HeaderValuesContainsToken(conv, "close")
+
+	if major == 1 && minor == 0 {
+		return hasClose || !httpguts.HeaderValuesContainsToken(conv, "keep-alive")
+	}
+
+	if hasClose && removeCloseHeader {
+		header.Del("Connection")
+	}
+
+	return hasClose
+}
+
+// bodyAllowedForStatus reports whether a given response status code
+// permits a body. See RFC 7230, section 3.3.
+func bodyAllowedForStatus(status int) bool {
+	switch {
+	case status >= 100 && status <= 199:
+		return false
+	case status == 204:
+		return false
+	case status == 304:
+		return false
+	}
+
+	return true
+}
+
+// parseContentLength trims whitespace from cl and returns -1 if no value
+// is set, or the value if it's >= 0.
+func parseContentLength(cl string) (int64, error) {
+	cl = textproto.TrimString(cl)
+	if cl == "" {
+		return -1, nil
+	}
+
+	n, err := strconv.ParseUint(cl, 10, 63)
+	if err != nil {
+		return 0, badStringError("bad Content-Length", cl)
+	}
+
+	return int64(n), nil
+}
+
+// Parse the trailer header.
+func fixTrailer(header http.Header, chunked bool) (http.Header, error) {
+	vv, ok := header["Trailer"]
+	if !ok {
+		return nil, nil
+	}
+
+	if !chunked {
+		// Trailer and no chunking:
+		// this is an invalid use case for trailer header.
+		// Nevertheless, no error will be returned and we
+		// let users decide if this is a valid HTTP message.
+		// The Trailer header will be kept in Response.Header
+		// but not populate Response.Trailer.
+		// See issue #27197.
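+		// e.g. a response of
+		//
+		//	HTTP/1.1 200 OK
+		//	Trailer: X-Checksum
+		//	Content-Length: 0
+		//
+		// (no chunking) takes this branch: the Trailer header stays
+		// in Header and the parsed Trailer map remains nil.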
+		return nil, nil
+	}
+
+	header.Del("Trailer")
+
+	trailer := make(http.Header)
+
+	var err error
+
+	for _, v := range vv {
+		foreachHeaderElement(v, func(key string) {
+			key = http.CanonicalHeaderKey(key)
+			switch key {
+			case "Transfer-Encoding", "Trailer", "Content-Length":
+				if err == nil {
+					err = badStringError("bad trailer key", key)
+					return
+				}
+			}
+
+			trailer[key] = nil
+		})
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	if len(trailer) == 0 {
+		return nil, nil
+	}
+
+	return trailer, nil
+}
+
+func noResponseBodyExpected(requestMethod string) bool {
+	return requestMethod == http.MethodHead
+}
+
+// Determine the expected body length, using RFC 7230 Section 3.3. This
+// function is not a method, because ultimately it should be shared by
+// ReadResponse and ReadRequest.
+func fixLength(isResponse bool, status int, requestMethod string, header http.Header, chunked bool) (int64, error) {
+	isRequest := !isResponse
+	contentLens := header["Content-Length"]
+
+	// Hardening against HTTP request smuggling
+	if len(contentLens) > 1 {
+		// Per RFC 7230 Section 3.3.2, prevent multiple
+		// Content-Length headers if they differ in value.
+		// If there are dups of the value, remove the dups.
+		// See Issue 16490.
+		first := textproto.TrimString(contentLens[0])
+		for _, ct := range contentLens[1:] {
+			if first != textproto.TrimString(ct) {
+				return 0, fmt.Errorf(
+					"http: message cannot contain multiple Content-Length headers; got %q",
+					contentLens,
+				)
+			}
+		}
+
+		// deduplicate Content-Length
+		header.Del("Content-Length")
+		header.Add("Content-Length", first)
+
+		contentLens = header["Content-Length"]
+	}
+
+	// Logic based on response type or status
+	if isResponse && noResponseBodyExpected(requestMethod) {
+		return 0, nil
+	}
+
+	if status/100 == 1 {
+		return 0, nil
+	}
+
+	switch status {
+	case 204, 304:
+		return 0, nil
+	}
+
+	// Logic based on Transfer-Encoding
+	if chunked {
+		return -1, nil
+	}
+
+	// Logic based on Content-Length
+	var cl string
+	if len(contentLens) == 1 {
+		cl = textproto.TrimString(contentLens[0])
+	}
+
+	if cl != "" {
+		n, err := parseContentLength(cl)
+		if err != nil {
+			return -1, err
+		}
+
+		return n, nil
+	}
+
+	header.Del("Content-Length")
+
+	if isRequest {
+		// RFC 7230 neither explicitly permits nor forbids an
+		// entity-body on a GET request so we permit one if
+		// declared, but we default to 0 here (not -1 below)
+		// if there's no mention of a body.
+		// Likewise, all other request methods are assumed to have
+		// no body if neither Transfer-Encoding chunked nor a
+		// Content-Length are set.
+		return 0, nil
+	}
+
+	// Body-EOF logic based on other methods (like closing, or chunked coding)
+	return -1, nil
+}
+
+type transferReader struct {
+	// Input
+	Header        http.Header
+	StatusCode    int
+	RequestMethod string
+	ProtoMajor    int
+	ProtoMinor    int
+	// Output
+	Body          io.ReadCloser
+	ContentLength int64
+	Chunked       bool
+	Close         bool
+	Trailer       http.Header
+}
+
+func (t *transferReader) protoAtLeast(m, n int) bool {
+	return t.ProtoMajor > m || (t.ProtoMajor == m && t.ProtoMinor >= n)
+}
+
+// parseTransferEncoding sets t.Chunked based on the Transfer-Encoding header.
+func (t *transferReader) parseTransferEncoding() error {
+	raw, present := t.Header["Transfer-Encoding"]
+	if !present {
+		return nil
+	}
+
+	delete(t.Header, "Transfer-Encoding")
+
+	// Issue 12785; ignore Transfer-Encoding on HTTP/1.0 requests.
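+	// e.g. "POST / HTTP/1.0" carrying "Transfer-Encoding: chunked" is
+	// treated as having no transfer coding at all: the header was already
+	// deleted above and t.Chunked stays false.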
+	if !t.protoAtLeast(1, 1) {
+		return nil
+	}
+
+	// Like nginx, we only support a single Transfer-Encoding header field, and
+	// only if set to "chunked". This is one of the most security sensitive
+	// surfaces in HTTP/1.1 due to the risk of request smuggling, so we keep it
+	// strict and simple.
+	if len(raw) != 1 {
+		return &unsupportedTEError{fmt.Sprintf("too many transfer encodings: %q", raw)}
+	}
+
+	if !internal.EqualFold(raw[0], "chunked") {
+		return &unsupportedTEError{fmt.Sprintf("unsupported transfer encoding: %q", raw[0])}
+	}
+
+	// RFC 7230 3.3.2 says "A sender MUST NOT send a Content-Length header field
+	// in any message that contains a Transfer-Encoding header field."
+	//
+	// but also: "If a message is received with both a Transfer-Encoding and a
+	// Content-Length header field, the Transfer-Encoding overrides the
+	// Content-Length. Such a message might indicate an attempt to perform
+	// request smuggling (Section 9.5) or response splitting (Section 9.4) and
+	// ought to be handled as an error. A sender MUST remove the received
+	// Content-Length field prior to forwarding such a message downstream."
+	//
+	// Reportedly, these appear in the wild.
+	delete(t.Header, "Content-Length")
+
+	t.Chunked = true
+
+	return nil
+}
+
+// msg is *Request or *Response.
+func readTransfer(msg any, r *bufio.Reader) (err error) {
+	t := &transferReader{RequestMethod: "GET"}
+
+	// Unify input
+	isResponse := false
+
+	switch rr := msg.(type) {
+	case *http.Response:
+		t.Header = rr.Header
+		t.StatusCode = rr.StatusCode
+		t.ProtoMajor = rr.ProtoMajor
+		t.ProtoMinor = rr.ProtoMinor
+		t.Close = shouldClose(t.ProtoMajor, t.ProtoMinor, t.Header, true)
+		isResponse = true
+
+		if rr.Request != nil {
+			t.RequestMethod = rr.Request.Method
+		}
+	case *http.Request:
+		t.Header = rr.Header
+		t.RequestMethod = rr.Method
+		t.ProtoMajor = rr.ProtoMajor
+		t.ProtoMinor = rr.ProtoMinor
+		// Transfer semantics for Requests are exactly like those for
+		// Responses with status code 200, responding to a GET method
+		t.StatusCode = 200
+		t.Close = rr.Close
+	default:
+		panic("unexpected type")
+	}
+
+	// Default to HTTP/1.1
+	if t.ProtoMajor == 0 && t.ProtoMinor == 0 {
+		t.ProtoMajor, t.ProtoMinor = 1, 1
+	}
+
+	// Transfer-Encoding: chunked, and overriding Content-Length.
+	if err := t.parseTransferEncoding(); err != nil {
+		return err
+	}
+
+	realLength, err := fixLength(isResponse, t.StatusCode, t.RequestMethod, t.Header, t.Chunked)
+	if err != nil {
+		return err
+	}
+
+	if isResponse && t.RequestMethod == http.MethodHead {
+		if n, err := parseContentLength(GetHeader(t.Header, "Content-Length")); err != nil {
+			return err
+		} else {
+			t.ContentLength = n
+		}
+	} else {
+		t.ContentLength = realLength
+	}
+
+	// Trailer
+	t.Trailer, err = fixTrailer(t.Header, t.Chunked)
+	if err != nil {
+		return err
+	}
+
+	// If there is no Content-Length or chunked Transfer-Encoding on a *Response
+	// and the status is not 1xx, 204 or 304, then the body is unbounded.
+	// See RFC 7230, section 3.3.
+	switch msg.(type) {
+	case *http.Response:
+		if realLength == -1 && !t.Chunked && bodyAllowedForStatus(t.StatusCode) {
+			// Unbounded body.
+			t.Close = true
+		}
+	}
+
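+	// To make the branches below concrete (an illustrative summary derived
+	// from the switch that follows, not an authoritative spec):
+	//
+	//	chunked                    -> decode via internal.NewChunkedReader
+	//	Content-Length == 0        -> http.NoBody
+	//	Content-Length > 0         -> io.LimitReader(r, realLength)
+	//	unknown length + close     -> read r until the peer closes (HTTP/1.0)
+	//	unknown length, keep-alive -> http.NoBody (request without a body)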
+	// Prepare body reader. ContentLength < 0 means chunked encoding
+	// or close connection when finished, since multipart is not supported yet
+	switch {
+	case t.Chunked:
+		if isResponse && (noResponseBodyExpected(t.RequestMethod) || !bodyAllowedForStatus(t.StatusCode)) {
+			t.Body = http.NoBody
+		} else {
+			t.Body = &body{src: internal.NewChunkedReader(r), hdr: msg, r: r, closing: t.Close}
+		}
+	case realLength == 0:
+		t.Body = http.NoBody
+	case realLength > 0:
+		t.Body = &body{src: io.LimitReader(r, realLength), closing: t.Close}
+	default:
+		// realLength < 0, i.e. "Content-Length" not mentioned in header
+		if t.Close {
+			// Close semantics (i.e. HTTP/1.0)
+			t.Body = &body{src: r, closing: t.Close}
+		} else {
+			// Persistent connection (i.e. HTTP/1.1)
+			t.Body = http.NoBody
+		}
+	}
+
+	// Unify output
+	switch rr := msg.(type) {
+	case *http.Request:
+		rr.Body = t.Body
+		rr.ContentLength = t.ContentLength
+
+		if t.Chunked {
+			rr.TransferEncoding = []string{"chunked"}
+		}
+
+		rr.Close = t.Close
+		rr.Trailer = t.Trailer
+	case *http.Response:
+		rr.Body = t.Body
+		rr.ContentLength = t.ContentLength
+
+		if t.Chunked {
+			rr.TransferEncoding = []string{"chunked"}
+		}
+
+		rr.Close = t.Close
+		rr.Trailer = t.Trailer
+	}
+
+	return nil
+}
diff --git a/pkg/util.go b/pkg/util.go
index 1e42f63..c16c749 100644
--- a/pkg/util.go
+++ b/pkg/util.go
@@ -1,8 +1,11 @@
 package pkg
 
 import (
+	"fmt"
 	"net/http"
+	"strings"
 
+	"golang.org/x/net/http/httpguts"
 	"myHttpServer/internal"
 )
 
@@ -69,3 +72,47 @@ func isTokenBoundary(b byte) bool {
 func ExpectsContinue(r *http.Request) bool {
 	return hasToken(GetHeader(r.Header, "Expect"), "100-continue")
 }
+
+func NumLeadingCRorLF(v []byte) (n int) {
+	for _, b := range v {
+		if b == '\r' || b == '\n' {
+			n++
+			continue
+		}
+
+		break
+	}
+
+	return
+}
+
+func badStringError(what, val string) error {
+	return fmt.Errorf("%s %q", what, val)
+}
+
+func isNotToken(r rune) bool {
+	return !httpguts.IsTokenRune(r)
+}
+
+func ValidMethod(method string) bool {
+	/*
+	     Method         = "OPTIONS"                ; Section 9.2
+	                    | "GET"                    ; Section 9.3
+	                    | "HEAD"                   ; Section 9.4
+	                    | "POST"                   ; Section 9.5
+	                    | "PUT"                    ; Section 9.6
+	                    | "DELETE"                 ; Section 9.7
+	                    | "TRACE"                  ; Section 9.8
+	                    | "CONNECT"                ; Section 9.9
+	                    | extension-method
+	   extension-method = token
+	     token          = 1*<any CHAR except CTLs or separators>
+	*/
+	return len(method) > 0 && strings.IndexFunc(method, isNotToken) == -1
+}
+
+// isH2Upgrade reports whether r represents the http2 "client preface"
+// magic string.
+func isH2Upgrade(r *http.Request) bool {
+	return r.Method == "PRI" && len(r.Header) == 0 && r.URL.Path == "*" && r.Proto == "HTTP/2.0"
+}
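diff --git a/internal/chunked_example_test.go b/internal/chunked_example_test.go
new file mode 100644
index 0000000..aaaaaaa
--- /dev/null
+++ b/internal/chunked_example_test.go
@@ -0,0 +1,33 @@
+package internal
+
+// A hedged, illustrative round-trip of the chunked writer and reader added
+// above. The file name and the idea of shipping this as an example test are
+// suggestions, and the index hash in this hunk is a placeholder.
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+)
+
+func ExampleNewChunkedWriter() {
+	var wire bytes.Buffer
+
+	// Encode two writes as two chunks, then the terminating 0-length chunk.
+	cw := NewChunkedWriter(&wire)
+	io.WriteString(cw, "hello, ")
+	io.WriteString(cw, "world")
+	cw.Close()
+	io.WriteString(&wire, "\r\n") // final CRLF after the (absent) trailers
+
+	fmt.Printf("%q\n", wire.String())
+
+	// Decode the same bytes back out of chunked framing.
+	body, _ := io.ReadAll(NewChunkedReader(&wire))
+	fmt.Printf("%q\n", body)
+
+	// Output:
+	// "7\r\nhello, \r\n5\r\nworld\r\n0\r\n\r\n"
+	// "hello, world"
+}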