From 14bb08c1df8db9ec6c8a05520d4eee67971235d9 Mon Sep 17 00:00:00 2001
From: Dimitri Sokolyuk
Date: Thu, 27 Sep 2018 20:03:23 +0200
Subject: mod tidy

---
 vendor/golang.org/x/net/http2/.gitignore           |    2 -
 vendor/golang.org/x/net/http2/Dockerfile            |   51 -
 vendor/golang.org/x/net/http2/Makefile              |    3 -
 vendor/golang.org/x/net/http2/README                |   20 -
 vendor/golang.org/x/net/http2/ciphers.go            |  641 ---
 vendor/golang.org/x/net/http2/client_conn_pool.go   |  256 -
 .../golang.org/x/net/http2/configure_transport.go   |   80 -
 vendor/golang.org/x/net/http2/databuffer.go         |  146 -
 vendor/golang.org/x/net/http2/errors.go             |  133 -
 vendor/golang.org/x/net/http2/flow.go               |   50 -
 vendor/golang.org/x/net/http2/frame.go              | 1579 ------
 vendor/golang.org/x/net/http2/go16.go               |   16 -
 vendor/golang.org/x/net/http2/go17.go               |  106 -
 vendor/golang.org/x/net/http2/go17_not18.go         |   36 -
 vendor/golang.org/x/net/http2/go18.go               |   56 -
 vendor/golang.org/x/net/http2/go19.go               |   16 -
 vendor/golang.org/x/net/http2/gotrack.go            |  170 -
 vendor/golang.org/x/net/http2/h2demo/.gitignore     |    6 -
 vendor/golang.org/x/net/http2/h2demo/Dockerfile     |   11 -
 vendor/golang.org/x/net/http2/h2demo/Dockerfile.0   |  134 -
 vendor/golang.org/x/net/http2/h2demo/Makefile       |   55 -
 vendor/golang.org/x/net/http2/h2demo/README         |   16 -
 .../x/net/http2/h2demo/deployment-prod.yaml         |   28 -
 vendor/golang.org/x/net/http2/h2demo/h2demo.go      |  543 ---
 vendor/golang.org/x/net/http2/h2demo/launch.go      |  302 --
 vendor/golang.org/x/net/http2/h2demo/rootCA.key     |   27 -
 vendor/golang.org/x/net/http2/h2demo/rootCA.pem     |   26 -
 vendor/golang.org/x/net/http2/h2demo/rootCA.srl     |    1 -
 vendor/golang.org/x/net/http2/h2demo/server.crt     |   20 -
 vendor/golang.org/x/net/http2/h2demo/server.key     |   27 -
 vendor/golang.org/x/net/http2/h2demo/service.yaml   |   17 -
 vendor/golang.org/x/net/http2/h2demo/tmpl.go        | 1991 --------
 vendor/golang.org/x/net/http2/h2i/README.md         |   97 -
 vendor/golang.org/x/net/http2/h2i/h2i.go            |  522 --
 vendor/golang.org/x/net/http2/headermap.go          |   78 -
 vendor/golang.org/x/net/http2/hpack/encode.go       |  240 -
 vendor/golang.org/x/net/http2/hpack/hpack.go        |  496 --
 vendor/golang.org/x/net/http2/hpack/huffman.go      |  212 -
 vendor/golang.org/x/net/http2/hpack/tables.go       |  479 --
 vendor/golang.org/x/net/http2/http2.go              |  391 --
 vendor/golang.org/x/net/http2/not_go16.go           |   21 -
 vendor/golang.org/x/net/http2/not_go17.go           |   87 -
 vendor/golang.org/x/net/http2/not_go18.go           |   29 -
 vendor/golang.org/x/net/http2/not_go19.go           |   16 -
 vendor/golang.org/x/net/http2/pipe.go               |  163 -
 vendor/golang.org/x/net/http2/server.go             | 2878 -----------
 .../http2/testdata/draft-ietf-httpbis-http2.xml     | 5021 --------------------
 vendor/golang.org/x/net/http2/transport.go          | 2310 ---------
 vendor/golang.org/x/net/http2/write.go              |  365 --
 vendor/golang.org/x/net/http2/writesched.go         |  242 -
 .../golang.org/x/net/http2/writesched_priority.go   |  452 --
 vendor/golang.org/x/net/http2/writesched_random.go  |   72 -
 52 files changed, 20736 deletions(-)
 delete mode 100644 vendor/golang.org/x/net/http2/.gitignore
 delete mode 100644 vendor/golang.org/x/net/http2/Dockerfile
 delete mode 100644 vendor/golang.org/x/net/http2/Makefile
 delete mode 100644 vendor/golang.org/x/net/http2/README
 delete mode 100644 vendor/golang.org/x/net/http2/ciphers.go
 delete mode 100644 vendor/golang.org/x/net/http2/client_conn_pool.go
 delete mode 100644 vendor/golang.org/x/net/http2/configure_transport.go
 delete mode 100644 vendor/golang.org/x/net/http2/databuffer.go
 delete mode 100644 vendor/golang.org/x/net/http2/errors.go
 delete mode 100644 vendor/golang.org/x/net/http2/flow.go
 delete mode 100644
vendor/golang.org/x/net/http2/frame.go delete mode 100644 vendor/golang.org/x/net/http2/go16.go delete mode 100644 vendor/golang.org/x/net/http2/go17.go delete mode 100644 vendor/golang.org/x/net/http2/go17_not18.go delete mode 100644 vendor/golang.org/x/net/http2/go18.go delete mode 100644 vendor/golang.org/x/net/http2/go19.go delete mode 100644 vendor/golang.org/x/net/http2/gotrack.go delete mode 100644 vendor/golang.org/x/net/http2/h2demo/.gitignore delete mode 100644 vendor/golang.org/x/net/http2/h2demo/Dockerfile delete mode 100644 vendor/golang.org/x/net/http2/h2demo/Dockerfile.0 delete mode 100644 vendor/golang.org/x/net/http2/h2demo/Makefile delete mode 100644 vendor/golang.org/x/net/http2/h2demo/README delete mode 100644 vendor/golang.org/x/net/http2/h2demo/deployment-prod.yaml delete mode 100644 vendor/golang.org/x/net/http2/h2demo/h2demo.go delete mode 100644 vendor/golang.org/x/net/http2/h2demo/launch.go delete mode 100644 vendor/golang.org/x/net/http2/h2demo/rootCA.key delete mode 100644 vendor/golang.org/x/net/http2/h2demo/rootCA.pem delete mode 100644 vendor/golang.org/x/net/http2/h2demo/rootCA.srl delete mode 100644 vendor/golang.org/x/net/http2/h2demo/server.crt delete mode 100644 vendor/golang.org/x/net/http2/h2demo/server.key delete mode 100644 vendor/golang.org/x/net/http2/h2demo/service.yaml delete mode 100644 vendor/golang.org/x/net/http2/h2demo/tmpl.go delete mode 100644 vendor/golang.org/x/net/http2/h2i/README.md delete mode 100644 vendor/golang.org/x/net/http2/h2i/h2i.go delete mode 100644 vendor/golang.org/x/net/http2/headermap.go delete mode 100644 vendor/golang.org/x/net/http2/hpack/encode.go delete mode 100644 vendor/golang.org/x/net/http2/hpack/hpack.go delete mode 100644 vendor/golang.org/x/net/http2/hpack/huffman.go delete mode 100644 vendor/golang.org/x/net/http2/hpack/tables.go delete mode 100644 vendor/golang.org/x/net/http2/http2.go delete mode 100644 vendor/golang.org/x/net/http2/not_go16.go delete mode 100644 vendor/golang.org/x/net/http2/not_go17.go delete mode 100644 vendor/golang.org/x/net/http2/not_go18.go delete mode 100644 vendor/golang.org/x/net/http2/not_go19.go delete mode 100644 vendor/golang.org/x/net/http2/pipe.go delete mode 100644 vendor/golang.org/x/net/http2/server.go delete mode 100644 vendor/golang.org/x/net/http2/testdata/draft-ietf-httpbis-http2.xml delete mode 100644 vendor/golang.org/x/net/http2/transport.go delete mode 100644 vendor/golang.org/x/net/http2/write.go delete mode 100644 vendor/golang.org/x/net/http2/writesched.go delete mode 100644 vendor/golang.org/x/net/http2/writesched_priority.go delete mode 100644 vendor/golang.org/x/net/http2/writesched_random.go (limited to 'vendor/golang.org/x/net/http2') diff --git a/vendor/golang.org/x/net/http2/.gitignore b/vendor/golang.org/x/net/http2/.gitignore deleted file mode 100644 index 190f122..0000000 --- a/vendor/golang.org/x/net/http2/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -*~ -h2i/h2i diff --git a/vendor/golang.org/x/net/http2/Dockerfile b/vendor/golang.org/x/net/http2/Dockerfile deleted file mode 100644 index 53fc525..0000000 --- a/vendor/golang.org/x/net/http2/Dockerfile +++ /dev/null @@ -1,51 +0,0 @@ -# -# This Dockerfile builds a recent curl with HTTP/2 client support, using -# a recent nghttp2 build. -# -# See the Makefile for how to tag it. If Docker and that image is found, the -# Go tests use this curl binary for integration tests. 
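-# (Illustration, not part of the original Dockerfile: once the image is built
-#  and tagged via the Makefile below — "docker build -t gohttp2/curl ." — it
-#  can be exercised by hand with something like
-#      docker run --rm gohttp2/curl --http2 -sI https://http2.golang.org/
-#  The flags and URL here are examples, not taken from the test harness; only
-#  the image tag comes from the Makefile.)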
-# - -FROM ubuntu:trusty - -RUN apt-get update && \ - apt-get upgrade -y && \ - apt-get install -y git-core build-essential wget - -RUN apt-get install -y --no-install-recommends \ - autotools-dev libtool pkg-config zlib1g-dev \ - libcunit1-dev libssl-dev libxml2-dev libevent-dev \ - automake autoconf - -# The list of packages nghttp2 recommends for h2load: -RUN apt-get install -y --no-install-recommends make binutils \ - autoconf automake autotools-dev \ - libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \ - libev-dev libevent-dev libjansson-dev libjemalloc-dev \ - cython python3.4-dev python-setuptools - -# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached: -ENV NGHTTP2_VER 895da9a -RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git - -WORKDIR /root/nghttp2 -RUN git reset --hard $NGHTTP2_VER -RUN autoreconf -i -RUN automake -RUN autoconf -RUN ./configure -RUN make -RUN make install - -WORKDIR /root -RUN wget http://curl.haxx.se/download/curl-7.45.0.tar.gz -RUN tar -zxvf curl-7.45.0.tar.gz -WORKDIR /root/curl-7.45.0 -RUN ./configure --with-ssl --with-nghttp2=/usr/local -RUN make -RUN make install -RUN ldconfig - -CMD ["-h"] -ENTRYPOINT ["/usr/local/bin/curl"] - diff --git a/vendor/golang.org/x/net/http2/Makefile b/vendor/golang.org/x/net/http2/Makefile deleted file mode 100644 index 55fd826..0000000 --- a/vendor/golang.org/x/net/http2/Makefile +++ /dev/null @@ -1,3 +0,0 @@ -curlimage: - docker build -t gohttp2/curl . - diff --git a/vendor/golang.org/x/net/http2/README b/vendor/golang.org/x/net/http2/README deleted file mode 100644 index 360d5aa..0000000 --- a/vendor/golang.org/x/net/http2/README +++ /dev/null @@ -1,20 +0,0 @@ -This is a work-in-progress HTTP/2 implementation for Go. - -It will eventually live in the Go standard library and won't require -any changes to your code to use. It will just be automatic. - -Status: - -* The server support is pretty good. A few things are missing - but are being worked on. -* The client work has just started but shares a lot of code - is coming along much quicker. - -Docs are at https://godoc.org/golang.org/x/net/http2 - -Demo test server at https://http2.golang.org/ - -Help & bug reports welcome! - -Contributing: https://golang.org/doc/contribute.html -Bugs: https://golang.org/issue/new?title=x/net/http2:+ diff --git a/vendor/golang.org/x/net/http2/ciphers.go b/vendor/golang.org/x/net/http2/ciphers.go deleted file mode 100644 index c9a0cf3..0000000 --- a/vendor/golang.org/x/net/http2/ciphers.go +++ /dev/null @@ -1,641 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -// A list of the possible cipher suite ids. 
Taken from -// https://www.iana.org/assignments/tls-parameters/tls-parameters.txt - -const ( - cipher_TLS_NULL_WITH_NULL_NULL uint16 = 0x0000 - cipher_TLS_RSA_WITH_NULL_MD5 uint16 = 0x0001 - cipher_TLS_RSA_WITH_NULL_SHA uint16 = 0x0002 - cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0003 - cipher_TLS_RSA_WITH_RC4_128_MD5 uint16 = 0x0004 - cipher_TLS_RSA_WITH_RC4_128_SHA uint16 = 0x0005 - cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x0006 - cipher_TLS_RSA_WITH_IDEA_CBC_SHA uint16 = 0x0007 - cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0008 - cipher_TLS_RSA_WITH_DES_CBC_SHA uint16 = 0x0009 - cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x000A - cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000B - cipher_TLS_DH_DSS_WITH_DES_CBC_SHA uint16 = 0x000C - cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x000D - cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000E - cipher_TLS_DH_RSA_WITH_DES_CBC_SHA uint16 = 0x000F - cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0010 - cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0011 - cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA uint16 = 0x0012 - cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x0013 - cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0014 - cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA uint16 = 0x0015 - cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0016 - cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0017 - cipher_TLS_DH_anon_WITH_RC4_128_MD5 uint16 = 0x0018 - cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0019 - cipher_TLS_DH_anon_WITH_DES_CBC_SHA uint16 = 0x001A - cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0x001B - // Reserved uint16 = 0x001C-1D - cipher_TLS_KRB5_WITH_DES_CBC_SHA uint16 = 0x001E - cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA uint16 = 0x001F - cipher_TLS_KRB5_WITH_RC4_128_SHA uint16 = 0x0020 - cipher_TLS_KRB5_WITH_IDEA_CBC_SHA uint16 = 0x0021 - cipher_TLS_KRB5_WITH_DES_CBC_MD5 uint16 = 0x0022 - cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5 uint16 = 0x0023 - cipher_TLS_KRB5_WITH_RC4_128_MD5 uint16 = 0x0024 - cipher_TLS_KRB5_WITH_IDEA_CBC_MD5 uint16 = 0x0025 - cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA uint16 = 0x0026 - cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA uint16 = 0x0027 - cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA uint16 = 0x0028 - cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 uint16 = 0x0029 - cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x002A - cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5 uint16 = 0x002B - cipher_TLS_PSK_WITH_NULL_SHA uint16 = 0x002C - cipher_TLS_DHE_PSK_WITH_NULL_SHA uint16 = 0x002D - cipher_TLS_RSA_PSK_WITH_NULL_SHA uint16 = 0x002E - cipher_TLS_RSA_WITH_AES_128_CBC_SHA uint16 = 0x002F - cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0030 - cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0031 - cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0032 - cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0033 - cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA uint16 = 0x0034 - cipher_TLS_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0035 - cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0036 - cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0037 - cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0038 - cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0039 - cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA uint16 = 0x003A - cipher_TLS_RSA_WITH_NULL_SHA256 uint16 = 0x003B - cipher_TLS_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003C - cipher_TLS_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x003D - cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x003E - 
cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003F - cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x0040 - cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0041 - cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0042 - cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0043 - cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0044 - cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0045 - cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0046 - // Reserved uint16 = 0x0047-4F - // Reserved uint16 = 0x0050-58 - // Reserved uint16 = 0x0059-5C - // Unassigned uint16 = 0x005D-5F - // Reserved uint16 = 0x0060-66 - cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x0067 - cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x0068 - cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x0069 - cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x006A - cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x006B - cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256 uint16 = 0x006C - cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256 uint16 = 0x006D - // Unassigned uint16 = 0x006E-83 - cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0084 - cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0085 - cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0086 - cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0087 - cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0088 - cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0089 - cipher_TLS_PSK_WITH_RC4_128_SHA uint16 = 0x008A - cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008B - cipher_TLS_PSK_WITH_AES_128_CBC_SHA uint16 = 0x008C - cipher_TLS_PSK_WITH_AES_256_CBC_SHA uint16 = 0x008D - cipher_TLS_DHE_PSK_WITH_RC4_128_SHA uint16 = 0x008E - cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008F - cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0090 - cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0091 - cipher_TLS_RSA_PSK_WITH_RC4_128_SHA uint16 = 0x0092 - cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x0093 - cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0094 - cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0095 - cipher_TLS_RSA_WITH_SEED_CBC_SHA uint16 = 0x0096 - cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA uint16 = 0x0097 - cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA uint16 = 0x0098 - cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA uint16 = 0x0099 - cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA uint16 = 0x009A - cipher_TLS_DH_anon_WITH_SEED_CBC_SHA uint16 = 0x009B - cipher_TLS_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009C - cipher_TLS_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009D - cipher_TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009E - cipher_TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009F - cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x00A0 - cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x00A1 - cipher_TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A2 - cipher_TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A3 - cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A4 - cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A5 - cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256 uint16 = 0x00A6 - cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384 uint16 = 0x00A7 - cipher_TLS_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00A8 - cipher_TLS_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00A9 - cipher_TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AA - cipher_TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AB - cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AC - cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 uint16 
= 0x00AD - cipher_TLS_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00AE - cipher_TLS_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00AF - cipher_TLS_PSK_WITH_NULL_SHA256 uint16 = 0x00B0 - cipher_TLS_PSK_WITH_NULL_SHA384 uint16 = 0x00B1 - cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B2 - cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B3 - cipher_TLS_DHE_PSK_WITH_NULL_SHA256 uint16 = 0x00B4 - cipher_TLS_DHE_PSK_WITH_NULL_SHA384 uint16 = 0x00B5 - cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B6 - cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B7 - cipher_TLS_RSA_PSK_WITH_NULL_SHA256 uint16 = 0x00B8 - cipher_TLS_RSA_PSK_WITH_NULL_SHA384 uint16 = 0x00B9 - cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BA - cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BB - cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BC - cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BD - cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BE - cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BF - cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C0 - cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C1 - cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C2 - cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C3 - cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C4 - cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C5 - // Unassigned uint16 = 0x00C6-FE - cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV uint16 = 0x00FF - // Unassigned uint16 = 0x01-55,* - cipher_TLS_FALLBACK_SCSV uint16 = 0x5600 - // Unassigned uint16 = 0x5601 - 0xC000 - cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA uint16 = 0xC001 - cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA uint16 = 0xC002 - cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC003 - cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC004 - cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC005 - cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA uint16 = 0xC006 - cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA uint16 = 0xC007 - cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC008 - cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC009 - cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC00A - cipher_TLS_ECDH_RSA_WITH_NULL_SHA uint16 = 0xC00B - cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA uint16 = 0xC00C - cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC00D - cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC00E - cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC00F - cipher_TLS_ECDHE_RSA_WITH_NULL_SHA uint16 = 0xC010 - cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA uint16 = 0xC011 - cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC012 - cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC013 - cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC014 - cipher_TLS_ECDH_anon_WITH_NULL_SHA uint16 = 0xC015 - cipher_TLS_ECDH_anon_WITH_RC4_128_SHA uint16 = 0xC016 - cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0xC017 - cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA uint16 = 0xC018 - cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA uint16 = 0xC019 - cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01A - cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01B - cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01C - cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA uint16 = 0xC01D - cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC01E - cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA uint16 = 0xC01F - cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA uint16 = 
0xC020 - cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC021 - cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA uint16 = 0xC022 - cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC023 - cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC024 - cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC025 - cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC026 - cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC027 - cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC028 - cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC029 - cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC02A - cipher_TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02B - cipher_TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02C - cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02D - cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02E - cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02F - cipher_TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC030 - cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC031 - cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC032 - cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA uint16 = 0xC033 - cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0xC034 - cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0xC035 - cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0xC036 - cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0xC037 - cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0xC038 - cipher_TLS_ECDHE_PSK_WITH_NULL_SHA uint16 = 0xC039 - cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256 uint16 = 0xC03A - cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384 uint16 = 0xC03B - cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03C - cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03D - cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03E - cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03F - cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC040 - cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC041 - cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC042 - cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC043 - cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC044 - cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC045 - cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC046 - cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC047 - cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC048 - cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC049 - cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04A - cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04B - cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04C - cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04D - cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04E - cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04F - cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC050 - cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC051 - cipher_TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC052 - cipher_TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC053 - cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC054 - cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC055 - cipher_TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC056 - cipher_TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC057 - cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC058 - cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC059 - 
cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05A - cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05B - cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05C - cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05D - cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05E - cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05F - cipher_TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC060 - cipher_TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC061 - cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC062 - cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC063 - cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC064 - cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC065 - cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC066 - cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC067 - cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC068 - cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC069 - cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06A - cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06B - cipher_TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06C - cipher_TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06D - cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06E - cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06F - cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC070 - cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC071 - cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC072 - cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC073 - cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC074 - cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC075 - cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC076 - cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC077 - cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC078 - cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC079 - cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07A - cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07B - cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07C - cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07D - cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07E - cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07F - cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC080 - cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC081 - cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC082 - cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC083 - cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC084 - cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC085 - cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC086 - cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC087 - cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC088 - cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC089 - cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08A - cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08B - cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08C - cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08D - cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08E - cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08F - cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 
0xC090 - cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC091 - cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC092 - cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC093 - cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC094 - cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC095 - cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC096 - cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC097 - cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC098 - cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC099 - cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC09A - cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC09B - cipher_TLS_RSA_WITH_AES_128_CCM uint16 = 0xC09C - cipher_TLS_RSA_WITH_AES_256_CCM uint16 = 0xC09D - cipher_TLS_DHE_RSA_WITH_AES_128_CCM uint16 = 0xC09E - cipher_TLS_DHE_RSA_WITH_AES_256_CCM uint16 = 0xC09F - cipher_TLS_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A0 - cipher_TLS_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A1 - cipher_TLS_DHE_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A2 - cipher_TLS_DHE_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A3 - cipher_TLS_PSK_WITH_AES_128_CCM uint16 = 0xC0A4 - cipher_TLS_PSK_WITH_AES_256_CCM uint16 = 0xC0A5 - cipher_TLS_DHE_PSK_WITH_AES_128_CCM uint16 = 0xC0A6 - cipher_TLS_DHE_PSK_WITH_AES_256_CCM uint16 = 0xC0A7 - cipher_TLS_PSK_WITH_AES_128_CCM_8 uint16 = 0xC0A8 - cipher_TLS_PSK_WITH_AES_256_CCM_8 uint16 = 0xC0A9 - cipher_TLS_PSK_DHE_WITH_AES_128_CCM_8 uint16 = 0xC0AA - cipher_TLS_PSK_DHE_WITH_AES_256_CCM_8 uint16 = 0xC0AB - cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM uint16 = 0xC0AC - cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM uint16 = 0xC0AD - cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 uint16 = 0xC0AE - cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8 uint16 = 0xC0AF - // Unassigned uint16 = 0xC0B0-FF - // Unassigned uint16 = 0xC1-CB,* - // Unassigned uint16 = 0xCC00-A7 - cipher_TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA8 - cipher_TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA9 - cipher_TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAA - cipher_TLS_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAB - cipher_TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAC - cipher_TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAD - cipher_TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAE -) - -// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec. -// References: -// https://tools.ietf.org/html/rfc7540#appendix-A -// Reject cipher suites from Appendix A. 
-// "This list includes those cipher suites that do not -// offer an ephemeral key exchange and those that are -// based on the TLS null, stream or block cipher type" -func isBadCipher(cipher uint16) bool { - switch cipher { - case cipher_TLS_NULL_WITH_NULL_NULL, - cipher_TLS_RSA_WITH_NULL_MD5, - cipher_TLS_RSA_WITH_NULL_SHA, - cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5, - cipher_TLS_RSA_WITH_RC4_128_MD5, - cipher_TLS_RSA_WITH_RC4_128_SHA, - cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5, - cipher_TLS_RSA_WITH_IDEA_CBC_SHA, - cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA, - cipher_TLS_RSA_WITH_DES_CBC_SHA, - cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA, - cipher_TLS_DH_DSS_WITH_DES_CBC_SHA, - cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA, - cipher_TLS_DH_RSA_WITH_DES_CBC_SHA, - cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA, - cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA, - cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA, - cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA, - cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5, - cipher_TLS_DH_anon_WITH_RC4_128_MD5, - cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA, - cipher_TLS_DH_anon_WITH_DES_CBC_SHA, - cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_KRB5_WITH_DES_CBC_SHA, - cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_KRB5_WITH_RC4_128_SHA, - cipher_TLS_KRB5_WITH_IDEA_CBC_SHA, - cipher_TLS_KRB5_WITH_DES_CBC_MD5, - cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5, - cipher_TLS_KRB5_WITH_RC4_128_MD5, - cipher_TLS_KRB5_WITH_IDEA_CBC_MD5, - cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA, - cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA, - cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA, - cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5, - cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5, - cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5, - cipher_TLS_PSK_WITH_NULL_SHA, - cipher_TLS_DHE_PSK_WITH_NULL_SHA, - cipher_TLS_RSA_PSK_WITH_NULL_SHA, - cipher_TLS_RSA_WITH_AES_128_CBC_SHA, - cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA, - cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA, - cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA, - cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA, - cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA, - cipher_TLS_RSA_WITH_AES_256_CBC_SHA, - cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA, - cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA, - cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA, - cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA, - cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA, - cipher_TLS_RSA_WITH_NULL_SHA256, - cipher_TLS_RSA_WITH_AES_128_CBC_SHA256, - cipher_TLS_RSA_WITH_AES_256_CBC_SHA256, - cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256, - cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256, - cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256, - cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA, - cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA, - cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA, - cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA, - cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA, - cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA, - cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256, - cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256, - cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256, - cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256, - cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256, - cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256, - cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256, - cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA, - cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA, - cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA, - 
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA, - cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA, - cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA, - cipher_TLS_PSK_WITH_RC4_128_SHA, - cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_PSK_WITH_AES_128_CBC_SHA, - cipher_TLS_PSK_WITH_AES_256_CBC_SHA, - cipher_TLS_DHE_PSK_WITH_RC4_128_SHA, - cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA, - cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA, - cipher_TLS_RSA_PSK_WITH_RC4_128_SHA, - cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA, - cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA, - cipher_TLS_RSA_WITH_SEED_CBC_SHA, - cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA, - cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA, - cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA, - cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA, - cipher_TLS_DH_anon_WITH_SEED_CBC_SHA, - cipher_TLS_RSA_WITH_AES_128_GCM_SHA256, - cipher_TLS_RSA_WITH_AES_256_GCM_SHA384, - cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256, - cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384, - cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256, - cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384, - cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256, - cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384, - cipher_TLS_PSK_WITH_AES_128_GCM_SHA256, - cipher_TLS_PSK_WITH_AES_256_GCM_SHA384, - cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256, - cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384, - cipher_TLS_PSK_WITH_AES_128_CBC_SHA256, - cipher_TLS_PSK_WITH_AES_256_CBC_SHA384, - cipher_TLS_PSK_WITH_NULL_SHA256, - cipher_TLS_PSK_WITH_NULL_SHA384, - cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256, - cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384, - cipher_TLS_DHE_PSK_WITH_NULL_SHA256, - cipher_TLS_DHE_PSK_WITH_NULL_SHA384, - cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256, - cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384, - cipher_TLS_RSA_PSK_WITH_NULL_SHA256, - cipher_TLS_RSA_PSK_WITH_NULL_SHA384, - cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256, - cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256, - cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256, - cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256, - cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256, - cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256, - cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256, - cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256, - cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256, - cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256, - cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256, - cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256, - cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV, - cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA, - cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA, - cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA, - cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA, - cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA, - cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, - cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - cipher_TLS_ECDH_RSA_WITH_NULL_SHA, - cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA, - cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA, - cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA, - cipher_TLS_ECDHE_RSA_WITH_NULL_SHA, - cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA, - cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - cipher_TLS_ECDH_anon_WITH_NULL_SHA, - cipher_TLS_ECDH_anon_WITH_RC4_128_SHA, - cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA, - 
cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA, - cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA, - cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA, - cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA, - cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA, - cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA, - cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA, - cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA, - cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, - cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, - cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256, - cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384, - cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, - cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, - cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256, - cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384, - cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256, - cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384, - cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256, - cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384, - cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA, - cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA, - cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA, - cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256, - cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384, - cipher_TLS_ECDHE_PSK_WITH_NULL_SHA, - cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256, - cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384, - cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256, - cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384, - cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256, - cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384, - cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256, - cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384, - cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256, - cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384, - cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256, - cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384, - cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256, - cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384, - cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256, - cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384, - cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256, - cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384, - cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256, - cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384, - cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256, - cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384, - cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256, - cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384, - cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256, - cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384, - cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256, - cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384, - cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256, - cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384, - cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256, - cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384, - cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256, - cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384, - cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256, - cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384, - cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256, - cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384, - cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256, - cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384, - cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256, - cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384, - cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256, - cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384, - cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256, - cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384, - 
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256, - cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384, - cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256, - cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384, - cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256, - cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384, - cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256, - cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384, - cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256, - cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384, - cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256, - cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384, - cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256, - cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384, - cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256, - cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384, - cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256, - cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384, - cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256, - cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384, - cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256, - cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384, - cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256, - cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384, - cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256, - cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384, - cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256, - cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384, - cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256, - cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384, - cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256, - cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384, - cipher_TLS_RSA_WITH_AES_128_CCM, - cipher_TLS_RSA_WITH_AES_256_CCM, - cipher_TLS_RSA_WITH_AES_128_CCM_8, - cipher_TLS_RSA_WITH_AES_256_CCM_8, - cipher_TLS_PSK_WITH_AES_128_CCM, - cipher_TLS_PSK_WITH_AES_256_CCM, - cipher_TLS_PSK_WITH_AES_128_CCM_8, - cipher_TLS_PSK_WITH_AES_256_CCM_8: - return true - default: - return false - } -} diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go deleted file mode 100644 index bdf5652..0000000 --- a/vendor/golang.org/x/net/http2/client_conn_pool.go +++ /dev/null @@ -1,256 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Transport code's client connection pooling. - -package http2 - -import ( - "crypto/tls" - "net/http" - "sync" -) - -// ClientConnPool manages a pool of HTTP/2 client connections. -type ClientConnPool interface { - GetClientConn(req *http.Request, addr string) (*ClientConn, error) - MarkDead(*ClientConn) -} - -// clientConnPoolIdleCloser is the interface implemented by ClientConnPool -// implementations which can close their idle connections. -type clientConnPoolIdleCloser interface { - ClientConnPool - closeIdleConnections() -} - -var ( - _ clientConnPoolIdleCloser = (*clientConnPool)(nil) - _ clientConnPoolIdleCloser = noDialClientConnPool{} -) - -// TODO: use singleflight for dialing and addConnCalls? -type clientConnPool struct { - t *Transport - - mu sync.Mutex // TODO: maybe switch to RWMutex - // TODO: add support for sharing conns based on cert names - // (e.g. 
share conn for googleapis.com and appspot.com) - conns map[string][]*ClientConn // key is host:port - dialing map[string]*dialCall // currently in-flight dials - keys map[*ClientConn][]string - addConnCalls map[string]*addConnCall // in-flight addConnIfNeede calls -} - -func (p *clientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) { - return p.getClientConn(req, addr, dialOnMiss) -} - -const ( - dialOnMiss = true - noDialOnMiss = false -) - -func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) { - if isConnectionCloseRequest(req) && dialOnMiss { - // It gets its own connection. - const singleUse = true - cc, err := p.t.dialClientConn(addr, singleUse) - if err != nil { - return nil, err - } - return cc, nil - } - p.mu.Lock() - for _, cc := range p.conns[addr] { - if cc.CanTakeNewRequest() { - p.mu.Unlock() - return cc, nil - } - } - if !dialOnMiss { - p.mu.Unlock() - return nil, ErrNoCachedConn - } - call := p.getStartDialLocked(addr) - p.mu.Unlock() - <-call.done - return call.res, call.err -} - -// dialCall is an in-flight Transport dial call to a host. -type dialCall struct { - p *clientConnPool - done chan struct{} // closed when done - res *ClientConn // valid after done is closed - err error // valid after done is closed -} - -// requires p.mu is held. -func (p *clientConnPool) getStartDialLocked(addr string) *dialCall { - if call, ok := p.dialing[addr]; ok { - // A dial is already in-flight. Don't start another. - return call - } - call := &dialCall{p: p, done: make(chan struct{})} - if p.dialing == nil { - p.dialing = make(map[string]*dialCall) - } - p.dialing[addr] = call - go call.dial(addr) - return call -} - -// run in its own goroutine. -func (c *dialCall) dial(addr string) { - const singleUse = false // shared conn - c.res, c.err = c.p.t.dialClientConn(addr, singleUse) - close(c.done) - - c.p.mu.Lock() - delete(c.p.dialing, addr) - if c.err == nil { - c.p.addConnLocked(addr, c.res) - } - c.p.mu.Unlock() -} - -// addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't -// already exist. It coalesces concurrent calls with the same key. -// This is used by the http1 Transport code when it creates a new connection. Because -// the http1 Transport doesn't de-dup TCP dials to outbound hosts (because it doesn't know -// the protocol), it can get into a situation where it has multiple TLS connections. -// This code decides which ones live or die. -// The return value used is whether c was used. -// c is never closed. 
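// Concretely (snippet mirroring the TLSNextProto upgrade hook in
// configure_transport.go, later in this patch, which is the caller):
//
//	used, err := connPool.addConnIfNeeded(addr, t2, c)
//	if err != nil {
//		go c.Close()
//		return erringRoundTripper{err}
//	} else if !used {
//		// Another connection to addr already won the race; this one is redundant.
//		go c.Close()
//	}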
-func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) (used bool, err error) { - p.mu.Lock() - for _, cc := range p.conns[key] { - if cc.CanTakeNewRequest() { - p.mu.Unlock() - return false, nil - } - } - call, dup := p.addConnCalls[key] - if !dup { - if p.addConnCalls == nil { - p.addConnCalls = make(map[string]*addConnCall) - } - call = &addConnCall{ - p: p, - done: make(chan struct{}), - } - p.addConnCalls[key] = call - go call.run(t, key, c) - } - p.mu.Unlock() - - <-call.done - if call.err != nil { - return false, call.err - } - return !dup, nil -} - -type addConnCall struct { - p *clientConnPool - done chan struct{} // closed when done - err error -} - -func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) { - cc, err := t.NewClientConn(tc) - - p := c.p - p.mu.Lock() - if err != nil { - c.err = err - } else { - p.addConnLocked(key, cc) - } - delete(p.addConnCalls, key) - p.mu.Unlock() - close(c.done) -} - -func (p *clientConnPool) addConn(key string, cc *ClientConn) { - p.mu.Lock() - p.addConnLocked(key, cc) - p.mu.Unlock() -} - -// p.mu must be held -func (p *clientConnPool) addConnLocked(key string, cc *ClientConn) { - for _, v := range p.conns[key] { - if v == cc { - return - } - } - if p.conns == nil { - p.conns = make(map[string][]*ClientConn) - } - if p.keys == nil { - p.keys = make(map[*ClientConn][]string) - } - p.conns[key] = append(p.conns[key], cc) - p.keys[cc] = append(p.keys[cc], key) -} - -func (p *clientConnPool) MarkDead(cc *ClientConn) { - p.mu.Lock() - defer p.mu.Unlock() - for _, key := range p.keys[cc] { - vv, ok := p.conns[key] - if !ok { - continue - } - newList := filterOutClientConn(vv, cc) - if len(newList) > 0 { - p.conns[key] = newList - } else { - delete(p.conns, key) - } - } - delete(p.keys, cc) -} - -func (p *clientConnPool) closeIdleConnections() { - p.mu.Lock() - defer p.mu.Unlock() - // TODO: don't close a cc if it was just added to the pool - // milliseconds ago and has never been used. There's currently - // a small race window with the HTTP/1 Transport's integration - // where it can add an idle conn just before using it, and - // somebody else can concurrently call CloseIdleConns and - // break some caller's RoundTrip. - for _, vv := range p.conns { - for _, cc := range vv { - cc.closeIfIdle() - } - } -} - -func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn { - out := in[:0] - for _, v := range in { - if v != exclude { - out = append(out, v) - } - } - // If we filtered it out, zero out the last item to prevent - // the GC from seeing it. - if len(in) != len(out) { - in[len(in)-1] = nil - } - return out -} - -// noDialClientConnPool is an implementation of http2.ClientConnPool -// which never dials. We let the HTTP/1.1 client dial and use its TLS -// connection instead. -type noDialClientConnPool struct{ *clientConnPool } - -func (p noDialClientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) { - return p.getClientConn(req, addr, noDialOnMiss) -} diff --git a/vendor/golang.org/x/net/http2/configure_transport.go b/vendor/golang.org/x/net/http2/configure_transport.go deleted file mode 100644 index 088d6e2..0000000 --- a/vendor/golang.org/x/net/http2/configure_transport.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
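The next file, configure_transport.go, is the machinery behind the package's exported ConfigureTransport entry point: it registers an "h2" TLSNextProto hook on an existing *http.Transport so HTTP/2 is negotiated transparently. A minimal usage sketch (the target URL is only an example):

package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	t := &http.Transport{}
	// Wires the noDialClientConnPool and upgrade hook shown below into t.
	if err := http2.ConfigureTransport(t); err != nil {
		log.Fatal(err)
	}
	res, err := (&http.Client{Transport: t}).Get("https://http2.golang.org/")
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()
	log.Println(res.Proto) // "HTTP/2.0" when the server negotiates h2
}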
- -// +build go1.6 - -package http2 - -import ( - "crypto/tls" - "fmt" - "net/http" -) - -func configureTransport(t1 *http.Transport) (*Transport, error) { - connPool := new(clientConnPool) - t2 := &Transport{ - ConnPool: noDialClientConnPool{connPool}, - t1: t1, - } - connPool.t = t2 - if err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil { - return nil, err - } - if t1.TLSClientConfig == nil { - t1.TLSClientConfig = new(tls.Config) - } - if !strSliceContains(t1.TLSClientConfig.NextProtos, "h2") { - t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...) - } - if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") { - t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1") - } - upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper { - addr := authorityAddr("https", authority) - if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil { - go c.Close() - return erringRoundTripper{err} - } else if !used { - // Turns out we don't need this c. - // For example, two goroutines made requests to the same host - // at the same time, both kicking off TCP dials. (since protocol - // was unknown) - go c.Close() - } - return t2 - } - if m := t1.TLSNextProto; len(m) == 0 { - t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{ - "h2": upgradeFn, - } - } else { - m["h2"] = upgradeFn - } - return t2, nil -} - -// registerHTTPSProtocol calls Transport.RegisterProtocol but -// converting panics into errors. -func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) { - defer func() { - if e := recover(); e != nil { - err = fmt.Errorf("%v", e) - } - }() - t.RegisterProtocol("https", rt) - return nil -} - -// noDialH2RoundTripper is a RoundTripper which only tries to complete the request -// if there's already has a cached connection to the host. -type noDialH2RoundTripper struct{ t *Transport } - -func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - res, err := rt.t.RoundTrip(req) - if isNoCachedConnError(err) { - return nil, http.ErrSkipAltProtocol - } - return res, err -} diff --git a/vendor/golang.org/x/net/http2/databuffer.go b/vendor/golang.org/x/net/http2/databuffer.go deleted file mode 100644 index a3067f8..0000000 --- a/vendor/golang.org/x/net/http2/databuffer.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "errors" - "fmt" - "sync" -) - -// Buffer chunks are allocated from a pool to reduce pressure on GC. -// The maximum wasted space per dataBuffer is 2x the largest size class, -// which happens when the dataBuffer has multiple chunks and there is -// one unread byte in both the first and last chunks. We use a few size -// classes to minimize overheads for servers that typically receive very -// small request bodies. -// -// TODO: Benchmark to determine if the pools are necessary. 
The GC may have -// improved enough that we can instead allocate chunks like this: -// make([]byte, max(16<<10, expectedBytesRemaining)) -var ( - dataChunkSizeClasses = []int{ - 1 << 10, - 2 << 10, - 4 << 10, - 8 << 10, - 16 << 10, - } - dataChunkPools = [...]sync.Pool{ - {New: func() interface{} { return make([]byte, 1<<10) }}, - {New: func() interface{} { return make([]byte, 2<<10) }}, - {New: func() interface{} { return make([]byte, 4<<10) }}, - {New: func() interface{} { return make([]byte, 8<<10) }}, - {New: func() interface{} { return make([]byte, 16<<10) }}, - } -) - -func getDataBufferChunk(size int64) []byte { - i := 0 - for ; i < len(dataChunkSizeClasses)-1; i++ { - if size <= int64(dataChunkSizeClasses[i]) { - break - } - } - return dataChunkPools[i].Get().([]byte) -} - -func putDataBufferChunk(p []byte) { - for i, n := range dataChunkSizeClasses { - if len(p) == n { - dataChunkPools[i].Put(p) - return - } - } - panic(fmt.Sprintf("unexpected buffer len=%v", len(p))) -} - -// dataBuffer is an io.ReadWriter backed by a list of data chunks. -// Each dataBuffer is used to read DATA frames on a single stream. -// The buffer is divided into chunks so the server can limit the -// total memory used by a single connection without limiting the -// request body size on any single stream. -type dataBuffer struct { - chunks [][]byte - r int // next byte to read is chunks[0][r] - w int // next byte to write is chunks[len(chunks)-1][w] - size int // total buffered bytes - expected int64 // we expect at least this many bytes in future Write calls (ignored if <= 0) -} - -var errReadEmpty = errors.New("read from empty dataBuffer") - -// Read copies bytes from the buffer into p. -// It is an error to read when no data is available. -func (b *dataBuffer) Read(p []byte) (int, error) { - if b.size == 0 { - return 0, errReadEmpty - } - var ntotal int - for len(p) > 0 && b.size > 0 { - readFrom := b.bytesFromFirstChunk() - n := copy(p, readFrom) - p = p[n:] - ntotal += n - b.r += n - b.size -= n - // If the first chunk has been consumed, advance to the next chunk. - if b.r == len(b.chunks[0]) { - putDataBufferChunk(b.chunks[0]) - end := len(b.chunks) - 1 - copy(b.chunks[:end], b.chunks[1:]) - b.chunks[end] = nil - b.chunks = b.chunks[:end] - b.r = 0 - } - } - return ntotal, nil -} - -func (b *dataBuffer) bytesFromFirstChunk() []byte { - if len(b.chunks) == 1 { - return b.chunks[0][b.r:b.w] - } - return b.chunks[0][b.r:] -} - -// Len returns the number of bytes of the unread portion of the buffer. -func (b *dataBuffer) Len() int { - return b.size -} - -// Write appends p to the buffer. -func (b *dataBuffer) Write(p []byte) (int, error) { - ntotal := len(p) - for len(p) > 0 { - // If the last chunk is empty, allocate a new chunk. Try to allocate - // enough to fully copy p plus any additional bytes we expect to - // receive. However, this may allocate less than len(p). 
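		// Worked example (illustrative numbers): with len(p) == 600 and
		// b.expected == 9000, want becomes 9000 and getDataBufferChunk(9000)
		// hands back a 16 KB chunk, the smallest size class >= 9000.
		// If want exceeded the largest class, a 16 KB chunk is still used,
		// so one pass may copy fewer than len(p) bytes and the loop simply
		// runs again for the remainder.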
- want := int64(len(p)) - if b.expected > want { - want = b.expected - } - chunk := b.lastChunkOrAlloc(want) - n := copy(chunk[b.w:], p) - p = p[n:] - b.w += n - b.size += n - b.expected -= int64(n) - } - return ntotal, nil -} - -func (b *dataBuffer) lastChunkOrAlloc(want int64) []byte { - if len(b.chunks) != 0 { - last := b.chunks[len(b.chunks)-1] - if b.w < len(last) { - return last - } - } - chunk := getDataBufferChunk(want) - b.chunks = append(b.chunks, chunk) - b.w = 0 - return chunk -} diff --git a/vendor/golang.org/x/net/http2/errors.go b/vendor/golang.org/x/net/http2/errors.go deleted file mode 100644 index 71f2c46..0000000 --- a/vendor/golang.org/x/net/http2/errors.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "errors" - "fmt" -) - -// An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec. -type ErrCode uint32 - -const ( - ErrCodeNo ErrCode = 0x0 - ErrCodeProtocol ErrCode = 0x1 - ErrCodeInternal ErrCode = 0x2 - ErrCodeFlowControl ErrCode = 0x3 - ErrCodeSettingsTimeout ErrCode = 0x4 - ErrCodeStreamClosed ErrCode = 0x5 - ErrCodeFrameSize ErrCode = 0x6 - ErrCodeRefusedStream ErrCode = 0x7 - ErrCodeCancel ErrCode = 0x8 - ErrCodeCompression ErrCode = 0x9 - ErrCodeConnect ErrCode = 0xa - ErrCodeEnhanceYourCalm ErrCode = 0xb - ErrCodeInadequateSecurity ErrCode = 0xc - ErrCodeHTTP11Required ErrCode = 0xd -) - -var errCodeName = map[ErrCode]string{ - ErrCodeNo: "NO_ERROR", - ErrCodeProtocol: "PROTOCOL_ERROR", - ErrCodeInternal: "INTERNAL_ERROR", - ErrCodeFlowControl: "FLOW_CONTROL_ERROR", - ErrCodeSettingsTimeout: "SETTINGS_TIMEOUT", - ErrCodeStreamClosed: "STREAM_CLOSED", - ErrCodeFrameSize: "FRAME_SIZE_ERROR", - ErrCodeRefusedStream: "REFUSED_STREAM", - ErrCodeCancel: "CANCEL", - ErrCodeCompression: "COMPRESSION_ERROR", - ErrCodeConnect: "CONNECT_ERROR", - ErrCodeEnhanceYourCalm: "ENHANCE_YOUR_CALM", - ErrCodeInadequateSecurity: "INADEQUATE_SECURITY", - ErrCodeHTTP11Required: "HTTP_1_1_REQUIRED", -} - -func (e ErrCode) String() string { - if s, ok := errCodeName[e]; ok { - return s - } - return fmt.Sprintf("unknown error code 0x%x", uint32(e)) -} - -// ConnectionError is an error that results in the termination of the -// entire connection. -type ConnectionError ErrCode - -func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: %s", ErrCode(e)) } - -// StreamError is an error that only affects one stream within an -// HTTP/2 connection. -type StreamError struct { - StreamID uint32 - Code ErrCode - Cause error // optional additional detail -} - -func streamError(id uint32, code ErrCode) StreamError { - return StreamError{StreamID: id, Code: code} -} - -func (e StreamError) Error() string { - if e.Cause != nil { - return fmt.Sprintf("stream error: stream ID %d; %v; %v", e.StreamID, e.Code, e.Cause) - } - return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code) -} - -// 6.9.1 The Flow Control Window -// "If a sender receives a WINDOW_UPDATE that causes a flow control -// window to exceed this maximum it MUST terminate either the stream -// or the connection, as appropriate. For streams, [...]; for the -// connection, a GOAWAY frame with a FLOW_CONTROL_ERROR code." 
-type goAwayFlowError struct{} - -func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" } - -// connError represents an HTTP/2 ConnectionError error code, along -// with a string (for debugging) explaining why. -// -// Errors of this type are only returned by the frame parser functions -// and converted into ConnectionError(Code), after stashing away -// the Reason into the Framer's errDetail field, accessible via -// the (*Framer).ErrorDetail method. -type connError struct { - Code ErrCode // the ConnectionError error code - Reason string // additional reason -} - -func (e connError) Error() string { - return fmt.Sprintf("http2: connection error: %v: %v", e.Code, e.Reason) -} - -type pseudoHeaderError string - -func (e pseudoHeaderError) Error() string { - return fmt.Sprintf("invalid pseudo-header %q", string(e)) -} - -type duplicatePseudoHeaderError string - -func (e duplicatePseudoHeaderError) Error() string { - return fmt.Sprintf("duplicate pseudo-header %q", string(e)) -} - -type headerFieldNameError string - -func (e headerFieldNameError) Error() string { - return fmt.Sprintf("invalid header field name %q", string(e)) -} - -type headerFieldValueError string - -func (e headerFieldValueError) Error() string { - return fmt.Sprintf("invalid header field value %q", string(e)) -} - -var ( - errMixPseudoHeaderTypes = errors.New("mix of request and response pseudo headers") - errPseudoAfterRegular = errors.New("pseudo header field after regular") -) diff --git a/vendor/golang.org/x/net/http2/flow.go b/vendor/golang.org/x/net/http2/flow.go deleted file mode 100644 index cea601f..0000000 --- a/vendor/golang.org/x/net/http2/flow.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Flow control - -package http2 - -// flow is the flow control window's size. -type flow struct { - // n is the number of DATA bytes we're allowed to send. - // A flow is kept both on a conn and a per-stream. - n int32 - - // conn points to the shared connection-level flow that is - // shared by all streams on that conn. It is nil for the flow - // that's on the conn directly. - conn *flow -} - -func (f *flow) setConnFlow(cf *flow) { f.conn = cf } - -func (f *flow) available() int32 { - n := f.n - if f.conn != nil && f.conn.n < n { - n = f.conn.n - } - return n -} - -func (f *flow) take(n int32) { - if n > f.available() { - panic("internal error: took too much") - } - f.n -= n - if f.conn != nil { - f.conn.n -= n - } -} - -// add adds n bytes (positive or negative) to the flow control window. -// It returns false if the sum would exceed 2^31-1. -func (f *flow) add(n int32) bool { - sum := f.n + n - if (sum > n) == (f.n > 0) { - f.n = sum - return true - } - return false -} diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go deleted file mode 100644 index e325007..0000000 --- a/vendor/golang.org/x/net/http2/frame.go +++ /dev/null @@ -1,1579 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
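// --- Self-contained illustration (mirroring the unexported flow type above,
// not package code) of how a per-stream window is capped by the shared
// connection-level window and how add rejects additions that would exceed
// the 2^31-1 maximum.
package main

import "fmt"

type window struct {
	n    int32
	conn *window // nil for the connection-level window itself
}

func (w *window) available() int32 {
	n := w.n
	if w.conn != nil && w.conn.n < n {
		n = w.conn.n
	}
	return n
}

func (w *window) add(n int32) bool {
	sum := w.n + n
	if (sum > n) == (w.n > 0) { // same sign test as above: detects int32 overflow
		w.n = sum
		return true
	}
	return false
}

func main() {
	conn := &window{n: 1000}
	stream := &window{n: 65535, conn: conn}
	fmt.Println(stream.available()) // 1000: limited by the connection window
	fmt.Println(conn.add(1 << 30))  // true
	fmt.Println(conn.add(1 << 30))  // false: would exceed 2^31-1
}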
- -package http2 - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "log" - "strings" - "sync" - - "golang.org/x/net/http/httpguts" - "golang.org/x/net/http2/hpack" -) - -const frameHeaderLen = 9 - -var padZeros = make([]byte, 255) // zeros for padding - -// A FrameType is a registered frame type as defined in -// http://http2.github.io/http2-spec/#rfc.section.11.2 -type FrameType uint8 - -const ( - FrameData FrameType = 0x0 - FrameHeaders FrameType = 0x1 - FramePriority FrameType = 0x2 - FrameRSTStream FrameType = 0x3 - FrameSettings FrameType = 0x4 - FramePushPromise FrameType = 0x5 - FramePing FrameType = 0x6 - FrameGoAway FrameType = 0x7 - FrameWindowUpdate FrameType = 0x8 - FrameContinuation FrameType = 0x9 -) - -var frameName = map[FrameType]string{ - FrameData: "DATA", - FrameHeaders: "HEADERS", - FramePriority: "PRIORITY", - FrameRSTStream: "RST_STREAM", - FrameSettings: "SETTINGS", - FramePushPromise: "PUSH_PROMISE", - FramePing: "PING", - FrameGoAway: "GOAWAY", - FrameWindowUpdate: "WINDOW_UPDATE", - FrameContinuation: "CONTINUATION", -} - -func (t FrameType) String() string { - if s, ok := frameName[t]; ok { - return s - } - return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t)) -} - -// Flags is a bitmask of HTTP/2 flags. -// The meaning of flags varies depending on the frame type. -type Flags uint8 - -// Has reports whether f contains all (0 or more) flags in v. -func (f Flags) Has(v Flags) bool { - return (f & v) == v -} - -// Frame-specific FrameHeader flag bits. -const ( - // Data Frame - FlagDataEndStream Flags = 0x1 - FlagDataPadded Flags = 0x8 - - // Headers Frame - FlagHeadersEndStream Flags = 0x1 - FlagHeadersEndHeaders Flags = 0x4 - FlagHeadersPadded Flags = 0x8 - FlagHeadersPriority Flags = 0x20 - - // Settings Frame - FlagSettingsAck Flags = 0x1 - - // Ping Frame - FlagPingAck Flags = 0x1 - - // Continuation Frame - FlagContinuationEndHeaders Flags = 0x4 - - FlagPushPromiseEndHeaders Flags = 0x4 - FlagPushPromisePadded Flags = 0x8 -) - -var flagName = map[FrameType]map[Flags]string{ - FrameData: { - FlagDataEndStream: "END_STREAM", - FlagDataPadded: "PADDED", - }, - FrameHeaders: { - FlagHeadersEndStream: "END_STREAM", - FlagHeadersEndHeaders: "END_HEADERS", - FlagHeadersPadded: "PADDED", - FlagHeadersPriority: "PRIORITY", - }, - FrameSettings: { - FlagSettingsAck: "ACK", - }, - FramePing: { - FlagPingAck: "ACK", - }, - FrameContinuation: { - FlagContinuationEndHeaders: "END_HEADERS", - }, - FramePushPromise: { - FlagPushPromiseEndHeaders: "END_HEADERS", - FlagPushPromisePadded: "PADDED", - }, -} - -// a frameParser parses a frame given its FrameHeader and payload -// bytes. The length of payload will always equal fh.Length (which -// might be 0). -type frameParser func(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) - -var frameParsers = map[FrameType]frameParser{ - FrameData: parseDataFrame, - FrameHeaders: parseHeadersFrame, - FramePriority: parsePriorityFrame, - FrameRSTStream: parseRSTStreamFrame, - FrameSettings: parseSettingsFrame, - FramePushPromise: parsePushPromise, - FramePing: parsePingFrame, - FrameGoAway: parseGoAwayFrame, - FrameWindowUpdate: parseWindowUpdateFrame, - FrameContinuation: parseContinuationFrame, -} - -func typeFrameParser(t FrameType) frameParser { - if f := frameParsers[t]; f != nil { - return f - } - return parseUnknownFrame -} - -// A FrameHeader is the 9 byte header of all HTTP/2 frames. 
-// -// See http://http2.github.io/http2-spec/#FrameHeader -type FrameHeader struct { - valid bool // caller can access []byte fields in the Frame - - // Type is the 1 byte frame type. There are ten standard frame - // types, but extension frame types may be written by WriteRawFrame - // and will be returned by ReadFrame (as UnknownFrame). - Type FrameType - - // Flags are the 1 byte of 8 potential bit flags per frame. - // They are specific to the frame type. - Flags Flags - - // Length is the length of the frame, not including the 9 byte header. - // The maximum size is one byte less than 16MB (uint24), but only - // frames up to 16KB are allowed without peer agreement. - Length uint32 - - // StreamID is which stream this frame is for. Certain frames - // are not stream-specific, in which case this field is 0. - StreamID uint32 -} - -// Header returns h. It exists so FrameHeaders can be embedded in other -// specific frame types and implement the Frame interface. -func (h FrameHeader) Header() FrameHeader { return h } - -func (h FrameHeader) String() string { - var buf bytes.Buffer - buf.WriteString("[FrameHeader ") - h.writeDebug(&buf) - buf.WriteByte(']') - return buf.String() -} - -func (h FrameHeader) writeDebug(buf *bytes.Buffer) { - buf.WriteString(h.Type.String()) - if h.Flags != 0 { - buf.WriteString(" flags=") - set := 0 - for i := uint8(0); i < 8; i++ { - if h.Flags&(1< 1 { - buf.WriteByte('|') - } - name := flagName[h.Type][Flags(1<>24), - byte(streamID>>16), - byte(streamID>>8), - byte(streamID)) -} - -func (f *Framer) endWrite() error { - // Now that we know the final size, fill in the FrameHeader in - // the space previously reserved for it. Abuse append. - length := len(f.wbuf) - frameHeaderLen - if length >= (1 << 24) { - return ErrFrameTooLarge - } - _ = append(f.wbuf[:0], - byte(length>>16), - byte(length>>8), - byte(length)) - if f.logWrites { - f.logWrite() - } - - n, err := f.w.Write(f.wbuf) - if err == nil && n != len(f.wbuf) { - err = io.ErrShortWrite - } - return err -} - -func (f *Framer) logWrite() { - if f.debugFramer == nil { - f.debugFramerBuf = new(bytes.Buffer) - f.debugFramer = NewFramer(nil, f.debugFramerBuf) - f.debugFramer.logReads = false // we log it ourselves, saying "wrote" below - // Let us read anything, even if we accidentally wrote it - // in the wrong order: - f.debugFramer.AllowIllegalReads = true - } - f.debugFramerBuf.Write(f.wbuf) - fr, err := f.debugFramer.ReadFrame() - if err != nil { - f.debugWriteLoggerf("http2: Framer %p: failed to decode just-written frame", f) - return - } - f.debugWriteLoggerf("http2: Framer %p: wrote %v", f, summarizeFrame(fr)) -} - -func (f *Framer) writeByte(v byte) { f.wbuf = append(f.wbuf, v) } -func (f *Framer) writeBytes(v []byte) { f.wbuf = append(f.wbuf, v...) } -func (f *Framer) writeUint16(v uint16) { f.wbuf = append(f.wbuf, byte(v>>8), byte(v)) } -func (f *Framer) writeUint32(v uint32) { - f.wbuf = append(f.wbuf, byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) -} - -const ( - minMaxFrameSize = 1 << 14 - maxFrameSize = 1<<24 - 1 -) - -// SetReuseFrames allows the Framer to reuse Frames. -// If called on a Framer, Frames returned by calls to ReadFrame are only -// valid until the next call to ReadFrame. 
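// --- Worked example (standalone, not package code) of the 9-byte frame
// header layout described above: a 24-bit length, 8-bit type, 8-bit flags,
// then a 32-bit field whose high (reserved) bit is masked off to yield the
// stream ID.
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// A SETTINGS frame header: length=6, type=0x4, flags=0, stream=0.
	hdr := []byte{0x00, 0x00, 0x06, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00}

	length := uint32(hdr[0])<<16 | uint32(hdr[1])<<8 | uint32(hdr[2])
	ftype := hdr[3]
	flags := hdr[4]
	streamID := binary.BigEndian.Uint32(hdr[5:]) & (1<<31 - 1) // drop the reserved bit

	fmt.Printf("len=%d type=0x%x flags=0x%x stream=%d\n", length, ftype, flags, streamID)
	// Output: len=6 type=0x4 flags=0x0 stream=0
}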
-func (fr *Framer) SetReuseFrames() { - if fr.frameCache != nil { - return - } - fr.frameCache = &frameCache{} -} - -type frameCache struct { - dataFrame DataFrame -} - -func (fc *frameCache) getDataFrame() *DataFrame { - if fc == nil { - return &DataFrame{} - } - return &fc.dataFrame -} - -// NewFramer returns a Framer that writes frames to w and reads them from r. -func NewFramer(w io.Writer, r io.Reader) *Framer { - fr := &Framer{ - w: w, - r: r, - logReads: logFrameReads, - logWrites: logFrameWrites, - debugReadLoggerf: log.Printf, - debugWriteLoggerf: log.Printf, - } - fr.getReadBuf = func(size uint32) []byte { - if cap(fr.readBuf) >= int(size) { - return fr.readBuf[:size] - } - fr.readBuf = make([]byte, size) - return fr.readBuf - } - fr.SetMaxReadFrameSize(maxFrameSize) - return fr -} - -// SetMaxReadFrameSize sets the maximum size of a frame -// that will be read by a subsequent call to ReadFrame. -// It is the caller's responsibility to advertise this -// limit with a SETTINGS frame. -func (fr *Framer) SetMaxReadFrameSize(v uint32) { - if v > maxFrameSize { - v = maxFrameSize - } - fr.maxReadSize = v -} - -// ErrorDetail returns a more detailed error of the last error -// returned by Framer.ReadFrame. For instance, if ReadFrame -// returns a StreamError with code PROTOCOL_ERROR, ErrorDetail -// will say exactly what was invalid. ErrorDetail is not guaranteed -// to return a non-nil value and like the rest of the http2 package, -// its return value is not protected by an API compatibility promise. -// ErrorDetail is reset after the next call to ReadFrame. -func (fr *Framer) ErrorDetail() error { - return fr.errDetail -} - -// ErrFrameTooLarge is returned from Framer.ReadFrame when the peer -// sends a frame that is larger than declared with SetMaxReadFrameSize. -var ErrFrameTooLarge = errors.New("http2: frame too large") - -// terminalReadFrameError reports whether err is an unrecoverable -// error from ReadFrame and no other frames should be read. -func terminalReadFrameError(err error) bool { - if _, ok := err.(StreamError); ok { - return false - } - return err != nil -} - -// ReadFrame reads a single frame. The returned Frame is only valid -// until the next call to ReadFrame. -// -// If the frame is larger than previously set with SetMaxReadFrameSize, the -// returned error is ErrFrameTooLarge. Other errors may be of type -// ConnectionError, StreamError, or anything else from the underlying -// reader. -func (fr *Framer) ReadFrame() (Frame, error) { - fr.errDetail = nil - if fr.lastFrame != nil { - fr.lastFrame.invalidate() - } - fh, err := readFrameHeader(fr.headerBuf[:], fr.r) - if err != nil { - return nil, err - } - if fh.Length > fr.maxReadSize { - return nil, ErrFrameTooLarge - } - payload := fr.getReadBuf(fh.Length) - if _, err := io.ReadFull(fr.r, payload); err != nil { - return nil, err - } - f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, payload) - if err != nil { - if ce, ok := err.(connError); ok { - return nil, fr.connError(ce.Code, ce.Reason) - } - return nil, err - } - if err := fr.checkFrameOrder(f); err != nil { - return nil, err - } - if fr.logReads { - fr.debugReadLoggerf("http2: Framer %p: read %v", fr, summarizeFrame(f)) - } - if fh.Type == FrameHeaders && fr.ReadMetaHeaders != nil { - return fr.readMetaFrame(f.(*HeadersFrame)) - } - return f, nil -} - -// connError returns ConnectionError(code) but first -// stashes away a public reason to the caller can optionally relay it -// to the peer before hanging up on them. 
This might help others debug -// their implementations. -func (fr *Framer) connError(code ErrCode, reason string) error { - fr.errDetail = errors.New(reason) - return ConnectionError(code) -} - -// checkFrameOrder reports an error if f is an invalid frame to return -// next from ReadFrame. Mostly it checks whether HEADERS and -// CONTINUATION frames are contiguous. -func (fr *Framer) checkFrameOrder(f Frame) error { - last := fr.lastFrame - fr.lastFrame = f - if fr.AllowIllegalReads { - return nil - } - - fh := f.Header() - if fr.lastHeaderStream != 0 { - if fh.Type != FrameContinuation { - return fr.connError(ErrCodeProtocol, - fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d", - fh.Type, fh.StreamID, - last.Header().Type, fr.lastHeaderStream)) - } - if fh.StreamID != fr.lastHeaderStream { - return fr.connError(ErrCodeProtocol, - fmt.Sprintf("got CONTINUATION for stream %d; expected stream %d", - fh.StreamID, fr.lastHeaderStream)) - } - } else if fh.Type == FrameContinuation { - return fr.connError(ErrCodeProtocol, fmt.Sprintf("unexpected CONTINUATION for stream %d", fh.StreamID)) - } - - switch fh.Type { - case FrameHeaders, FrameContinuation: - if fh.Flags.Has(FlagHeadersEndHeaders) { - fr.lastHeaderStream = 0 - } else { - fr.lastHeaderStream = fh.StreamID - } - } - - return nil -} - -// A DataFrame conveys arbitrary, variable-length sequences of octets -// associated with a stream. -// See http://http2.github.io/http2-spec/#rfc.section.6.1 -type DataFrame struct { - FrameHeader - data []byte -} - -func (f *DataFrame) StreamEnded() bool { - return f.FrameHeader.Flags.Has(FlagDataEndStream) -} - -// Data returns the frame's data octets, not including any padding -// size byte or padding suffix bytes. -// The caller must not retain the returned memory past the next -// call to ReadFrame. -func (f *DataFrame) Data() []byte { - f.checkValid() - return f.data -} - -func parseDataFrame(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) { - if fh.StreamID == 0 { - // DATA frames MUST be associated with a stream. If a - // DATA frame is received whose stream identifier - // field is 0x0, the recipient MUST respond with a - // connection error (Section 5.4.1) of type - // PROTOCOL_ERROR. - return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"} - } - f := fc.getDataFrame() - f.FrameHeader = fh - - var padSize byte - if fh.Flags.Has(FlagDataPadded) { - var err error - payload, padSize, err = readByte(payload) - if err != nil { - return nil, err - } - } - if int(padSize) > len(payload) { - // If the length of the padding is greater than the - // length of the frame payload, the recipient MUST - // treat this as a connection error. - // Filed: https://github.com/http2/http2-spec/issues/610 - return nil, connError{ErrCodeProtocol, "pad size larger than data payload"} - } - f.data = payload[:len(payload)-int(padSize)] - return f, nil -} - -var ( - errStreamID = errors.New("invalid stream ID") - errDepStreamID = errors.New("invalid dependent stream ID") - errPadLength = errors.New("pad length too large") - errPadBytes = errors.New("padding bytes must all be zeros unless AllowIllegalWrites is enabled") -) - -func validStreamIDOrZero(streamID uint32) bool { - return streamID&(1<<31) == 0 -} - -func validStreamID(streamID uint32) bool { - return streamID != 0 && streamID&(1<<31) == 0 -} - -// WriteData writes a DATA frame. -// -// It will perform exactly one Write to the underlying Writer. 
-// It is the caller's responsibility not to violate the maximum frame size -// and to not call other Write methods concurrently. -func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error { - return f.WriteDataPadded(streamID, endStream, data, nil) -} - -// WriteData writes a DATA frame with optional padding. -// -// If pad is nil, the padding bit is not sent. -// The length of pad must not exceed 255 bytes. -// The bytes of pad must all be zero, unless f.AllowIllegalWrites is set. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility not to violate the maximum frame size -// and to not call other Write methods concurrently. -func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error { - if !validStreamID(streamID) && !f.AllowIllegalWrites { - return errStreamID - } - if len(pad) > 0 { - if len(pad) > 255 { - return errPadLength - } - if !f.AllowIllegalWrites { - for _, b := range pad { - if b != 0 { - // "Padding octets MUST be set to zero when sending." - return errPadBytes - } - } - } - } - var flags Flags - if endStream { - flags |= FlagDataEndStream - } - if pad != nil { - flags |= FlagDataPadded - } - f.startWrite(FrameData, flags, streamID) - if pad != nil { - f.wbuf = append(f.wbuf, byte(len(pad))) - } - f.wbuf = append(f.wbuf, data...) - f.wbuf = append(f.wbuf, pad...) - return f.endWrite() -} - -// A SettingsFrame conveys configuration parameters that affect how -// endpoints communicate, such as preferences and constraints on peer -// behavior. -// -// See http://http2.github.io/http2-spec/#SETTINGS -type SettingsFrame struct { - FrameHeader - p []byte -} - -func parseSettingsFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { - if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 { - // When this (ACK 0x1) bit is set, the payload of the - // SETTINGS frame MUST be empty. Receipt of a - // SETTINGS frame with the ACK flag set and a length - // field value other than 0 MUST be treated as a - // connection error (Section 5.4.1) of type - // FRAME_SIZE_ERROR. - return nil, ConnectionError(ErrCodeFrameSize) - } - if fh.StreamID != 0 { - // SETTINGS frames always apply to a connection, - // never a single stream. The stream identifier for a - // SETTINGS frame MUST be zero (0x0). If an endpoint - // receives a SETTINGS frame whose stream identifier - // field is anything other than 0x0, the endpoint MUST - // respond with a connection error (Section 5.4.1) of - // type PROTOCOL_ERROR. - return nil, ConnectionError(ErrCodeProtocol) - } - if len(p)%6 != 0 { - // Expecting even number of 6 byte settings. - return nil, ConnectionError(ErrCodeFrameSize) - } - f := &SettingsFrame{FrameHeader: fh, p: p} - if v, ok := f.Value(SettingInitialWindowSize); ok && v > (1<<31)-1 { - // Values above the maximum flow control window size of 2^31 - 1 MUST - // be treated as a connection error (Section 5.4.1) of type - // FLOW_CONTROL_ERROR. - return nil, ConnectionError(ErrCodeFlowControl) - } - return f, nil -} - -func (f *SettingsFrame) IsAck() bool { - return f.FrameHeader.Flags.Has(FlagSettingsAck) -} - -func (f *SettingsFrame) Value(s SettingID) (v uint32, ok bool) { - f.checkValid() - buf := f.p - for len(buf) > 0 { - settingID := SettingID(binary.BigEndian.Uint16(buf[:2])) - if settingID == s { - return binary.BigEndian.Uint32(buf[2:6]), true - } - buf = buf[6:] - } - return 0, false -} - -// ForeachSetting runs fn for each setting. -// It stops and returns the first error. 
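// --- Standalone round-trip sketch (assuming golang.org/x/net/http2 remains
// available as an external module) of the SettingsFrame type above and the
// WriteSettings/ForeachSetting helpers defined just below: write a SETTINGS
// frame into a buffer, read it back, and walk its settings.
package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2"
)

func main() {
	var buf bytes.Buffer
	fr := http2.NewFramer(&buf, &buf) // write into and read from the same buffer

	err := fr.WriteSettings(
		http2.Setting{ID: http2.SettingInitialWindowSize, Val: 1 << 20},
		http2.Setting{ID: http2.SettingMaxFrameSize, Val: 1 << 14},
	)
	if err != nil {
		panic(err)
	}

	f, err := fr.ReadFrame()
	if err != nil {
		panic(err)
	}
	sf := f.(*http2.SettingsFrame)
	sf.ForeachSetting(func(s http2.Setting) error {
		fmt.Printf("%v = %v\n", s.ID, s.Val)
		return nil
	})
}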
-func (f *SettingsFrame) ForeachSetting(fn func(Setting) error) error { - f.checkValid() - buf := f.p - for len(buf) > 0 { - if err := fn(Setting{ - SettingID(binary.BigEndian.Uint16(buf[:2])), - binary.BigEndian.Uint32(buf[2:6]), - }); err != nil { - return err - } - buf = buf[6:] - } - return nil -} - -// WriteSettings writes a SETTINGS frame with zero or more settings -// specified and the ACK bit not set. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility to not call other Write methods concurrently. -func (f *Framer) WriteSettings(settings ...Setting) error { - f.startWrite(FrameSettings, 0, 0) - for _, s := range settings { - f.writeUint16(uint16(s.ID)) - f.writeUint32(s.Val) - } - return f.endWrite() -} - -// WriteSettingsAck writes an empty SETTINGS frame with the ACK bit set. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility to not call other Write methods concurrently. -func (f *Framer) WriteSettingsAck() error { - f.startWrite(FrameSettings, FlagSettingsAck, 0) - return f.endWrite() -} - -// A PingFrame is a mechanism for measuring a minimal round trip time -// from the sender, as well as determining whether an idle connection -// is still functional. -// See http://http2.github.io/http2-spec/#rfc.section.6.7 -type PingFrame struct { - FrameHeader - Data [8]byte -} - -func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) } - -func parsePingFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) { - if len(payload) != 8 { - return nil, ConnectionError(ErrCodeFrameSize) - } - if fh.StreamID != 0 { - return nil, ConnectionError(ErrCodeProtocol) - } - f := &PingFrame{FrameHeader: fh} - copy(f.Data[:], payload) - return f, nil -} - -func (f *Framer) WritePing(ack bool, data [8]byte) error { - var flags Flags - if ack { - flags = FlagPingAck - } - f.startWrite(FramePing, flags, 0) - f.writeBytes(data[:]) - return f.endWrite() -} - -// A GoAwayFrame informs the remote peer to stop creating streams on this connection. -// See http://http2.github.io/http2-spec/#rfc.section.6.8 -type GoAwayFrame struct { - FrameHeader - LastStreamID uint32 - ErrCode ErrCode - debugData []byte -} - -// DebugData returns any debug data in the GOAWAY frame. Its contents -// are not defined. -// The caller must not retain the returned memory past the next -// call to ReadFrame. -func (f *GoAwayFrame) DebugData() []byte { - f.checkValid() - return f.debugData -} - -func parseGoAwayFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { - if fh.StreamID != 0 { - return nil, ConnectionError(ErrCodeProtocol) - } - if len(p) < 8 { - return nil, ConnectionError(ErrCodeFrameSize) - } - return &GoAwayFrame{ - FrameHeader: fh, - LastStreamID: binary.BigEndian.Uint32(p[:4]) & (1<<31 - 1), - ErrCode: ErrCode(binary.BigEndian.Uint32(p[4:8])), - debugData: p[8:], - }, nil -} - -func (f *Framer) WriteGoAway(maxStreamID uint32, code ErrCode, debugData []byte) error { - f.startWrite(FrameGoAway, 0, 0) - f.writeUint32(maxStreamID & (1<<31 - 1)) - f.writeUint32(uint32(code)) - f.writeBytes(debugData) - return f.endWrite() -} - -// An UnknownFrame is the frame type returned when the frame type is unknown -// or no specific frame type parser exists. -type UnknownFrame struct { - FrameHeader - p []byte -} - -// Payload returns the frame's payload (after the header). 
It is not -// valid to call this method after a subsequent call to -// Framer.ReadFrame, nor is it valid to retain the returned slice. -// The memory is owned by the Framer and is invalidated when the next -// frame is read. -func (f *UnknownFrame) Payload() []byte { - f.checkValid() - return f.p -} - -func parseUnknownFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { - return &UnknownFrame{fh, p}, nil -} - -// A WindowUpdateFrame is used to implement flow control. -// See http://http2.github.io/http2-spec/#rfc.section.6.9 -type WindowUpdateFrame struct { - FrameHeader - Increment uint32 // never read with high bit set -} - -func parseWindowUpdateFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { - if len(p) != 4 { - return nil, ConnectionError(ErrCodeFrameSize) - } - inc := binary.BigEndian.Uint32(p[:4]) & 0x7fffffff // mask off high reserved bit - if inc == 0 { - // A receiver MUST treat the receipt of a - // WINDOW_UPDATE frame with an flow control window - // increment of 0 as a stream error (Section 5.4.2) of - // type PROTOCOL_ERROR; errors on the connection flow - // control window MUST be treated as a connection - // error (Section 5.4.1). - if fh.StreamID == 0 { - return nil, ConnectionError(ErrCodeProtocol) - } - return nil, streamError(fh.StreamID, ErrCodeProtocol) - } - return &WindowUpdateFrame{ - FrameHeader: fh, - Increment: inc, - }, nil -} - -// WriteWindowUpdate writes a WINDOW_UPDATE frame. -// The increment value must be between 1 and 2,147,483,647, inclusive. -// If the Stream ID is zero, the window update applies to the -// connection as a whole. -func (f *Framer) WriteWindowUpdate(streamID, incr uint32) error { - // "The legal range for the increment to the flow control window is 1 to 2^31-1 (2,147,483,647) octets." - if (incr < 1 || incr > 2147483647) && !f.AllowIllegalWrites { - return errors.New("illegal window increment value") - } - f.startWrite(FrameWindowUpdate, 0, streamID) - f.writeUint32(incr) - return f.endWrite() -} - -// A HeadersFrame is used to open a stream and additionally carries a -// header block fragment. -type HeadersFrame struct { - FrameHeader - - // Priority is set if FlagHeadersPriority is set in the FrameHeader. - Priority PriorityParam - - headerFragBuf []byte // not owned -} - -func (f *HeadersFrame) HeaderBlockFragment() []byte { - f.checkValid() - return f.headerFragBuf -} - -func (f *HeadersFrame) HeadersEnded() bool { - return f.FrameHeader.Flags.Has(FlagHeadersEndHeaders) -} - -func (f *HeadersFrame) StreamEnded() bool { - return f.FrameHeader.Flags.Has(FlagHeadersEndStream) -} - -func (f *HeadersFrame) HasPriority() bool { - return f.FrameHeader.Flags.Has(FlagHeadersPriority) -} - -func parseHeadersFrame(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) { - hf := &HeadersFrame{ - FrameHeader: fh, - } - if fh.StreamID == 0 { - // HEADERS frames MUST be associated with a stream. If a HEADERS frame - // is received whose stream identifier field is 0x0, the recipient MUST - // respond with a connection error (Section 5.4.1) of type - // PROTOCOL_ERROR. 
- return nil, connError{ErrCodeProtocol, "HEADERS frame with stream ID 0"} - } - var padLength uint8 - if fh.Flags.Has(FlagHeadersPadded) { - if p, padLength, err = readByte(p); err != nil { - return - } - } - if fh.Flags.Has(FlagHeadersPriority) { - var v uint32 - p, v, err = readUint32(p) - if err != nil { - return nil, err - } - hf.Priority.StreamDep = v & 0x7fffffff - hf.Priority.Exclusive = (v != hf.Priority.StreamDep) // high bit was set - p, hf.Priority.Weight, err = readByte(p) - if err != nil { - return nil, err - } - } - if len(p)-int(padLength) <= 0 { - return nil, streamError(fh.StreamID, ErrCodeProtocol) - } - hf.headerFragBuf = p[:len(p)-int(padLength)] - return hf, nil -} - -// HeadersFrameParam are the parameters for writing a HEADERS frame. -type HeadersFrameParam struct { - // StreamID is the required Stream ID to initiate. - StreamID uint32 - // BlockFragment is part (or all) of a Header Block. - BlockFragment []byte - - // EndStream indicates that the header block is the last that - // the endpoint will send for the identified stream. Setting - // this flag causes the stream to enter one of "half closed" - // states. - EndStream bool - - // EndHeaders indicates that this frame contains an entire - // header block and is not followed by any - // CONTINUATION frames. - EndHeaders bool - - // PadLength is the optional number of bytes of zeros to add - // to this frame. - PadLength uint8 - - // Priority, if non-zero, includes stream priority information - // in the HEADER frame. - Priority PriorityParam -} - -// WriteHeaders writes a single HEADERS frame. -// -// This is a low-level header writing method. Encoding headers and -// splitting them into any necessary CONTINUATION frames is handled -// elsewhere. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility to not call other Write methods concurrently. -func (f *Framer) WriteHeaders(p HeadersFrameParam) error { - if !validStreamID(p.StreamID) && !f.AllowIllegalWrites { - return errStreamID - } - var flags Flags - if p.PadLength != 0 { - flags |= FlagHeadersPadded - } - if p.EndStream { - flags |= FlagHeadersEndStream - } - if p.EndHeaders { - flags |= FlagHeadersEndHeaders - } - if !p.Priority.IsZero() { - flags |= FlagHeadersPriority - } - f.startWrite(FrameHeaders, flags, p.StreamID) - if p.PadLength != 0 { - f.writeByte(p.PadLength) - } - if !p.Priority.IsZero() { - v := p.Priority.StreamDep - if !validStreamIDOrZero(v) && !f.AllowIllegalWrites { - return errDepStreamID - } - if p.Priority.Exclusive { - v |= 1 << 31 - } - f.writeUint32(v) - f.writeByte(p.Priority.Weight) - } - f.wbuf = append(f.wbuf, p.BlockFragment...) - f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...) - return f.endWrite() -} - -// A PriorityFrame specifies the sender-advised priority of a stream. -// See http://http2.github.io/http2-spec/#rfc.section.6.3 -type PriorityFrame struct { - FrameHeader - PriorityParam -} - -// PriorityParam are the stream prioritzation parameters. -type PriorityParam struct { - // StreamDep is a 31-bit stream identifier for the - // stream that this stream depends on. Zero means no - // dependency. - StreamDep uint32 - - // Exclusive is whether the dependency is exclusive. - Exclusive bool - - // Weight is the stream's zero-indexed weight. It should be - // set together with StreamDep, or neither should be set. Per - // the spec, "Add one to the value to obtain a weight between - // 1 and 256." 
- Weight uint8 -} - -func (p PriorityParam) IsZero() bool { - return p == PriorityParam{} -} - -func parsePriorityFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) { - if fh.StreamID == 0 { - return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"} - } - if len(payload) != 5 { - return nil, connError{ErrCodeFrameSize, fmt.Sprintf("PRIORITY frame payload size was %d; want 5", len(payload))} - } - v := binary.BigEndian.Uint32(payload[:4]) - streamID := v & 0x7fffffff // mask off high bit - return &PriorityFrame{ - FrameHeader: fh, - PriorityParam: PriorityParam{ - Weight: payload[4], - StreamDep: streamID, - Exclusive: streamID != v, // was high bit set? - }, - }, nil -} - -// WritePriority writes a PRIORITY frame. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility to not call other Write methods concurrently. -func (f *Framer) WritePriority(streamID uint32, p PriorityParam) error { - if !validStreamID(streamID) && !f.AllowIllegalWrites { - return errStreamID - } - if !validStreamIDOrZero(p.StreamDep) { - return errDepStreamID - } - f.startWrite(FramePriority, 0, streamID) - v := p.StreamDep - if p.Exclusive { - v |= 1 << 31 - } - f.writeUint32(v) - f.writeByte(p.Weight) - return f.endWrite() -} - -// A RSTStreamFrame allows for abnormal termination of a stream. -// See http://http2.github.io/http2-spec/#rfc.section.6.4 -type RSTStreamFrame struct { - FrameHeader - ErrCode ErrCode -} - -func parseRSTStreamFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { - if len(p) != 4 { - return nil, ConnectionError(ErrCodeFrameSize) - } - if fh.StreamID == 0 { - return nil, ConnectionError(ErrCodeProtocol) - } - return &RSTStreamFrame{fh, ErrCode(binary.BigEndian.Uint32(p[:4]))}, nil -} - -// WriteRSTStream writes a RST_STREAM frame. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility to not call other Write methods concurrently. -func (f *Framer) WriteRSTStream(streamID uint32, code ErrCode) error { - if !validStreamID(streamID) && !f.AllowIllegalWrites { - return errStreamID - } - f.startWrite(FrameRSTStream, 0, streamID) - f.writeUint32(uint32(code)) - return f.endWrite() -} - -// A ContinuationFrame is used to continue a sequence of header block fragments. -// See http://http2.github.io/http2-spec/#rfc.section.6.10 -type ContinuationFrame struct { - FrameHeader - headerFragBuf []byte -} - -func parseContinuationFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { - if fh.StreamID == 0 { - return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"} - } - return &ContinuationFrame{fh, p}, nil -} - -func (f *ContinuationFrame) HeaderBlockFragment() []byte { - f.checkValid() - return f.headerFragBuf -} - -func (f *ContinuationFrame) HeadersEnded() bool { - return f.FrameHeader.Flags.Has(FlagContinuationEndHeaders) -} - -// WriteContinuation writes a CONTINUATION frame. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility to not call other Write methods concurrently. -func (f *Framer) WriteContinuation(streamID uint32, endHeaders bool, headerBlockFragment []byte) error { - if !validStreamID(streamID) && !f.AllowIllegalWrites { - return errStreamID - } - var flags Flags - if endHeaders { - flags |= FlagContinuationEndHeaders - } - f.startWrite(FrameContinuation, flags, streamID) - f.wbuf = append(f.wbuf, headerBlockFragment...) 
- return f.endWrite() -} - -// A PushPromiseFrame is used to initiate a server stream. -// See http://http2.github.io/http2-spec/#rfc.section.6.6 -type PushPromiseFrame struct { - FrameHeader - PromiseID uint32 - headerFragBuf []byte // not owned -} - -func (f *PushPromiseFrame) HeaderBlockFragment() []byte { - f.checkValid() - return f.headerFragBuf -} - -func (f *PushPromiseFrame) HeadersEnded() bool { - return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders) -} - -func parsePushPromise(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) { - pp := &PushPromiseFrame{ - FrameHeader: fh, - } - if pp.StreamID == 0 { - // PUSH_PROMISE frames MUST be associated with an existing, - // peer-initiated stream. The stream identifier of a - // PUSH_PROMISE frame indicates the stream it is associated - // with. If the stream identifier field specifies the value - // 0x0, a recipient MUST respond with a connection error - // (Section 5.4.1) of type PROTOCOL_ERROR. - return nil, ConnectionError(ErrCodeProtocol) - } - // The PUSH_PROMISE frame includes optional padding. - // Padding fields and flags are identical to those defined for DATA frames - var padLength uint8 - if fh.Flags.Has(FlagPushPromisePadded) { - if p, padLength, err = readByte(p); err != nil { - return - } - } - - p, pp.PromiseID, err = readUint32(p) - if err != nil { - return - } - pp.PromiseID = pp.PromiseID & (1<<31 - 1) - - if int(padLength) > len(p) { - // like the DATA frame, error out if padding is longer than the body. - return nil, ConnectionError(ErrCodeProtocol) - } - pp.headerFragBuf = p[:len(p)-int(padLength)] - return pp, nil -} - -// PushPromiseParam are the parameters for writing a PUSH_PROMISE frame. -type PushPromiseParam struct { - // StreamID is the required Stream ID to initiate. - StreamID uint32 - - // PromiseID is the required Stream ID which this - // Push Promises - PromiseID uint32 - - // BlockFragment is part (or all) of a Header Block. - BlockFragment []byte - - // EndHeaders indicates that this frame contains an entire - // header block and is not followed by any - // CONTINUATION frames. - EndHeaders bool - - // PadLength is the optional number of bytes of zeros to add - // to this frame. - PadLength uint8 -} - -// WritePushPromise writes a single PushPromise Frame. -// -// As with Header Frames, This is the low level call for writing -// individual frames. Continuation frames are handled elsewhere. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility to not call other Write methods concurrently. -func (f *Framer) WritePushPromise(p PushPromiseParam) error { - if !validStreamID(p.StreamID) && !f.AllowIllegalWrites { - return errStreamID - } - var flags Flags - if p.PadLength != 0 { - flags |= FlagPushPromisePadded - } - if p.EndHeaders { - flags |= FlagPushPromiseEndHeaders - } - f.startWrite(FramePushPromise, flags, p.StreamID) - if p.PadLength != 0 { - f.writeByte(p.PadLength) - } - if !validStreamID(p.PromiseID) && !f.AllowIllegalWrites { - return errStreamID - } - f.writeUint32(p.PromiseID) - f.wbuf = append(f.wbuf, p.BlockFragment...) - f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...) - return f.endWrite() -} - -// WriteRawFrame writes a raw frame. This can be used to write -// extension frames unknown to this package. 
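// --- Standalone sketch of the extension-frame path: frames with an
// unregistered type are written with WriteRawFrame (whose body follows) and
// come back from ReadFrame as *UnknownFrame (defined earlier), with the raw
// payload available until the next ReadFrame call. Assumes
// golang.org/x/net/http2 remains available as an external module.
package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2"
)

func main() {
	var buf bytes.Buffer
	fr := http2.NewFramer(&buf, &buf)

	// 0xfa is not one of the ten registered frame types.
	if err := fr.WriteRawFrame(http2.FrameType(0xfa), 0, 1, []byte("ext")); err != nil {
		panic(err)
	}

	f, err := fr.ReadFrame()
	if err != nil {
		panic(err)
	}
	uf := f.(*http2.UnknownFrame)
	fmt.Printf("type=%v payload=%q\n", uf.Type, uf.Payload())
	// type=UNKNOWN_FRAME_TYPE_250 payload="ext"
}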
-func (f *Framer) WriteRawFrame(t FrameType, flags Flags, streamID uint32, payload []byte) error { - f.startWrite(t, flags, streamID) - f.writeBytes(payload) - return f.endWrite() -} - -func readByte(p []byte) (remain []byte, b byte, err error) { - if len(p) == 0 { - return nil, 0, io.ErrUnexpectedEOF - } - return p[1:], p[0], nil -} - -func readUint32(p []byte) (remain []byte, v uint32, err error) { - if len(p) < 4 { - return nil, 0, io.ErrUnexpectedEOF - } - return p[4:], binary.BigEndian.Uint32(p[:4]), nil -} - -type streamEnder interface { - StreamEnded() bool -} - -type headersEnder interface { - HeadersEnded() bool -} - -type headersOrContinuation interface { - headersEnder - HeaderBlockFragment() []byte -} - -// A MetaHeadersFrame is the representation of one HEADERS frame and -// zero or more contiguous CONTINUATION frames and the decoding of -// their HPACK-encoded contents. -// -// This type of frame does not appear on the wire and is only returned -// by the Framer when Framer.ReadMetaHeaders is set. -type MetaHeadersFrame struct { - *HeadersFrame - - // Fields are the fields contained in the HEADERS and - // CONTINUATION frames. The underlying slice is owned by the - // Framer and must not be retained after the next call to - // ReadFrame. - // - // Fields are guaranteed to be in the correct http2 order and - // not have unknown pseudo header fields or invalid header - // field names or values. Required pseudo header fields may be - // missing, however. Use the MetaHeadersFrame.Pseudo accessor - // method access pseudo headers. - Fields []hpack.HeaderField - - // Truncated is whether the max header list size limit was hit - // and Fields is incomplete. The hpack decoder state is still - // valid, however. - Truncated bool -} - -// PseudoValue returns the given pseudo header field's value. -// The provided pseudo field should not contain the leading colon. -func (mh *MetaHeadersFrame) PseudoValue(pseudo string) string { - for _, hf := range mh.Fields { - if !hf.IsPseudo() { - return "" - } - if hf.Name[1:] == pseudo { - return hf.Value - } - } - return "" -} - -// RegularFields returns the regular (non-pseudo) header fields of mh. -// The caller does not own the returned slice. -func (mh *MetaHeadersFrame) RegularFields() []hpack.HeaderField { - for i, hf := range mh.Fields { - if !hf.IsPseudo() { - return mh.Fields[i:] - } - } - return nil -} - -// PseudoFields returns the pseudo header fields of mh. -// The caller does not own the returned slice. -func (mh *MetaHeadersFrame) PseudoFields() []hpack.HeaderField { - for i, hf := range mh.Fields { - if !hf.IsPseudo() { - return mh.Fields[:i] - } - } - return mh.Fields -} - -func (mh *MetaHeadersFrame) checkPseudos() error { - var isRequest, isResponse bool - pf := mh.PseudoFields() - for i, hf := range pf { - switch hf.Name { - case ":method", ":path", ":scheme", ":authority": - isRequest = true - case ":status": - isResponse = true - default: - return pseudoHeaderError(hf.Name) - } - // Check for duplicates. - // This would be a bad algorithm, but N is 4. - // And this doesn't allocate. 
- for _, hf2 := range pf[:i] { - if hf.Name == hf2.Name { - return duplicatePseudoHeaderError(hf.Name) - } - } - } - if isRequest && isResponse { - return errMixPseudoHeaderTypes - } - return nil -} - -func (fr *Framer) maxHeaderStringLen() int { - v := fr.maxHeaderListSize() - if uint32(int(v)) == v { - return int(v) - } - // They had a crazy big number for MaxHeaderBytes anyway, - // so give them unlimited header lengths: - return 0 -} - -// readMetaFrame returns 0 or more CONTINUATION frames from fr and -// merge them into into the provided hf and returns a MetaHeadersFrame -// with the decoded hpack values. -func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { - if fr.AllowIllegalReads { - return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders") - } - mh := &MetaHeadersFrame{ - HeadersFrame: hf, - } - var remainSize = fr.maxHeaderListSize() - var sawRegular bool - - var invalid error // pseudo header field errors - hdec := fr.ReadMetaHeaders - hdec.SetEmitEnabled(true) - hdec.SetMaxStringLength(fr.maxHeaderStringLen()) - hdec.SetEmitFunc(func(hf hpack.HeaderField) { - if VerboseLogs && fr.logReads { - fr.debugReadLoggerf("http2: decoded hpack field %+v", hf) - } - if !httpguts.ValidHeaderFieldValue(hf.Value) { - invalid = headerFieldValueError(hf.Value) - } - isPseudo := strings.HasPrefix(hf.Name, ":") - if isPseudo { - if sawRegular { - invalid = errPseudoAfterRegular - } - } else { - sawRegular = true - if !validWireHeaderFieldName(hf.Name) { - invalid = headerFieldNameError(hf.Name) - } - } - - if invalid != nil { - hdec.SetEmitEnabled(false) - return - } - - size := hf.Size() - if size > remainSize { - hdec.SetEmitEnabled(false) - mh.Truncated = true - return - } - remainSize -= size - - mh.Fields = append(mh.Fields, hf) - }) - // Lose reference to MetaHeadersFrame: - defer hdec.SetEmitFunc(func(hf hpack.HeaderField) {}) - - var hc headersOrContinuation = hf - for { - frag := hc.HeaderBlockFragment() - if _, err := hdec.Write(frag); err != nil { - return nil, ConnectionError(ErrCodeCompression) - } - - if hc.HeadersEnded() { - break - } - if f, err := fr.ReadFrame(); err != nil { - return nil, err - } else { - hc = f.(*ContinuationFrame) // guaranteed by checkFrameOrder - } - } - - mh.HeadersFrame.headerFragBuf = nil - mh.HeadersFrame.invalidate() - - if err := hdec.Close(); err != nil { - return nil, ConnectionError(ErrCodeCompression) - } - if invalid != nil { - fr.errDetail = invalid - if VerboseLogs { - log.Printf("http2: invalid header: %v", invalid) - } - return nil, StreamError{mh.StreamID, ErrCodeProtocol, invalid} - } - if err := mh.checkPseudos(); err != nil { - fr.errDetail = err - if VerboseLogs { - log.Printf("http2: invalid pseudo headers: %v", err) - } - return nil, StreamError{mh.StreamID, ErrCodeProtocol, err} - } - return mh, nil -} - -func summarizeFrame(f Frame) string { - var buf bytes.Buffer - f.Header().writeDebug(&buf) - switch f := f.(type) { - case *SettingsFrame: - n := 0 - f.ForeachSetting(func(s Setting) error { - n++ - if n == 1 { - buf.WriteString(", settings:") - } - fmt.Fprintf(&buf, " %v=%v,", s.ID, s.Val) - return nil - }) - if n > 0 { - buf.Truncate(buf.Len() - 1) // remove trailing comma - } - case *DataFrame: - data := f.Data() - const max = 256 - if len(data) > max { - data = data[:max] - } - fmt.Fprintf(&buf, " data=%q", data) - if len(f.Data()) > max { - fmt.Fprintf(&buf, " (%d bytes omitted)", len(f.Data())-max) - } - case *WindowUpdateFrame: - if f.StreamID == 0 { - buf.WriteString(" 
(conn)") - } - fmt.Fprintf(&buf, " incr=%v", f.Increment) - case *PingFrame: - fmt.Fprintf(&buf, " ping=%q", f.Data[:]) - case *GoAwayFrame: - fmt.Fprintf(&buf, " LastStreamID=%v ErrCode=%v Debug=%q", - f.LastStreamID, f.ErrCode, f.debugData) - case *RSTStreamFrame: - fmt.Fprintf(&buf, " ErrCode=%v", f.ErrCode) - } - return buf.String() -} diff --git a/vendor/golang.org/x/net/http2/go16.go b/vendor/golang.org/x/net/http2/go16.go deleted file mode 100644 index 00b2e9e..0000000 --- a/vendor/golang.org/x/net/http2/go16.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.6 - -package http2 - -import ( - "net/http" - "time" -) - -func transportExpectContinueTimeout(t1 *http.Transport) time.Duration { - return t1.ExpectContinueTimeout -} diff --git a/vendor/golang.org/x/net/http2/go17.go b/vendor/golang.org/x/net/http2/go17.go deleted file mode 100644 index 47b7fae..0000000 --- a/vendor/golang.org/x/net/http2/go17.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.7 - -package http2 - -import ( - "context" - "net" - "net/http" - "net/http/httptrace" - "time" -) - -type contextContext interface { - context.Context -} - -func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) { - ctx, cancel = context.WithCancel(context.Background()) - ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr()) - if hs := opts.baseConfig(); hs != nil { - ctx = context.WithValue(ctx, http.ServerContextKey, hs) - } - return -} - -func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) { - return context.WithCancel(ctx) -} - -func requestWithContext(req *http.Request, ctx contextContext) *http.Request { - return req.WithContext(ctx) -} - -type clientTrace httptrace.ClientTrace - -func reqContext(r *http.Request) context.Context { return r.Context() } - -func (t *Transport) idleConnTimeout() time.Duration { - if t.t1 != nil { - return t.t1.IdleConnTimeout - } - return 0 -} - -func setResponseUncompressed(res *http.Response) { res.Uncompressed = true } - -func traceGotConn(req *http.Request, cc *ClientConn) { - trace := httptrace.ContextClientTrace(req.Context()) - if trace == nil || trace.GotConn == nil { - return - } - ci := httptrace.GotConnInfo{Conn: cc.tconn} - cc.mu.Lock() - ci.Reused = cc.nextStreamID > 1 - ci.WasIdle = len(cc.streams) == 0 && ci.Reused - if ci.WasIdle && !cc.lastActive.IsZero() { - ci.IdleTime = time.Now().Sub(cc.lastActive) - } - cc.mu.Unlock() - - trace.GotConn(ci) -} - -func traceWroteHeaders(trace *clientTrace) { - if trace != nil && trace.WroteHeaders != nil { - trace.WroteHeaders() - } -} - -func traceGot100Continue(trace *clientTrace) { - if trace != nil && trace.Got100Continue != nil { - trace.Got100Continue() - } -} - -func traceWait100Continue(trace *clientTrace) { - if trace != nil && trace.Wait100Continue != nil { - trace.Wait100Continue() - } -} - -func traceWroteRequest(trace *clientTrace, err error) { - if trace != nil && trace.WroteRequest != nil { - trace.WroteRequest(httptrace.WroteRequestInfo{Err: err}) - } -} - -func traceFirstResponseByte(trace *clientTrace) { - if trace != nil && trace.GotFirstResponseByte != nil { - trace.GotFirstResponseByte() - } -} - -func requestTrace(req 
*http.Request) *clientTrace { - trace := httptrace.ContextClientTrace(req.Context()) - return (*clientTrace)(trace) -} - -// Ping sends a PING frame to the server and waits for the ack. -func (cc *ClientConn) Ping(ctx context.Context) error { - return cc.ping(ctx) -} diff --git a/vendor/golang.org/x/net/http2/go17_not18.go b/vendor/golang.org/x/net/http2/go17_not18.go deleted file mode 100644 index b4c52ec..0000000 --- a/vendor/golang.org/x/net/http2/go17_not18.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.7,!go1.8 - -package http2 - -import "crypto/tls" - -// temporary copy of Go 1.7's private tls.Config.clone: -func cloneTLSConfig(c *tls.Config) *tls.Config { - return &tls.Config{ - Rand: c.Rand, - Time: c.Time, - Certificates: c.Certificates, - NameToCertificate: c.NameToCertificate, - GetCertificate: c.GetCertificate, - RootCAs: c.RootCAs, - NextProtos: c.NextProtos, - ServerName: c.ServerName, - ClientAuth: c.ClientAuth, - ClientCAs: c.ClientCAs, - InsecureSkipVerify: c.InsecureSkipVerify, - CipherSuites: c.CipherSuites, - PreferServerCipherSuites: c.PreferServerCipherSuites, - SessionTicketsDisabled: c.SessionTicketsDisabled, - SessionTicketKey: c.SessionTicketKey, - ClientSessionCache: c.ClientSessionCache, - MinVersion: c.MinVersion, - MaxVersion: c.MaxVersion, - CurvePreferences: c.CurvePreferences, - DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled, - Renegotiation: c.Renegotiation, - } -} diff --git a/vendor/golang.org/x/net/http2/go18.go b/vendor/golang.org/x/net/http2/go18.go deleted file mode 100644 index 4f30d22..0000000 --- a/vendor/golang.org/x/net/http2/go18.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.8 - -package http2 - -import ( - "crypto/tls" - "io" - "net/http" -) - -func cloneTLSConfig(c *tls.Config) *tls.Config { - c2 := c.Clone() - c2.GetClientCertificate = c.GetClientCertificate // golang.org/issue/19264 - return c2 -} - -var _ http.Pusher = (*responseWriter)(nil) - -// Push implements http.Pusher. -func (w *responseWriter) Push(target string, opts *http.PushOptions) error { - internalOpts := pushOptions{} - if opts != nil { - internalOpts.Method = opts.Method - internalOpts.Header = opts.Header - } - return w.push(target, internalOpts) -} - -func configureServer18(h1 *http.Server, h2 *Server) error { - if h2.IdleTimeout == 0 { - if h1.IdleTimeout != 0 { - h2.IdleTimeout = h1.IdleTimeout - } else { - h2.IdleTimeout = h1.ReadTimeout - } - } - return nil -} - -func shouldLogPanic(panicValue interface{}) bool { - return panicValue != nil && panicValue != http.ErrAbortHandler -} - -func reqGetBody(req *http.Request) func() (io.ReadCloser, error) { - return req.GetBody -} - -func reqBodyIsNoBody(body io.ReadCloser) bool { - return body == http.NoBody -} - -func go18httpNoBody() io.ReadCloser { return http.NoBody } // for tests only diff --git a/vendor/golang.org/x/net/http2/go19.go b/vendor/golang.org/x/net/http2/go19.go deleted file mode 100644 index 38124ba..0000000 --- a/vendor/golang.org/x/net/http2/go19.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
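// --- Sketch of how a handler uses the http.Pusher support implemented in
// go18.go above: on an HTTP/2 connection the ResponseWriter satisfies
// http.Pusher, so related resources can be pushed before the main response.
// The resource and certificate paths are placeholders.
package main

import (
	"log"
	"net/http"
)

func handler(w http.ResponseWriter, r *http.Request) {
	if p, ok := w.(http.Pusher); ok {
		// Push errors (unsupported protocol, push disabled by the client)
		// are non-fatal; fall back to a normal response.
		if err := p.Push("/static/app.css", nil); err != nil {
			log.Printf("push failed: %v", err)
		}
	}
	w.Write([]byte("<html><link rel=stylesheet href=/static/app.css></html>"))
}

func main() {
	http.HandleFunc("/", handler)
	// HTTP/2 (and therefore Push) requires TLS.
	log.Fatal(http.ListenAndServeTLS(":8443", "cert.pem", "key.pem", nil))
}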
- -// +build go1.9 - -package http2 - -import ( - "net/http" -) - -func configureServer19(s *http.Server, conf *Server) error { - s.RegisterOnShutdown(conf.state.startGracefulShutdown) - return nil -} diff --git a/vendor/golang.org/x/net/http2/gotrack.go b/vendor/golang.org/x/net/http2/gotrack.go deleted file mode 100644 index 9933c9f..0000000 --- a/vendor/golang.org/x/net/http2/gotrack.go +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Defensive debug-only utility to track that functions run on the -// goroutine that they're supposed to. - -package http2 - -import ( - "bytes" - "errors" - "fmt" - "os" - "runtime" - "strconv" - "sync" -) - -var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1" - -type goroutineLock uint64 - -func newGoroutineLock() goroutineLock { - if !DebugGoroutines { - return 0 - } - return goroutineLock(curGoroutineID()) -} - -func (g goroutineLock) check() { - if !DebugGoroutines { - return - } - if curGoroutineID() != uint64(g) { - panic("running on the wrong goroutine") - } -} - -func (g goroutineLock) checkNotOn() { - if !DebugGoroutines { - return - } - if curGoroutineID() == uint64(g) { - panic("running on the wrong goroutine") - } -} - -var goroutineSpace = []byte("goroutine ") - -func curGoroutineID() uint64 { - bp := littleBuf.Get().(*[]byte) - defer littleBuf.Put(bp) - b := *bp - b = b[:runtime.Stack(b, false)] - // Parse the 4707 out of "goroutine 4707 [" - b = bytes.TrimPrefix(b, goroutineSpace) - i := bytes.IndexByte(b, ' ') - if i < 0 { - panic(fmt.Sprintf("No space found in %q", b)) - } - b = b[:i] - n, err := parseUintBytes(b, 10, 64) - if err != nil { - panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err)) - } - return n -} - -var littleBuf = sync.Pool{ - New: func() interface{} { - buf := make([]byte, 64) - return &buf - }, -} - -// parseUintBytes is like strconv.ParseUint, but using a []byte. -func parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) { - var cutoff, maxVal uint64 - - if bitSize == 0 { - bitSize = int(strconv.IntSize) - } - - s0 := s - switch { - case len(s) < 1: - err = strconv.ErrSyntax - goto Error - - case 2 <= base && base <= 36: - // valid base; nothing to do - - case base == 0: - // Look for octal, hex prefix. - switch { - case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'): - base = 16 - s = s[2:] - if len(s) < 1 { - err = strconv.ErrSyntax - goto Error - } - case s[0] == '0': - base = 8 - default: - base = 10 - } - - default: - err = errors.New("invalid base " + strconv.Itoa(base)) - goto Error - } - - n = 0 - cutoff = cutoff64(base) - maxVal = 1<= base { - n = 0 - err = strconv.ErrSyntax - goto Error - } - - if n >= cutoff { - // n*base overflows - n = 1<<64 - 1 - err = strconv.ErrRange - goto Error - } - n *= uint64(base) - - n1 := n + uint64(v) - if n1 < n || n1 > maxVal { - // n+v overflows - n = 1<<64 - 1 - err = strconv.ErrRange - goto Error - } - n = n1 - } - - return n, nil - -Error: - return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err} -} - -// Return the first number n such that n*base >= 1<<64. 
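// --- Self-contained sketch of the debug-only technique in gotrack.go above:
// curGoroutineID recovers the current goroutine's ID by parsing the
// "goroutine N [...]" prefix of runtime.Stack output (here with strconv
// instead of the allocation-free parseUintBytes).
package main

import (
	"bytes"
	"fmt"
	"runtime"
	"strconv"
)

func goroutineID() uint64 {
	buf := make([]byte, 64)
	buf = buf[:runtime.Stack(buf, false)]
	// Stack output starts with e.g. "goroutine 4707 [running]:".
	buf = bytes.TrimPrefix(buf, []byte("goroutine "))
	i := bytes.IndexByte(buf, ' ')
	if i < 0 {
		panic(fmt.Sprintf("no space found in %q", buf))
	}
	id, err := strconv.ParseUint(string(buf[:i]), 10, 64)
	if err != nil {
		panic(err)
	}
	return id
}

func main() {
	fmt.Println("running on goroutine", goroutineID())
	done := make(chan struct{})
	go func() {
		fmt.Println("and this one is goroutine", goroutineID())
		close(done)
	}()
	<-done
}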
-func cutoff64(base int) uint64 { - if base < 2 { - return 0 - } - return (1<<64-1)/uint64(base) + 1 -} diff --git a/vendor/golang.org/x/net/http2/h2demo/.gitignore b/vendor/golang.org/x/net/http2/h2demo/.gitignore deleted file mode 100644 index 8a1133f..0000000 --- a/vendor/golang.org/x/net/http2/h2demo/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -h2demo -h2demo.linux -client-id.dat -client-secret.dat -token.dat -ca-certificates.crt diff --git a/vendor/golang.org/x/net/http2/h2demo/Dockerfile b/vendor/golang.org/x/net/http2/h2demo/Dockerfile deleted file mode 100644 index 9238673..0000000 --- a/vendor/golang.org/x/net/http2/h2demo/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -# Copyright 2018 The Go Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -FROM scratch -LABEL maintainer "golang-dev@googlegroups.com" - -COPY ca-certificates.crt /etc/ssl/certs/ -COPY h2demo / -ENTRYPOINT ["/h2demo", "-prod"] - diff --git a/vendor/golang.org/x/net/http2/h2demo/Dockerfile.0 b/vendor/golang.org/x/net/http2/h2demo/Dockerfile.0 deleted file mode 100644 index fd8435d..0000000 --- a/vendor/golang.org/x/net/http2/h2demo/Dockerfile.0 +++ /dev/null @@ -1,134 +0,0 @@ -# Copyright 2018 The Go Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -FROM golang:1.9 -LABEL maintainer "golang-dev@googlegroups.com" - -ENV CGO_ENABLED=0 - -# BEGIN deps (run `make update-deps` to update) - -# Repo cloud.google.com/go at 1d0c2da (2018-01-30) -ENV REV=1d0c2da40456a9b47f5376165f275424acc15c09 -RUN go get -d cloud.google.com/go/compute/metadata `#and 6 other pkgs` &&\ - (cd /go/src/cloud.google.com/go && (git cat-file -t $REV 2>/dev/null || git fetch -q origin $REV) && git reset --hard $REV) - -# Repo github.com/golang/protobuf at 9255415 (2018-01-25) -ENV REV=925541529c1fa6821df4e44ce2723319eb2be768 -RUN go get -d github.com/golang/protobuf/proto `#and 6 other pkgs` &&\ - (cd /go/src/github.com/golang/protobuf && (git cat-file -t $REV 2>/dev/null || git fetch -q origin $REV) && git reset --hard $REV) - -# Repo github.com/googleapis/gax-go at 317e000 (2017-09-15) -ENV REV=317e0006254c44a0ac427cc52a0e083ff0b9622f -RUN go get -d github.com/googleapis/gax-go &&\ - (cd /go/src/github.com/googleapis/gax-go && (git cat-file -t $REV 2>/dev/null || git fetch -q origin $REV) && git reset --hard $REV) - -# Repo go4.org at 034d17a (2017-05-25) -ENV REV=034d17a462f7b2dcd1a4a73553ec5357ff6e6c6e -RUN go get -d go4.org/syncutil/singleflight &&\ - (cd /go/src/go4.org && (git cat-file -t $REV 2>/dev/null || git fetch -q origin $REV) && git reset --hard $REV) - -# Repo golang.org/x/build at 8aa9ee0 (2018-02-01) -ENV REV=8aa9ee0e557fd49c14113e5ba106e13a5b455460 -RUN go get -d golang.org/x/build/autocertcache &&\ - (cd /go/src/golang.org/x/build && (git cat-file -t $REV 2>/dev/null || git fetch -q origin $REV) && git reset --hard $REV) - -# Repo golang.org/x/crypto at 1875d0a (2018-01-27) -ENV REV=1875d0a70c90e57f11972aefd42276df65e895b9 -RUN go get -d golang.org/x/crypto/acme `#and 2 other pkgs` &&\ - (cd /go/src/golang.org/x/crypto && (git cat-file -t $REV 2>/dev/null || git fetch -q origin $REV) && git reset --hard $REV) - -# Repo golang.org/x/oauth2 at 30785a2 (2018-01-04) -ENV REV=30785a2c434e431ef7c507b54617d6a951d5f2b4 -RUN go get -d golang.org/x/oauth2 `#and 5 other pkgs` &&\ - (cd /go/src/golang.org/x/oauth2 && (git cat-file -t $REV 2>/dev/null || git fetch -q origin $REV) 
&& git reset --hard $REV) - -# Repo golang.org/x/text at e19ae14 (2017-12-27) -ENV REV=e19ae1496984b1c655b8044a65c0300a3c878dd3 -RUN go get -d golang.org/x/text/secure/bidirule `#and 4 other pkgs` &&\ - (cd /go/src/golang.org/x/text && (git cat-file -t $REV 2>/dev/null || git fetch -q origin $REV) && git reset --hard $REV) - -# Repo google.golang.org/api at 7d0e2d3 (2018-01-30) -ENV REV=7d0e2d350555821bef5a5b8aecf0d12cc1def633 -RUN go get -d google.golang.org/api/gensupport `#and 9 other pkgs` &&\ - (cd /go/src/google.golang.org/api && (git cat-file -t $REV 2>/dev/null || git fetch -q origin $REV) && git reset --hard $REV) - -# Repo google.golang.org/genproto at 4eb30f4 (2018-01-25) -ENV REV=4eb30f4778eed4c258ba66527a0d4f9ec8a36c45 -RUN go get -d google.golang.org/genproto/googleapis/api/annotations `#and 3 other pkgs` &&\ - (cd /go/src/google.golang.org/genproto && (git cat-file -t $REV 2>/dev/null || git fetch -q origin $REV) && git reset --hard $REV) - -# Repo google.golang.org/grpc at 0bd008f (2018-01-25) -ENV REV=0bd008f5fadb62d228f12b18d016709e8139a7af -RUN go get -d google.golang.org/grpc `#and 23 other pkgs` &&\ - (cd /go/src/google.golang.org/grpc && (git cat-file -t $REV 2>/dev/null || git fetch -q origin $REV) && git reset --hard $REV) - -# Optimization to speed up iterative development, not necessary for correctness: -RUN go install cloud.google.com/go/compute/metadata \ - cloud.google.com/go/iam \ - cloud.google.com/go/internal \ - cloud.google.com/go/internal/optional \ - cloud.google.com/go/internal/version \ - cloud.google.com/go/storage \ - github.com/golang/protobuf/proto \ - github.com/golang/protobuf/protoc-gen-go/descriptor \ - github.com/golang/protobuf/ptypes \ - github.com/golang/protobuf/ptypes/any \ - github.com/golang/protobuf/ptypes/duration \ - github.com/golang/protobuf/ptypes/timestamp \ - github.com/googleapis/gax-go \ - go4.org/syncutil/singleflight \ - golang.org/x/build/autocertcache \ - golang.org/x/crypto/acme \ - golang.org/x/crypto/acme/autocert \ - golang.org/x/oauth2 \ - golang.org/x/oauth2/google \ - golang.org/x/oauth2/internal \ - golang.org/x/oauth2/jws \ - golang.org/x/oauth2/jwt \ - golang.org/x/text/secure/bidirule \ - golang.org/x/text/transform \ - golang.org/x/text/unicode/bidi \ - golang.org/x/text/unicode/norm \ - google.golang.org/api/gensupport \ - google.golang.org/api/googleapi \ - google.golang.org/api/googleapi/internal/uritemplates \ - google.golang.org/api/googleapi/transport \ - google.golang.org/api/internal \ - google.golang.org/api/iterator \ - google.golang.org/api/option \ - google.golang.org/api/storage/v1 \ - google.golang.org/api/transport/http \ - google.golang.org/genproto/googleapis/api/annotations \ - google.golang.org/genproto/googleapis/iam/v1 \ - google.golang.org/genproto/googleapis/rpc/status \ - google.golang.org/grpc \ - google.golang.org/grpc/balancer \ - google.golang.org/grpc/balancer/base \ - google.golang.org/grpc/balancer/roundrobin \ - google.golang.org/grpc/codes \ - google.golang.org/grpc/connectivity \ - google.golang.org/grpc/credentials \ - google.golang.org/grpc/encoding \ - google.golang.org/grpc/encoding/proto \ - google.golang.org/grpc/grpclb/grpc_lb_v1/messages \ - google.golang.org/grpc/grpclog \ - google.golang.org/grpc/internal \ - google.golang.org/grpc/keepalive \ - google.golang.org/grpc/metadata \ - google.golang.org/grpc/naming \ - google.golang.org/grpc/peer \ - google.golang.org/grpc/resolver \ - google.golang.org/grpc/resolver/dns \ - google.golang.org/grpc/resolver/passthrough \ - 
google.golang.org/grpc/stats \ - google.golang.org/grpc/status \ - google.golang.org/grpc/tap \ - google.golang.org/grpc/transport -# END deps - -COPY . /go/src/golang.org/x/net/ - -RUN go install -tags "h2demo netgo" -ldflags "-linkmode=external -extldflags '-static -pthread'" golang.org/x/net/http2/h2demo - diff --git a/vendor/golang.org/x/net/http2/h2demo/Makefile b/vendor/golang.org/x/net/http2/h2demo/Makefile deleted file mode 100644 index 306d198..0000000 --- a/vendor/golang.org/x/net/http2/h2demo/Makefile +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2018 The Go Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -MUTABLE_VERSION ?= latest -VERSION ?= $(shell git rev-parse --short HEAD) - -IMAGE_STAGING := gcr.io/go-dashboard-dev/h2demo -IMAGE_PROD := gcr.io/symbolic-datum-552/h2demo - -DOCKER_IMAGE_build0=build0/h2demo:latest -DOCKER_CTR_build0=h2demo-build0 - -build0: *.go Dockerfile.0 - docker build --force-rm -f Dockerfile.0 --tag=$(DOCKER_IMAGE_build0) ../.. - -h2demo: build0 - docker create --name $(DOCKER_CTR_build0) $(DOCKER_IMAGE_build0) - docker cp $(DOCKER_CTR_build0):/go/bin/$@ $@ - docker rm $(DOCKER_CTR_build0) - -ca-certificates.crt: - docker create --name $(DOCKER_CTR_build0) $(DOCKER_IMAGE_build0) - docker cp $(DOCKER_CTR_build0):/etc/ssl/certs/$@ $@ - docker rm $(DOCKER_CTR_build0) - -update-deps: - go install golang.org/x/build/cmd/gitlock - gitlock --update=Dockerfile.0 --ignore=golang.org/x/net --tags=h2demo golang.org/x/net/http2/h2demo - -docker-prod: Dockerfile h2demo ca-certificates.crt - docker build --force-rm --tag=$(IMAGE_PROD):$(VERSION) . - docker tag $(IMAGE_PROD):$(VERSION) $(IMAGE_PROD):$(MUTABLE_VERSION) -docker-staging: Dockerfile h2demo ca-certificates.crt - docker build --force-rm --tag=$(IMAGE_STAGING):$(VERSION) . - docker tag $(IMAGE_STAGING):$(VERSION) $(IMAGE_STAGING):$(MUTABLE_VERSION) - -push-prod: docker-prod - gcloud docker -- push $(IMAGE_PROD):$(MUTABLE_VERSION) - gcloud docker -- push $(IMAGE_PROD):$(VERSION) -push-staging: docker-staging - gcloud docker -- push $(IMAGE_STAGING):$(MUTABLE_VERSION) - gcloud docker -- push $(IMAGE_STAGING):$(VERSION) - -deploy-prod: push-prod - kubectl set image deployment/h2demo-deployment h2demo=$(IMAGE_PROD):$(VERSION) -deploy-staging: push-staging - kubectl set image deployment/h2demo-deployment h2demo=$(IMAGE_STAGING):$(VERSION) - -.PHONY: clean -clean: - $(RM) h2demo - $(RM) ca-certificates.crt - -FORCE: diff --git a/vendor/golang.org/x/net/http2/h2demo/README b/vendor/golang.org/x/net/http2/h2demo/README deleted file mode 100644 index 212a96f..0000000 --- a/vendor/golang.org/x/net/http2/h2demo/README +++ /dev/null @@ -1,16 +0,0 @@ - -Client: - -- Firefox nightly with about:config network.http.spdy.enabled.http2draft set true - -- Chrome: go to chrome://flags/#enable-spdy4, save and restart (button at bottom) - -Make CA: -$ openssl genrsa -out rootCA.key 2048 -$ openssl req -x509 -new -nodes -key rootCA.key -days 1024 -out rootCA.pem -... 
install that to Firefox - -Make cert: -$ openssl genrsa -out server.key 2048 -$ openssl req -new -key server.key -out server.csr -$ openssl x509 -req -in server.csr -CA rootCA.pem -CAkey rootCA.key -CAcreateserial -out server.crt -days 500 - - diff --git a/vendor/golang.org/x/net/http2/h2demo/deployment-prod.yaml b/vendor/golang.org/x/net/http2/h2demo/deployment-prod.yaml deleted file mode 100644 index a3a20a4..0000000 --- a/vendor/golang.org/x/net/http2/h2demo/deployment-prod.yaml +++ /dev/null @@ -1,28 +0,0 @@ -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: h2demo-deployment -spec: - replicas: 1 - template: - metadata: - labels: - app: h2demo - annotations: - container.seccomp.security.alpha.kubernetes.io/h2demo: docker/default - container.apparmor.security.beta.kubernetes.io/h2demo: runtime/default - spec: - containers: - - name: h2demo - image: gcr.io/symbolic-datum-552/h2demo:latest - imagePullPolicy: Always - command: ["/h2demo", "-prod"] - ports: - - containerPort: 80 - - containerPort: 443 - resources: - requests: - cpu: "1" - memory: "1Gi" - limits: - memory: "2Gi" diff --git a/vendor/golang.org/x/net/http2/h2demo/h2demo.go b/vendor/golang.org/x/net/http2/h2demo/h2demo.go deleted file mode 100644 index ce842fd..0000000 --- a/vendor/golang.org/x/net/http2/h2demo/h2demo.go +++ /dev/null @@ -1,543 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build h2demo - -package main - -import ( - "bytes" - "context" - "crypto/tls" - "flag" - "fmt" - "hash/crc32" - "image" - "image/jpeg" - "io" - "io/ioutil" - "log" - "net" - "net/http" - "path" - "regexp" - "runtime" - "strconv" - "strings" - "sync" - "time" - - "cloud.google.com/go/storage" - "go4.org/syncutil/singleflight" - "golang.org/x/build/autocertcache" - "golang.org/x/crypto/acme/autocert" - "golang.org/x/net/http2" -) - -var ( - prod = flag.Bool("prod", false, "Whether to configure itself to be the production http2.golang.org server.") - - httpsAddr = flag.String("https_addr", "localhost:4430", "TLS address to listen on ('host:port' or ':port'). Required.") - httpAddr = flag.String("http_addr", "", "Plain HTTP address to listen on ('host:port', or ':port'). Empty means no HTTP.") - - hostHTTP = flag.String("http_host", "", "Optional host or host:port to use for http:// links to this service. By default, this is implied from -http_addr.") - hostHTTPS = flag.String("https_host", "", "Optional host or host:port to use for http:// links to this service. By default, this is implied from -https_addr.") -) - -func homeOldHTTP(w http.ResponseWriter, r *http.Request) { - io.WriteString(w, ` - -

Go + HTTP/2

-

Welcome to the Go language's HTTP/2 demo & interop server.

-

Unfortunately, you're not using HTTP/2 right now. To do so:

-
-
-  • Use Firefox Nightly or go to about:config and enable "network.http.spdy.enabled.http2draft"
-  • Use Google Chrome Canary and/or go to chrome://flags/#enable-spdy4 to Enable SPDY/4 (Chrome's name for HTTP/2)
-

See code & instructions for connecting at https://github.com/golang/net/tree/master/http2.

- -`) -} - -func home(w http.ResponseWriter, r *http.Request) { - if r.URL.Path != "/" { - http.NotFound(w, r) - return - } - io.WriteString(w, ` - -

Go + HTTP/2

- -

Welcome to the Go language's HTTP/2 demo & interop server.

- -

Congratulations, you're using HTTP/2 right now.

- -

This server exists for others in the HTTP/2 community to test their HTTP/2 client implementations and point out flaws in our server.

- -

-The code is at golang.org/x/net/http2 and -is used transparently by the Go standard library from Go 1.6 and later. -

- -

Contact info: bradfitz@golang.org, or file a bug.

- -

Handlers for testing

-
-
-  • GET /reqinfo to dump the request + headers received
-  • GET /clockstream streams the current time every second
-  • GET /gophertiles to see a page with a bunch of images
-  • GET /serverpush to see a page with server push
-  • GET /file/gopher.png for a small file (does If-Modified-Since, Content-Range, etc)
-  • GET /file/go.src.tar.gz for a larger file (~10 MB)
-  • GET /redirect to redirect back to / (this page)
-  • GET /goroutines to see all active goroutines in this server
-  • PUT something to /crc32 to get a count of number of bytes and its CRC-32
-  • PUT something to /ECHO and it will be streamed back to you capitalized
- -`) -} - -func reqInfoHandler(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "text/plain") - fmt.Fprintf(w, "Method: %s\n", r.Method) - fmt.Fprintf(w, "Protocol: %s\n", r.Proto) - fmt.Fprintf(w, "Host: %s\n", r.Host) - fmt.Fprintf(w, "RemoteAddr: %s\n", r.RemoteAddr) - fmt.Fprintf(w, "RequestURI: %q\n", r.RequestURI) - fmt.Fprintf(w, "URL: %#v\n", r.URL) - fmt.Fprintf(w, "Body.ContentLength: %d (-1 means unknown)\n", r.ContentLength) - fmt.Fprintf(w, "Close: %v (relevant for HTTP/1 only)\n", r.Close) - fmt.Fprintf(w, "TLS: %#v\n", r.TLS) - fmt.Fprintf(w, "\nHeaders:\n") - r.Header.Write(w) -} - -func crcHandler(w http.ResponseWriter, r *http.Request) { - if r.Method != "PUT" { - http.Error(w, "PUT required.", 400) - return - } - crc := crc32.NewIEEE() - n, err := io.Copy(crc, r.Body) - if err == nil { - w.Header().Set("Content-Type", "text/plain") - fmt.Fprintf(w, "bytes=%d, CRC32=%x", n, crc.Sum(nil)) - } -} - -type capitalizeReader struct { - r io.Reader -} - -func (cr capitalizeReader) Read(p []byte) (n int, err error) { - n, err = cr.r.Read(p) - for i, b := range p[:n] { - if b >= 'a' && b <= 'z' { - p[i] = b - ('a' - 'A') - } - } - return -} - -type flushWriter struct { - w io.Writer -} - -func (fw flushWriter) Write(p []byte) (n int, err error) { - n, err = fw.w.Write(p) - if f, ok := fw.w.(http.Flusher); ok { - f.Flush() - } - return -} - -func echoCapitalHandler(w http.ResponseWriter, r *http.Request) { - if r.Method != "PUT" { - http.Error(w, "PUT required.", 400) - return - } - io.Copy(flushWriter{w}, capitalizeReader{r.Body}) -} - -var ( - fsGrp singleflight.Group - fsMu sync.Mutex // guards fsCache - fsCache = map[string]http.Handler{} -) - -// fileServer returns a file-serving handler that proxies URL. -// It lazily fetches URL on the first access and caches its contents forever. 
-func fileServer(url string, latency time.Duration) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if latency > 0 { - time.Sleep(latency) - } - hi, err := fsGrp.Do(url, func() (interface{}, error) { - fsMu.Lock() - if h, ok := fsCache[url]; ok { - fsMu.Unlock() - return h, nil - } - fsMu.Unlock() - - res, err := http.Get(url) - if err != nil { - return nil, err - } - defer res.Body.Close() - slurp, err := ioutil.ReadAll(res.Body) - if err != nil { - return nil, err - } - - modTime := time.Now() - var h http.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - http.ServeContent(w, r, path.Base(url), modTime, bytes.NewReader(slurp)) - }) - fsMu.Lock() - fsCache[url] = h - fsMu.Unlock() - return h, nil - }) - if err != nil { - http.Error(w, err.Error(), 500) - return - } - hi.(http.Handler).ServeHTTP(w, r) - }) -} - -func clockStreamHandler(w http.ResponseWriter, r *http.Request) { - clientGone := w.(http.CloseNotifier).CloseNotify() - w.Header().Set("Content-Type", "text/plain") - ticker := time.NewTicker(1 * time.Second) - defer ticker.Stop() - fmt.Fprintf(w, "# ~1KB of junk to force browsers to start rendering immediately: \n") - io.WriteString(w, strings.Repeat("# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n", 13)) - - for { - fmt.Fprintf(w, "%v\n", time.Now()) - w.(http.Flusher).Flush() - select { - case <-ticker.C: - case <-clientGone: - log.Printf("Client %v disconnected from the clock", r.RemoteAddr) - return - } - } -} - -func registerHandlers() { - tiles := newGopherTilesHandler() - push := newPushHandler() - - mux2 := http.NewServeMux() - http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { - switch { - case r.URL.Path == "/gophertiles": - tiles.ServeHTTP(w, r) // allow HTTP/2 + HTTP/1.x - return - case strings.HasPrefix(r.URL.Path, "/serverpush"): - push.ServeHTTP(w, r) // allow HTTP/2 + HTTP/1.x - return - case r.TLS == nil: // do not allow HTTP/1.x for anything else - http.Redirect(w, r, "https://"+httpsHost()+"/", http.StatusFound) - return - } - if r.ProtoMajor == 1 { - if r.URL.Path == "/reqinfo" { - reqInfoHandler(w, r) - return - } - homeOldHTTP(w, r) - return - } - mux2.ServeHTTP(w, r) - }) - mux2.HandleFunc("/", home) - mux2.Handle("/file/gopher.png", fileServer("https://golang.org/doc/gopher/frontpage.png", 0)) - mux2.Handle("/file/go.src.tar.gz", fileServer("https://storage.googleapis.com/golang/go1.4.1.src.tar.gz", 0)) - mux2.HandleFunc("/reqinfo", reqInfoHandler) - mux2.HandleFunc("/crc32", crcHandler) - mux2.HandleFunc("/ECHO", echoCapitalHandler) - mux2.HandleFunc("/clockstream", clockStreamHandler) - mux2.Handle("/gophertiles", tiles) - mux2.HandleFunc("/redirect", func(w http.ResponseWriter, r *http.Request) { - http.Redirect(w, r, "/", http.StatusFound) - }) - stripHomedir := regexp.MustCompile(`/(Users|home)/\w+`) - mux2.HandleFunc("/goroutines", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "text/plain; charset=utf-8") - buf := make([]byte, 2<<20) - w.Write(stripHomedir.ReplaceAll(buf[:runtime.Stack(buf, true)], nil)) - }) -} - -var pushResources = map[string]http.Handler{ - "/serverpush/static/jquery.min.js": fileServer("https://ajax.googleapis.com/ajax/libs/jquery/1.8.2/jquery.min.js", 100*time.Millisecond), - "/serverpush/static/godocs.js": fileServer("https://golang.org/lib/godoc/godocs.js", 100*time.Millisecond), - "/serverpush/static/playground.js": fileServer("https://golang.org/lib/godoc/playground.js", 
100*time.Millisecond), - "/serverpush/static/style.css": fileServer("https://golang.org/lib/godoc/style.css", 100*time.Millisecond), -} - -func newPushHandler() http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - for path, handler := range pushResources { - if r.URL.Path == path { - handler.ServeHTTP(w, r) - return - } - } - - cacheBust := time.Now().UnixNano() - if pusher, ok := w.(http.Pusher); ok { - for path := range pushResources { - url := fmt.Sprintf("%s?%d", path, cacheBust) - if err := pusher.Push(url, nil); err != nil { - log.Printf("Failed to push %v: %v", path, err) - } - } - } - time.Sleep(100 * time.Millisecond) // fake network latency + parsing time - if err := pushTmpl.Execute(w, struct { - CacheBust int64 - HTTPSHost string - HTTPHost string - }{ - CacheBust: cacheBust, - HTTPSHost: httpsHost(), - HTTPHost: httpHost(), - }); err != nil { - log.Printf("Executing server push template: %v", err) - } - }) -} - -func newGopherTilesHandler() http.Handler { - const gopherURL = "https://blog.golang.org/go-programming-language-turns-two_gophers.jpg" - res, err := http.Get(gopherURL) - if err != nil { - log.Fatal(err) - } - if res.StatusCode != 200 { - log.Fatalf("Error fetching %s: %v", gopherURL, res.Status) - } - slurp, err := ioutil.ReadAll(res.Body) - res.Body.Close() - if err != nil { - log.Fatal(err) - } - im, err := jpeg.Decode(bytes.NewReader(slurp)) - if err != nil { - if len(slurp) > 1024 { - slurp = slurp[:1024] - } - log.Fatalf("Failed to decode gopher image: %v (got %q)", err, slurp) - } - - type subImager interface { - SubImage(image.Rectangle) image.Image - } - const tileSize = 32 - xt := im.Bounds().Max.X / tileSize - yt := im.Bounds().Max.Y / tileSize - var tile [][][]byte // y -> x -> jpeg bytes - for yi := 0; yi < yt; yi++ { - var row [][]byte - for xi := 0; xi < xt; xi++ { - si := im.(subImager).SubImage(image.Rectangle{ - Min: image.Point{xi * tileSize, yi * tileSize}, - Max: image.Point{(xi + 1) * tileSize, (yi + 1) * tileSize}, - }) - buf := new(bytes.Buffer) - if err := jpeg.Encode(buf, si, &jpeg.Options{Quality: 90}); err != nil { - log.Fatal(err) - } - row = append(row, buf.Bytes()) - } - tile = append(tile, row) - } - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ms, _ := strconv.Atoi(r.FormValue("latency")) - const nanosPerMilli = 1e6 - if r.FormValue("x") != "" { - x, _ := strconv.Atoi(r.FormValue("x")) - y, _ := strconv.Atoi(r.FormValue("y")) - if ms <= 1000 { - time.Sleep(time.Duration(ms) * nanosPerMilli) - } - if x >= 0 && x < xt && y >= 0 && y < yt { - http.ServeContent(w, r, "", time.Time{}, bytes.NewReader(tile[y][x])) - return - } - } - io.WriteString(w, "") - fmt.Fprintf(w, "A grid of %d tiled images is below. Compare:

", xt*yt) - for _, ms := range []int{0, 30, 200, 1000} { - d := time.Duration(ms) * nanosPerMilli - fmt.Fprintf(w, "[HTTP/2, %v latency] [HTTP/1, %v latency]
\n", - httpsHost(), ms, d, - httpHost(), ms, d, - ) - } - io.WriteString(w, "

\n") - cacheBust := time.Now().UnixNano() - for y := 0; y < yt; y++ { - for x := 0; x < xt; x++ { - fmt.Fprintf(w, "", - tileSize, tileSize, x, y, cacheBust, ms) - } - io.WriteString(w, "
\n") - } - io.WriteString(w, `

- -
<< Back to Go HTTP/2 demo server`) - }) -} - -func httpsHost() string { - if *hostHTTPS != "" { - return *hostHTTPS - } - if v := *httpsAddr; strings.HasPrefix(v, ":") { - return "localhost" + v - } else { - return v - } -} - -func httpHost() string { - if *hostHTTP != "" { - return *hostHTTP - } - if v := *httpAddr; strings.HasPrefix(v, ":") { - return "localhost" + v - } else { - return v - } -} - -func serveProdTLS(autocertManager *autocert.Manager) error { - srv := &http.Server{ - TLSConfig: &tls.Config{ - GetCertificate: autocertManager.GetCertificate, - }, - } - http2.ConfigureServer(srv, &http2.Server{ - NewWriteScheduler: func() http2.WriteScheduler { - return http2.NewPriorityWriteScheduler(nil) - }, - }) - ln, err := net.Listen("tcp", ":443") - if err != nil { - return err - } - return srv.Serve(tls.NewListener(tcpKeepAliveListener{ln.(*net.TCPListener)}, srv.TLSConfig)) -} - -type tcpKeepAliveListener struct { - *net.TCPListener -} - -func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) { - tc, err := ln.AcceptTCP() - if err != nil { - return - } - tc.SetKeepAlive(true) - tc.SetKeepAlivePeriod(3 * time.Minute) - return tc, nil -} - -func serveProd() error { - log.Printf("running in production mode") - - storageClient, err := storage.NewClient(context.Background()) - if err != nil { - log.Fatalf("storage.NewClient: %v", err) - } - autocertManager := &autocert.Manager{ - Prompt: autocert.AcceptTOS, - HostPolicy: autocert.HostWhitelist("http2.golang.org"), - Cache: autocertcache.NewGoogleCloudStorageCache(storageClient, "golang-h2demo-autocert"), - } - - errc := make(chan error, 2) - go func() { errc <- http.ListenAndServe(":80", autocertManager.HTTPHandler(http.DefaultServeMux)) }() - go func() { errc <- serveProdTLS(autocertManager) }() - return <-errc -} - -const idleTimeout = 5 * time.Minute -const activeTimeout = 10 * time.Minute - -// TODO: put this into the standard library and actually send -// PING frames and GOAWAY, etc: golang.org/issue/14204 -func idleTimeoutHook() func(net.Conn, http.ConnState) { - var mu sync.Mutex - m := map[net.Conn]*time.Timer{} - return func(c net.Conn, cs http.ConnState) { - mu.Lock() - defer mu.Unlock() - if t, ok := m[c]; ok { - delete(m, c) - t.Stop() - } - var d time.Duration - switch cs { - case http.StateNew, http.StateIdle: - d = idleTimeout - case http.StateActive: - d = activeTimeout - default: - return - } - m[c] = time.AfterFunc(d, func() { - log.Printf("closing idle conn %v after %v", c.RemoteAddr(), d) - go c.Close() - }) - } -} - -func main() { - var srv http.Server - flag.BoolVar(&http2.VerboseLogs, "verbose", false, "Verbose HTTP/2 debugging.") - flag.Parse() - srv.Addr = *httpsAddr - srv.ConnState = idleTimeoutHook() - - registerHandlers() - - if *prod { - *hostHTTP = "http2.golang.org" - *hostHTTPS = "http2.golang.org" - log.Fatal(serveProd()) - } - - url := "https://" + httpsHost() + "/" - log.Printf("Listening on " + url) - http2.ConfigureServer(&srv, &http2.Server{}) - - if *httpAddr != "" { - go func() { - log.Printf("Listening on http://" + httpHost() + "/ (for unencrypted HTTP/1)") - log.Fatal(http.ListenAndServe(*httpAddr, nil)) - }() - } - - go func() { - log.Fatal(srv.ListenAndServeTLS("server.crt", "server.key")) - }() - select {} -} diff --git a/vendor/golang.org/x/net/http2/h2demo/launch.go b/vendor/golang.org/x/net/http2/h2demo/launch.go deleted file mode 100644 index df0866a..0000000 --- a/vendor/golang.org/x/net/http2/h2demo/launch.go +++ /dev/null @@ -1,302 +0,0 @@ -// Copyright 2014 The Go Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -import ( - "bufio" - "bytes" - "encoding/json" - "flag" - "fmt" - "io" - "io/ioutil" - "log" - "net/http" - "os" - "strings" - "time" - - "golang.org/x/oauth2" - "golang.org/x/oauth2/google" - compute "google.golang.org/api/compute/v1" -) - -var ( - proj = flag.String("project", "symbolic-datum-552", "name of Project") - zone = flag.String("zone", "us-central1-a", "GCE zone") - mach = flag.String("machinetype", "n1-standard-1", "Machine type") - instName = flag.String("instance_name", "http2-demo", "Name of VM instance.") - sshPub = flag.String("ssh_public_key", "", "ssh public key file to authorize. Can modify later in Google's web UI anyway.") - staticIP = flag.String("static_ip", "130.211.116.44", "Static IP to use. If empty, automatic.") - - writeObject = flag.String("write_object", "", "If non-empty, a VM isn't created and the flag value is Google Cloud Storage bucket/object to write. The contents from stdin.") - publicObject = flag.Bool("write_object_is_public", false, "Whether the object created by --write_object should be public.") -) - -func readFile(v string) string { - slurp, err := ioutil.ReadFile(v) - if err != nil { - log.Fatalf("Error reading %s: %v", v, err) - } - return strings.TrimSpace(string(slurp)) -} - -var config = &oauth2.Config{ - // The client-id and secret should be for an "Installed Application" when using - // the CLI. Later we'll use a web application with a callback. - ClientID: readFile("client-id.dat"), - ClientSecret: readFile("client-secret.dat"), - Endpoint: google.Endpoint, - Scopes: []string{ - compute.DevstorageFullControlScope, - compute.ComputeScope, - "https://www.googleapis.com/auth/sqlservice", - "https://www.googleapis.com/auth/sqlservice.admin", - }, - RedirectURL: "urn:ietf:wg:oauth:2.0:oob", -} - -const baseConfig = `#cloud-config -coreos: - units: - - name: h2demo.service - command: start - content: | - [Unit] - Description=HTTP2 Demo - - [Service] - ExecStartPre=/bin/bash -c 'mkdir -p /opt/bin && curl -s -o /opt/bin/h2demo http://storage.googleapis.com/http2-demo-server-tls/h2demo && chmod +x /opt/bin/h2demo' - ExecStart=/opt/bin/h2demo --prod - RestartSec=5s - Restart=always - Type=simple - - [Install] - WantedBy=multi-user.target -` - -func main() { - flag.Parse() - if *proj == "" { - log.Fatalf("Missing --project flag") - } - prefix := "https://www.googleapis.com/compute/v1/projects/" + *proj - machType := prefix + "/zones/" + *zone + "/machineTypes/" + *mach - - const tokenFileName = "token.dat" - tokenFile := tokenCacheFile(tokenFileName) - tokenSource := oauth2.ReuseTokenSource(nil, tokenFile) - token, err := tokenSource.Token() - if err != nil { - if *writeObject != "" { - log.Fatalf("Can't use --write_object without a valid token.dat file already cached.") - } - log.Printf("Error getting token from %s: %v", tokenFileName, err) - log.Printf("Get auth code from %v", config.AuthCodeURL("my-state")) - fmt.Print("\nEnter auth code: ") - sc := bufio.NewScanner(os.Stdin) - sc.Scan() - authCode := strings.TrimSpace(sc.Text()) - token, err = config.Exchange(oauth2.NoContext, authCode) - if err != nil { - log.Fatalf("Error exchanging auth code for a token: %v", err) - } - if err := tokenFile.WriteToken(token); err != nil { - log.Fatalf("Error writing to %s: %v", tokenFileName, err) - } - tokenSource = oauth2.ReuseTokenSource(token, nil) - } - - oauthClient := 
oauth2.NewClient(oauth2.NoContext, tokenSource) - - if *writeObject != "" { - writeCloudStorageObject(oauthClient) - return - } - - computeService, _ := compute.New(oauthClient) - - natIP := *staticIP - if natIP == "" { - // Try to find it by name. - aggAddrList, err := computeService.Addresses.AggregatedList(*proj).Do() - if err != nil { - log.Fatal(err) - } - // http://godoc.org/code.google.com/p/google-api-go-client/compute/v1#AddressAggregatedList - IPLoop: - for _, asl := range aggAddrList.Items { - for _, addr := range asl.Addresses { - if addr.Name == *instName+"-ip" && addr.Status == "RESERVED" { - natIP = addr.Address - break IPLoop - } - } - } - } - - cloudConfig := baseConfig - if *sshPub != "" { - key := strings.TrimSpace(readFile(*sshPub)) - cloudConfig += fmt.Sprintf("\nssh_authorized_keys:\n - %s\n", key) - } - if os.Getenv("USER") == "bradfitz" { - cloudConfig += fmt.Sprintf("\nssh_authorized_keys:\n - %s\n", "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEAwks9dwWKlRC+73gRbvYtVg0vdCwDSuIlyt4z6xa/YU/jTDynM4R4W10hm2tPjy8iR1k8XhDv4/qdxe6m07NjG/By1tkmGpm1mGwho4Pr5kbAAy/Qg+NLCSdAYnnE00FQEcFOC15GFVMOW2AzDGKisReohwH9eIzHPzdYQNPRWXE= bradfitz@papag.bradfitz.com") - } - const maxCloudConfig = 32 << 10 // per compute API docs - if len(cloudConfig) > maxCloudConfig { - log.Fatalf("cloud config length of %d bytes is over %d byte limit", len(cloudConfig), maxCloudConfig) - } - - instance := &compute.Instance{ - Name: *instName, - Description: "Go Builder", - MachineType: machType, - Disks: []*compute.AttachedDisk{instanceDisk(computeService)}, - Tags: &compute.Tags{ - Items: []string{"http-server", "https-server"}, - }, - Metadata: &compute.Metadata{ - Items: []*compute.MetadataItems{ - { - Key: "user-data", - Value: &cloudConfig, - }, - }, - }, - NetworkInterfaces: []*compute.NetworkInterface{ - { - AccessConfigs: []*compute.AccessConfig{ - { - Type: "ONE_TO_ONE_NAT", - Name: "External NAT", - NatIP: natIP, - }, - }, - Network: prefix + "/global/networks/default", - }, - }, - ServiceAccounts: []*compute.ServiceAccount{ - { - Email: "default", - Scopes: []string{ - compute.DevstorageFullControlScope, - compute.ComputeScope, - }, - }, - }, - } - - log.Printf("Creating instance...") - op, err := computeService.Instances.Insert(*proj, *zone, instance).Do() - if err != nil { - log.Fatalf("Failed to create instance: %v", err) - } - opName := op.Name - log.Printf("Created. Waiting on operation %v", opName) -OpLoop: - for { - time.Sleep(2 * time.Second) - op, err := computeService.ZoneOperations.Get(*proj, *zone, opName).Do() - if err != nil { - log.Fatalf("Failed to get op %s: %v", opName, err) - } - switch op.Status { - case "PENDING", "RUNNING": - log.Printf("Waiting on operation %v", opName) - continue - case "DONE": - if op.Error != nil { - for _, operr := range op.Error.Errors { - log.Printf("Error: %+v", operr) - } - log.Fatalf("Failed to start.") - } - log.Printf("Success. 
%+v", op) - break OpLoop - default: - log.Fatalf("Unknown status %q: %+v", op.Status, op) - } - } - - inst, err := computeService.Instances.Get(*proj, *zone, *instName).Do() - if err != nil { - log.Fatalf("Error getting instance after creation: %v", err) - } - ij, _ := json.MarshalIndent(inst, "", " ") - log.Printf("Instance: %s", ij) -} - -func instanceDisk(svc *compute.Service) *compute.AttachedDisk { - const imageURL = "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-stable-444-5-0-v20141016" - diskName := *instName + "-disk" - - return &compute.AttachedDisk{ - AutoDelete: true, - Boot: true, - Type: "PERSISTENT", - InitializeParams: &compute.AttachedDiskInitializeParams{ - DiskName: diskName, - SourceImage: imageURL, - DiskSizeGb: 50, - }, - } -} - -func writeCloudStorageObject(httpClient *http.Client) { - content := os.Stdin - const maxSlurp = 1 << 20 - var buf bytes.Buffer - n, err := io.CopyN(&buf, content, maxSlurp) - if err != nil && err != io.EOF { - log.Fatalf("Error reading from stdin: %v, %v", n, err) - } - contentType := http.DetectContentType(buf.Bytes()) - - req, err := http.NewRequest("PUT", "https://storage.googleapis.com/"+*writeObject, io.MultiReader(&buf, content)) - if err != nil { - log.Fatal(err) - } - req.Header.Set("x-goog-api-version", "2") - if *publicObject { - req.Header.Set("x-goog-acl", "public-read") - } - req.Header.Set("Content-Type", contentType) - res, err := httpClient.Do(req) - if err != nil { - log.Fatal(err) - } - if res.StatusCode != 200 { - res.Write(os.Stderr) - log.Fatalf("Failed.") - } - log.Printf("Success.") - os.Exit(0) -} - -type tokenCacheFile string - -func (f tokenCacheFile) Token() (*oauth2.Token, error) { - slurp, err := ioutil.ReadFile(string(f)) - if err != nil { - return nil, err - } - t := new(oauth2.Token) - if err := json.Unmarshal(slurp, t); err != nil { - return nil, err - } - return t, nil -} - -func (f tokenCacheFile) WriteToken(t *oauth2.Token) error { - jt, err := json.Marshal(t) - if err != nil { - return err - } - return ioutil.WriteFile(string(f), jt, 0600) -} diff --git a/vendor/golang.org/x/net/http2/h2demo/rootCA.key b/vendor/golang.org/x/net/http2/h2demo/rootCA.key deleted file mode 100644 index a15a6ab..0000000 --- a/vendor/golang.org/x/net/http2/h2demo/rootCA.key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSSR8Od0+9Q -62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoTZjkUygby -XDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYkJfODVGnV -mr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3mOoLb4yJ -JQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYWcaiW8LWZ -SUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABAoIBAFFHV7JMAqPWnMYA -nezY6J81v9+XN+7xABNWM2Q8uv4WdksbigGLTXR3/680Z2hXqJ7LMeC5XJACFT/e -/Gr0vmpgOCygnCPfjGehGKpavtfksXV3edikUlnCXsOP1C//c1bFL+sMYmFCVgTx -qYdDK8yKzXNGrKYT6q5YG7IglyRNV1rsQa8lM/5taFYiD1Ck/3tQi3YIq8Lcuser -hrxsMABcQ6mi+EIvG6Xr4mfJug0dGJMHG4RG1UGFQn6RXrQq2+q53fC8ZbVUSi0j -NQ918aKFzktwv+DouKU0ME4I9toks03gM860bAL7zCbKGmwR3hfgX/TqzVCWpG9E -LDVfvekCgYEA8fk9N53jbBRmULUGEf4qWypcLGiZnNU0OeXWpbPV9aa3H0VDytA7 -8fCN2dPAVDPqlthMDdVe983NCNwp2Yo8ZimDgowyIAKhdC25s1kejuaiH9OAPj3c -0f8KbriYX4n8zNHxFwK6Ae3pQ6EqOLJVCUsziUaZX9nyKY5aZlyX6xcCgYEAwjws -K62PjC64U5wYddNLp+kNdJ4edx+a7qBb3mEgPvSFT2RO3/xafJyG8kQB30Mfstjd -bRxyUV6N0vtX1zA7VQtRUAvfGCecpMo+VQZzcHXKzoRTnQ7eZg4Lmj5fQ9tOAKAo -QCVBoSW/DI4PZL26CAMDcAba4Pa22ooLapoRIQsCgYA6pIfkkbxLNkpxpt2YwLtt 
-Kr/590O7UaR9n6k8sW/aQBRDXNsILR1KDl2ifAIxpf9lnXgZJiwE7HiTfCAcW7c1 -nzwDCI0hWuHcMTS/NYsFYPnLsstyyjVZI3FY0h4DkYKV9Q9z3zJLQ2hz/nwoD3gy -b2pHC7giFcTts1VPV4Nt8wKBgHeFn4ihHJweg76vZz3Z78w7VNRWGFklUalVdDK7 -gaQ7w2y/ROn/146mo0OhJaXFIFRlrpvdzVrU3GDf2YXJYDlM5ZRkObwbZADjksev -WInzcgDy3KDg7WnPasRXbTfMU4t/AkW2p1QKbi3DnSVYuokDkbH2Beo45vxDxhKr -C69RAoGBAIyo3+OJenoZmoNzNJl2WPW5MeBUzSh8T/bgyjFTdqFHF5WiYRD/lfHj -x9Glyw2nutuT4hlOqHvKhgTYdDMsF2oQ72fe3v8Q5FU7FuKndNPEAyvKNXZaShVA -hnlhv5DjXKb0wFWnt5PCCiQLtzG0yyHaITrrEme7FikkIcTxaX/Y ------END RSA PRIVATE KEY----- diff --git a/vendor/golang.org/x/net/http2/h2demo/rootCA.pem b/vendor/golang.org/x/net/http2/h2demo/rootCA.pem deleted file mode 100644 index 3a323e7..0000000 --- a/vendor/golang.org/x/net/http2/h2demo/rootCA.pem +++ /dev/null @@ -1,26 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV -BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG -A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 -DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 -NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG -cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv -c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B -AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS -R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT -ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk -JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 -mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW -caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G -A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt -hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB -MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES -MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv -bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h -U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao -eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 -UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD -58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n -sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF -kPe6XoSbiLm/kxk32T0= ------END CERTIFICATE----- diff --git a/vendor/golang.org/x/net/http2/h2demo/rootCA.srl b/vendor/golang.org/x/net/http2/h2demo/rootCA.srl deleted file mode 100644 index 6db3891..0000000 --- a/vendor/golang.org/x/net/http2/h2demo/rootCA.srl +++ /dev/null @@ -1 +0,0 @@ -E2CE26BF3285059C diff --git a/vendor/golang.org/x/net/http2/h2demo/server.crt b/vendor/golang.org/x/net/http2/h2demo/server.crt deleted file mode 100644 index c59059b..0000000 --- a/vendor/golang.org/x/net/http2/h2demo/server.crt +++ /dev/null @@ -1,20 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDPjCCAiYCCQDizia/MoUFnDANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJV -UzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xFDASBgNVBAoT -C0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhvc3QxHTAbBgkqhkiG9w0BCQEW -DmJyYWRAZGFuZ2EuY29tMB4XDTE0MDcxNTIwNTAyN1oXDTE1MTEyNzIwNTAyN1ow -RzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMQswCQYDVQQHEwJTRjEeMBwGA1UE -ChMVYnJhZGZpdHogaHR0cDIgc2VydmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A -MIIBCgKCAQEAs1Y9CyLFrdL8VQWN1WaifDqaZFnoqjHhCMlc1TfG2zA+InDifx2l -gZD3o8FeNnAcfM2sPlk3+ZleOYw9P/CklFVDlvqmpCv9ss/BEp/dDaWvy1LmJ4c2 -dbQJfmTxn7CV1H3TsVJvKdwFmdoABb41NoBp6+NNO7OtDyhbIMiCI0pL3Nefb3HL 
-A7hIMo3DYbORTtJLTIH9W8YKrEWL0lwHLrYFx/UdutZnv+HjdmO6vCN4na55mjws -/vjKQUmc7xeY7Xe20xDEG2oDKVkL2eD7FfyrYMS3rO1ExP2KSqlXYG/1S9I/fz88 -F0GK7HX55b5WjZCl2J3ERVdnv/0MQv+sYQIDAQABMA0GCSqGSIb3DQEBBQUAA4IB -AQC0zL+n/YpRZOdulSu9tS8FxrstXqGWoxfe+vIUgqfMZ5+0MkjJ/vW0FqlLDl2R -rn4XaR3e7FmWkwdDVbq/UB6lPmoAaFkCgh9/5oapMaclNVNnfF3fjCJfRr+qj/iD -EmJStTIN0ZuUjAlpiACmfnpEU55PafT5Zx+i1yE4FGjw8bJpFoyD4Hnm54nGjX19 -KeCuvcYFUPnBm3lcL0FalF2AjqV02WTHYNQk7YF/oeO7NKBoEgvGvKG3x+xaOeBI -dwvdq175ZsGul30h+QjrRlXhH/twcuaT3GSdoysDl9cCYE8f1Mk8PD6gan3uBCJU -90p6/CbU71bGbfpM2PHot2fm ------END CERTIFICATE----- diff --git a/vendor/golang.org/x/net/http2/h2demo/server.key b/vendor/golang.org/x/net/http2/h2demo/server.key deleted file mode 100644 index f329c14..0000000 --- a/vendor/golang.org/x/net/http2/h2demo/server.key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEAs1Y9CyLFrdL8VQWN1WaifDqaZFnoqjHhCMlc1TfG2zA+InDi -fx2lgZD3o8FeNnAcfM2sPlk3+ZleOYw9P/CklFVDlvqmpCv9ss/BEp/dDaWvy1Lm -J4c2dbQJfmTxn7CV1H3TsVJvKdwFmdoABb41NoBp6+NNO7OtDyhbIMiCI0pL3Nef -b3HLA7hIMo3DYbORTtJLTIH9W8YKrEWL0lwHLrYFx/UdutZnv+HjdmO6vCN4na55 -mjws/vjKQUmc7xeY7Xe20xDEG2oDKVkL2eD7FfyrYMS3rO1ExP2KSqlXYG/1S9I/ -fz88F0GK7HX55b5WjZCl2J3ERVdnv/0MQv+sYQIDAQABAoIBADQ2spUwbY+bcz4p -3M66ECrNQTBggP40gYl2XyHxGGOu2xhZ94f9ELf1hjRWU2DUKWco1rJcdZClV6q3 -qwmXvcM2Q/SMS8JW0ImkNVl/0/NqPxGatEnj8zY30d/L8hGFb0orzFu/XYA5gCP4 -NbN2WrXgk3ZLeqwcNxHHtSiJWGJ/fPyeDWAu/apy75u9Xf2GlzBZmV6HYD9EfK80 -LTlI60f5FO487CrJnboL7ovPJrIHn+k05xRQqwma4orpz932rTXnTjs9Lg6KtbQN -a7PrqfAntIISgr11a66Mng3IYH1lYqJsWJJwX/xHT4WLEy0EH4/0+PfYemJekz2+ -Co62drECgYEA6O9zVJZXrLSDsIi54cfxA7nEZWm5CAtkYWeAHa4EJ+IlZ7gIf9sL -W8oFcEfFGpvwVqWZ+AsQ70dsjXAv3zXaG0tmg9FtqWp7pzRSMPidifZcQwWkKeTO -gJnFmnVyed8h6GfjTEu4gxo1/S5U0V+mYSha01z5NTnN6ltKx1Or3b0CgYEAxRgm -S30nZxnyg/V7ys61AZhst1DG2tkZXEMcA7dYhabMoXPJAP/EfhlWwpWYYUs/u0gS -Wwmf5IivX5TlYScgmkvb/NYz0u4ZmOXkLTnLPtdKKFXhjXJcHjUP67jYmOxNlJLp -V4vLRnFxTpffAV+OszzRxsXX6fvruwZBANYJeXUCgYBVouLFsFgfWGYp2rpr9XP4 -KK25kvrBqF6JKOIDB1zjxNJ3pUMKrl8oqccCFoCyXa4oTM2kUX0yWxHfleUjrMq4 -yimwQKiOZmV7fVLSSjSw6e/VfBd0h3gb82ygcplZkN0IclkwTY5SNKqwn/3y07V5 -drqdhkrgdJXtmQ6O5YYECQKBgATERcDToQ1USlI4sKrB/wyv1AlG8dg/IebiVJ4e -ZAyvcQmClFzq0qS+FiQUnB/WQw9TeeYrwGs1hxBHuJh16srwhLyDrbMvQP06qh8R -48F8UXXSRec22dV9MQphaROhu2qZdv1AC0WD3tqov6L33aqmEOi+xi8JgbT/PLk5 -c/c1AoGBAI1A/02ryksW6/wc7/6SP2M2rTy4m1sD/GnrTc67EHnRcVBdKO6qH2RY -nqC8YcveC2ZghgPTDsA3VGuzuBXpwY6wTyV99q6jxQJ6/xcrD9/NUG6Uwv/xfCxl -IJLeBYEqQundSSny3VtaAUK8Ul1nxpTvVRNwtcyWTo8RHAAyNPWd ------END RSA PRIVATE KEY----- diff --git a/vendor/golang.org/x/net/http2/h2demo/service.yaml b/vendor/golang.org/x/net/http2/h2demo/service.yaml deleted file mode 100644 index 2b7d541..0000000 --- a/vendor/golang.org/x/net/http2/h2demo/service.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: h2demo -spec: - externalTrafficPolicy: Local - ports: - - port: 80 - targetPort: 80 - name: http - - port: 443 - targetPort: 443 - name: https - selector: - app: h2demo - type: LoadBalancer - loadBalancerIP: 130.211.116.44 diff --git a/vendor/golang.org/x/net/http2/h2demo/tmpl.go b/vendor/golang.org/x/net/http2/h2demo/tmpl.go deleted file mode 100644 index 504d6a7..0000000 --- a/vendor/golang.org/x/net/http2/h2demo/tmpl.go +++ /dev/null @@ -1,1991 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build h2demo - -package main - -import "html/template" - -var pushTmpl = template.Must(template.New("serverpush").Parse(` - - - - - - - - - HTTP/2 Server Push Demo - - - - - - - - - -
-Note: This page exists for demonstration purposes. For the actual cmd/go docs, go to golang.org/cmd/go.
-[stripped template page chrome: header links "HTTP/2 with Server Push | HTTP only" and playground "Run" / "Format" controls]

Command go

- - - - - - - - - - - - - - -

-Go is a tool for managing Go source code. -

-

-Usage: -

-
go command [arguments]
-
-

-The commands are: -

-
build       compile packages and dependencies
-clean       remove object files
-doc         show documentation for package or symbol
-env         print Go environment information
-bug         start a bug report
-fix         run go tool fix on packages
-fmt         run gofmt on package sources
-generate    generate Go files by processing source
-get         download and install packages and dependencies
-install     compile and install packages and dependencies
-list        list packages
-run         compile and run Go program
-test        test packages
-tool        run specified go tool
-version     print Go version
-vet         run go tool vet on packages
-
-

-Use "go help [command]" for more information about a command. -

-

-Additional help topics: -

-
c           calling between Go and C
-buildmode   description of build modes
-filetype    file types
-gopath      GOPATH environment variable
-environment environment variables
-importpath  import path syntax
-packages    description of package lists
-testflag    description of testing flags
-testfunc    description of testing functions
-
-

-Use "go help [topic]" for more information about that topic. -

-

Compile packages and dependencies

-

-Usage: -

-
go build [-o output] [-i] [build flags] [packages]
-
-

-Build compiles the packages named by the import paths, -along with their dependencies, but it does not install the results. -

-

-If the arguments to build are a list of .go files, build treats -them as a list of source files specifying a single package. -

-

-When compiling a single main package, build writes -the resulting executable to an output file named after -the first source file ('go build ed.go rx.go' writes 'ed' or 'ed.exe') -or the source code directory ('go build unix/sam' writes 'sam' or 'sam.exe'). -The '.exe' suffix is added when writing a Windows executable. -

-

-When compiling multiple packages or a single non-main package, -build compiles the packages but discards the resulting object, -serving only as a check that the packages can be built. -

-

-When compiling packages, build ignores files that end in '_test.go'. -

-

-The -o flag, only allowed when compiling a single package, -forces build to write the resulting executable or object -to the named output file, instead of the default behavior described -in the last two paragraphs. -

-

-The -i flag installs the packages that are dependencies of the target. -

-

-The build flags are shared by the build, clean, get, install, list, run, -and test commands: -

-
-a
-	force rebuilding of packages that are already up-to-date.
--n
-	print the commands but do not run them.
--p n
-	the number of programs, such as build commands or
-	test binaries, that can be run in parallel.
-	The default is the number of CPUs available.
--race
-	enable data race detection.
-	Supported only on linux/amd64, freebsd/amd64, darwin/amd64 and windows/amd64.
--msan
-	enable interoperation with memory sanitizer.
-	Supported only on linux/amd64,
-	and only with Clang/LLVM as the host C compiler.
--v
-	print the names of packages as they are compiled.
--work
-	print the name of the temporary work directory and
-	do not delete it when exiting.
--x
-	print the commands.
-
--asmflags 'flag list'
-	arguments to pass on each go tool asm invocation.
--buildmode mode
-	build mode to use. See 'go help buildmode' for more.
--compiler name
-	name of compiler to use, as in runtime.Compiler (gccgo or gc).
--gccgoflags 'arg list'
-	arguments to pass on each gccgo compiler/linker invocation.
--gcflags 'arg list'
-	arguments to pass on each go tool compile invocation.
--installsuffix suffix
-	a suffix to use in the name of the package installation directory,
-	in order to keep output separate from default builds.
-	If using the -race flag, the install suffix is automatically set to race
-	or, if set explicitly, has _race appended to it.  Likewise for the -msan
-	flag.  Using a -buildmode option that requires non-default compile flags
-	has a similar effect.
--ldflags 'flag list'
-	arguments to pass on each go tool link invocation.
--linkshared
-	link against shared libraries previously created with
-	-buildmode=shared.
--pkgdir dir
-	install and load all packages from dir instead of the usual locations.
-	For example, when building with a non-standard configuration,
-	use -pkgdir to keep generated packages in a separate location.
--tags 'tag list'
-	a list of build tags to consider satisfied during the build.
-	For more information about build tags, see the description of
-	build constraints in the documentation for the go/build package.
--toolexec 'cmd args'
-	a program to use to invoke toolchain programs like vet and asm.
-	For example, instead of running asm, the go command will run
-	'cmd args /path/to/asm <arguments for asm>'.
-
-

-The list flags accept a space-separated list of strings. To embed spaces -in an element in the list, surround it with either single or double quotes. -

-

-For more about specifying packages, see 'go help packages'. -For more about where packages and binaries are installed, -run 'go help gopath'. -For more about calling between Go and C/C++, run 'go help c'. -

-

-Note: Build adheres to certain conventions such as those described -by 'go help gopath'. Not all projects can follow these conventions, -however. Installations that have their own conventions or that use -a separate software build system may choose to use lower-level -invocations such as 'go tool compile' and 'go tool link' to avoid -some of the overheads and design decisions of the build tool. -

-

-See also: go install, go get, go clean. -

-
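The -tags flag above is how files guarded by build constraints get pulled into a build; the deleted h2demo sources in this very patch are guarded by a `// +build h2demo` line and compiled with `go install -tags "h2demo netgo" ...`. A minimal sketch of that pattern, using a hypothetical package and tag name:

	// +build demo

	// Package demoinfo is a hypothetical illustration: this file is compiled
	// only when the "demo" build tag is supplied, e.g. `go build -tags demo`.
	package demoinfo

	// Enabled is true in builds that set the "demo" tag; a sibling file
	// without the constraint would normally declare `const Enabled = false`.
	const Enabled = true

Without -tags demo the file is simply excluded from the build.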

Remove object files

-

-Usage: -

-
go clean [-i] [-r] [-n] [-x] [build flags] [packages]
-
-

-Clean removes object files from package source directories. -The go command builds most objects in a temporary directory, -so go clean is mainly concerned with object files left by other -tools or by manual invocations of go build. -

-

-Specifically, clean removes the following files from each of the -source directories corresponding to the import paths: -

-
_obj/            old object directory, left from Makefiles
-_test/           old test directory, left from Makefiles
-_testmain.go     old gotest file, left from Makefiles
-test.out         old test log, left from Makefiles
-build.out        old test log, left from Makefiles
-*.[568ao]        object files, left from Makefiles
-
-DIR(.exe)        from go build
-DIR.test(.exe)   from go test -c
-MAINFILE(.exe)   from go build MAINFILE.go
-*.so             from SWIG
-
-

-In the list, DIR represents the final path element of the -directory, and MAINFILE is the base name of any Go source -file in the directory that is not included when building -the package. -

-

-The -i flag causes clean to remove the corresponding installed -archive or binary (what 'go install' would create). -

-

-The -n flag causes clean to print the remove commands it would execute, -but not run them. -

-

-The -r flag causes clean to be applied recursively to all the -dependencies of the packages named by the import paths. -

-

-The -x flag causes clean to print remove commands as it executes them. -

-

-For more about build flags, see 'go help build'. -

-

-For more about specifying packages, see 'go help packages'. -

-

Show documentation for package or symbol

-

-Usage: -

-
go doc [-u] [-c] [package|[package.]symbol[.method]]
-
-

-Doc prints the documentation comments associated with the item identified by its -arguments (a package, const, func, type, var, or method) followed by a one-line -summary of each of the first-level items "under" that item (package-level -declarations for a package, methods for a type, etc.). -

-

-Doc accepts zero, one, or two arguments. -

-

-Given no arguments, that is, when run as -

-
go doc
-
-

-it prints the package documentation for the package in the current directory. -If the package is a command (package main), the exported symbols of the package -are elided from the presentation unless the -cmd flag is provided. -

-

-When run with one argument, the argument is treated as a Go-syntax-like -representation of the item to be documented. What the argument selects depends -on what is installed in GOROOT and GOPATH, as well as the form of the argument, -which is schematically one of these: -

-
go doc <pkg>
-go doc <sym>[.<method>]
-go doc [<pkg>.]<sym>[.<method>]
-go doc [<pkg>.][<sym>.]<method>
-
-

-The first item in this list matched by the argument is the one whose documentation -is printed. (See the examples below.) However, if the argument starts with a capital -letter it is assumed to identify a symbol or method in the current directory. -

-

-For packages, the order of scanning is determined lexically in breadth-first order. -That is, the package presented is the one that matches the search and is nearest -the root and lexically first at its level of the hierarchy. The GOROOT tree is -always scanned in its entirety before GOPATH. -

-

-If there is no package specified or matched, the package in the current -directory is selected, so "go doc Foo" shows the documentation for symbol Foo in -the current package. -

-

-The package path must be either a qualified path or a proper suffix of a -path. The go tool's usual package mechanism does not apply: package path -elements like . and ... are not implemented by go doc. -

-

-When run with two arguments, the first must be a full package path (not just a -suffix), and the second is a symbol or symbol and method; this is similar to the -syntax accepted by godoc: -

-
go doc <pkg> <sym>[.<method>]
-
-

-In all forms, when matching symbols, lower-case letters in the argument match -either case but upper-case letters match exactly. This means that there may be -multiple matches of a lower-case argument in a package if different symbols have -different cases. If this occurs, documentation for all matches is printed. -

-

-Examples: -

-
go doc
-	Show documentation for current package.
-go doc Foo
-	Show documentation for Foo in the current package.
-	(Foo starts with a capital letter so it cannot match
-	a package path.)
-go doc encoding/json
-	Show documentation for the encoding/json package.
-go doc json
-	Shorthand for encoding/json.
-go doc json.Number (or go doc json.number)
-	Show documentation and method summary for json.Number.
-go doc json.Number.Int64 (or go doc json.number.int64)
-	Show documentation for json.Number's Int64 method.
-go doc cmd/doc
-	Show package docs for the doc command.
-go doc -cmd cmd/doc
-	Show package docs and exported symbols within the doc command.
-go doc template.new
-	Show documentation for html/template's New function.
-	(html/template is lexically before text/template)
-go doc text/template.new # One argument
-	Show documentation for text/template's New function.
-go doc text/template new # Two arguments
-	Show documentation for text/template's New function.
-
-At least in the current tree, these invocations all print the
-documentation for json.Decoder's Decode method:
-
-go doc json.Decoder.Decode
-go doc json.decoder.decode
-go doc json.decode
-cd go/src/encoding/json; go doc decode
-
-

-Flags: -

-
-c
-	Respect case when matching symbols.
--cmd
-	Treat a command (package main) like a regular package.
-	Otherwise package main's exported symbols are hidden
-	when showing the package's top-level documentation.
--u
-	Show documentation for unexported as well as exported
-	symbols and methods.
-
-

Print Go environment information

-

-Usage: -

-
go env [var ...]
-
-

-Env prints Go environment information. -

-

-By default env prints information as a shell script -(on Windows, a batch file). If one or more variable -names is given as arguments, env prints the value of -each named variable on its own line. -

-
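Since 'go env NAME' prints just that variable's value on a line of its own, a tool can read a single setting by shelling out to it. A minimal sketch (this program is hypothetical, not part of the Go distribution):

	package main

	import (
		"fmt"
		"log"
		"os/exec"
		"strings"
	)

	func main() {
		// "go env GOPATH" prints only the GOPATH value, followed by a newline.
		out, err := exec.Command("go", "env", "GOPATH").Output()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("GOPATH is", strings.TrimSpace(string(out)))
	}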

Start a bug report

-

-Usage: -

-
go bug
-
-

-Bug opens the default browser and starts a new bug report. -The report includes useful system information. -

-

Run go tool fix on packages

-

-Usage: -

-
go fix [packages]
-
-

-Fix runs the Go fix command on the packages named by the import paths. -

-

-For more about fix, see 'go doc cmd/fix'. -For more about specifying packages, see 'go help packages'. -

-

-To run fix with specific options, run 'go tool fix'. -

-

-See also: go fmt, go vet. -

-

Run gofmt on package sources

-

-Usage: -

-
go fmt [-n] [-x] [packages]
-
-

-Fmt runs the command 'gofmt -l -w' on the packages named -by the import paths. It prints the names of the files that are modified. -

-

-For more about gofmt, see 'go doc cmd/gofmt'. -For more about specifying packages, see 'go help packages'. -

-

-The -n flag prints commands that would be executed. -The -x flag prints commands as they are executed. -

-

-To run gofmt with specific options, run gofmt itself. -

-

-See also: go fix, go vet. -

-

Generate Go files by processing source

-

-Usage: -

-
go generate [-run regexp] [-n] [-v] [-x] [build flags] [file.go... | packages]
-
-

-Generate runs commands described by directives within existing -files. Those commands can run any process but the intent is to -create or update Go source files. -

-

-Go generate is never run automatically by go build, go get, go test, -and so on. It must be run explicitly. -

-

-Go generate scans the file for directives, which are lines of -the form, -

-
//go:generate command argument...
-
-

-(note: no leading spaces and no space in "//go") where command -is the generator to be run, corresponding to an executable file -that can be run locally. It must either be in the shell path -(gofmt), a fully qualified path (/usr/you/bin/mytool), or a -command alias, described below. -

-

-Note that go generate does not parse the file, so lines that look -like directives in comments or multiline strings will be treated -as directives. -

-

-The arguments to the directive are space-separated tokens or -double-quoted strings passed to the generator as individual -arguments when it is run. -

-

-Quoted strings use Go syntax and are evaluated before execution; a -quoted string appears as a single argument to the generator. -

-

-Go generate sets several variables when it runs the generator: -

-
$GOARCH
-	The execution architecture (arm, amd64, etc.)
-$GOOS
-	The execution operating system (linux, windows, etc.)
-$GOFILE
-	The base name of the file.
-$GOLINE
-	The line number of the directive in the source file.
-$GOPACKAGE
-	The name of the package of the file containing the directive.
-$DOLLAR
-	A dollar sign.
-
-

-Other than variable substitution and quoted-string evaluation, no -special processing such as "globbing" is performed on the command -line. -

-

-As a last step before running the command, any invocations of any -environment variables with alphanumeric names, such as $GOFILE or -$HOME, are expanded throughout the command line. The syntax for -variable expansion is $NAME on all operating systems. Due to the -order of evaluation, variables are expanded even inside quoted -strings. If the variable NAME is not set, $NAME expands to the -empty string. -

-

-A directive of the form, -

-
//go:generate -command xxx args...
-
-

-specifies, for the remainder of this source file only, that the -string xxx represents the command identified by the arguments. This -can be used to create aliases or to handle multiword generators. -For example, -

-
//go:generate -command foo go tool foo
-
-

-specifies that the command "foo" represents the generator -"go tool foo". -

-
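Combining the pieces above, a hypothetical package could mix a plain directive, the predefined variables, and a -command alias. In a real file the //go:generate lines start at column 1 (they are indented here only for layout), and the example assumes the stringer tool is installed in the shell path:

	//go:generate echo generating in $GOPACKAGE from $GOFILE
	//go:generate -command gen stringer -type
	//go:generate gen Pill

	// Package painkiller is a hypothetical example; running "go generate"
	// in its directory executes the three directives above, in order.
	package painkiller

	type Pill int

	const (
		Placebo Pill = iota
		Aspirin
		Ibuprofen
	)

Here "gen Pill" expands through the alias to "stringer -type Pill".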

-Generate processes packages in the order given on the command line, -one at a time. If the command line lists .go files, they are treated -as a single package. Within a package, generate processes the -source files in a package in file name order, one at a time. Within -a source file, generate runs generators in the order they appear -in the file, one at a time. -

-

-If any generator returns an error exit status, "go generate" skips -all further processing for that package. -

-

-The generator is run in the package's source directory. -

-

-Go generate accepts one specific flag: -

-
-run=""
-	if non-empty, specifies a regular expression to select
-	directives whose full original source text (excluding
-	any trailing spaces and final newline) matches the
-	expression.
-
-

-It also accepts the standard build flags including -v, -n, and -x. -The -v flag prints the names of packages and files as they are -processed. -The -n flag prints commands that would be executed. -The -x flag prints commands as they are executed. -

-

-For more about build flags, see 'go help build'. -

-

-For more about specifying packages, see 'go help packages'. -

-

Download and install packages and dependencies

-

-Usage: -

-
go get [-d] [-f] [-fix] [-insecure] [-t] [-u] [build flags] [packages]
-
-

-Get downloads the packages named by the import paths, along with their -dependencies. It then installs the named packages, like 'go install'. -

-

-The -d flag instructs get to stop after downloading the packages; that is, -it instructs get not to install the packages. -

-

-The -f flag, valid only when -u is set, forces get -u not to verify that -each package has been checked out from the source control repository -implied by its import path. This can be useful if the source is a local fork -of the original. -

-

-The -fix flag instructs get to run the fix tool on the downloaded packages -before resolving dependencies or building the code. -

-

-The -insecure flag permits fetching from repositories and resolving -custom domains using insecure schemes such as HTTP. Use with caution. -

-

-The -t flag instructs get to also download the packages required to build -the tests for the specified packages. -

-

-The -u flag instructs get to use the network to update the named packages -and their dependencies. By default, get uses the network to check out -missing packages but does not use it to look for updates to existing packages. -

-

-The -v flag enables verbose progress and debug output. -

-

-Get also accepts build flags to control the installation. See 'go help build'. -

-
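For illustration only, an invocation that updates a package and also fetches
the dependencies of its tests might be:

	go get -u -t golang.org/x/net/http2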

-When checking out a new package, get creates the target directory -GOPATH/src/<import-path>. If the GOPATH contains multiple entries, -get uses the first one. For more details see: 'go help gopath'. -

-

-When checking out or updating a package, get looks for a branch or tag -that matches the locally installed version of Go. The most important -rule is that if the local installation is running version "go1", get -searches for a branch or tag named "go1". If no such version exists it -retrieves the most recent version of the package. -

-

-When go get checks out or updates a Git repository, -it also updates any git submodules referenced by the repository. -

-

-Get never checks out or updates code stored in vendor directories. -

-

-For more about specifying packages, see 'go help packages'. -

-

-For more about how 'go get' finds source code to -download, see 'go help importpath'. -

-

-See also: go build, go install, go clean. -

-

Compile and install packages and dependencies

-

-Usage: -

-
go install [build flags] [packages]
-
-

-Install compiles and installs the packages named by the import paths, -along with their dependencies. -

-

-For more about the build flags, see 'go help build'. -For more about specifying packages, see 'go help packages'. -

-

-See also: go build, go get, go clean. -

-

List packages

-

-Usage: -

-
go list [-e] [-f format] [-json] [build flags] [packages]
-
-

-List lists the packages named by the import paths, one per line. -

-

-The default output shows the package import path: -

-
bytes
-encoding/json
-github.com/gorilla/mux
-golang.org/x/net/html
-
-

-The -f flag specifies an alternate format for the list, using the -syntax of package template. The default output is equivalent to -f -''. The struct being passed to the template is: -

-
type Package struct {
-    Dir           string // directory containing package sources
-    ImportPath    string // import path of package in dir
-    ImportComment string // path in import comment on package statement
-    Name          string // package name
-    Doc           string // package documentation string
-    Target        string // install path
-    Shlib         string // the shared library that contains this package (only set when -linkshared)
-    Goroot        bool   // is this package in the Go root?
-    Standard      bool   // is this package part of the standard Go library?
-    Stale         bool   // would 'go install' do anything for this package?
-    StaleReason   string // explanation for Stale==true
-    Root          string // Go root or Go path dir containing this package
-    ConflictDir   string // this directory shadows Dir in $GOPATH
-    BinaryOnly    bool   // binary-only package: cannot be recompiled from sources
-
-    // Source files
-    GoFiles        []string // .go source files (excluding CgoFiles, TestGoFiles, XTestGoFiles)
-    CgoFiles       []string // .go source files that import "C"
-    IgnoredGoFiles []string // .go sources ignored due to build constraints
-    CFiles         []string // .c source files
-    CXXFiles       []string // .cc, .cxx and .cpp source files
-    MFiles         []string // .m source files
-    HFiles         []string // .h, .hh, .hpp and .hxx source files
-    FFiles         []string // .f, .F, .for and .f90 Fortran source files
-    SFiles         []string // .s source files
-    SwigFiles      []string // .swig files
-    SwigCXXFiles   []string // .swigcxx files
-    SysoFiles      []string // .syso object files to add to archive
-    TestGoFiles    []string // _test.go files in package
-    XTestGoFiles   []string // _test.go files outside package
-
-    // Cgo directives
-    CgoCFLAGS    []string // cgo: flags for C compiler
-    CgoCPPFLAGS  []string // cgo: flags for C preprocessor
-    CgoCXXFLAGS  []string // cgo: flags for C++ compiler
-    CgoFFLAGS    []string // cgo: flags for Fortran compiler
-    CgoLDFLAGS   []string // cgo: flags for linker
-    CgoPkgConfig []string // cgo: pkg-config names
-
-    // Dependency information
-    Imports      []string // import paths used by this package
-    Deps         []string // all (recursively) imported dependencies
-    TestImports  []string // imports from TestGoFiles
-    XTestImports []string // imports from XTestGoFiles
-
-    // Error information
-    Incomplete bool            // this package or a dependency has an error
-    Error      *PackageError   // error loading package
-    DepsErrors []*PackageError // errors loading dependencies
-}
-
-

-Packages stored in vendor directories report an ImportPath that includes the -path to the vendor directory (for example, "d/vendor/p" instead of "p"), -so that the ImportPath uniquely identifies a given copy of a package. -The Imports, Deps, TestImports, and XTestImports lists also contain these -expanded import paths. See golang.org/s/go15vendor for more about vendoring. -

-

-The error information, if any, is -

-
type PackageError struct {
-    ImportStack   []string // shortest path from package named on command line to this one
-    Pos           string   // position of error (if present, file:line:col)
-    Err           string   // the error itself
-}
-
-

-The template function "join" calls strings.Join. -

-
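For illustration, a template that uses "join" to print each package's import
path followed by its direct imports could be invoked as:

	go list -f '{{.ImportPath}}: {{join .Imports " "}}' golang.org/x/net/...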

-The template function "context" returns the build context, defined as: -

-
type Context struct {
-	GOARCH        string   // target architecture
-	GOOS          string   // target operating system
-	GOROOT        string   // Go root
-	GOPATH        string   // Go path
-	CgoEnabled    bool     // whether cgo can be used
-	UseAllFiles   bool     // use files regardless of +build lines, file names
-	Compiler      string   // compiler to assume when computing target paths
-	BuildTags     []string // build constraints to match in +build lines
-	ReleaseTags   []string // releases the current release is compatible with
-	InstallSuffix string   // suffix to use in the name of the install dir
-}
-
-

-For more information about the meaning of these fields see the documentation -for the go/build package's Context type. -

-

-The -json flag causes the package data to be printed in JSON format -instead of using the template format. -

-

-The -e flag changes the handling of erroneous packages, those that -cannot be found or are malformed. By default, the list command -prints an error to standard error for each erroneous package and -omits the packages from consideration during the usual printing. -With the -e flag, the list command never prints errors to standard -error and instead processes the erroneous packages with the usual -printing. Erroneous packages will have a non-empty ImportPath and -a non-nil Error field; other information may or may not be missing -(zeroed). -

-

-For more about build flags, see 'go help build'. -

-

-For more about specifying packages, see 'go help packages'. -

-

Compile and run Go program

-

-Usage: -

-
go run [build flags] [-exec xprog] gofiles... [arguments...]
-
-

-Run compiles and runs the main package comprising the named Go source files. -A Go source file is defined to be a file ending in a literal ".go" suffix. -

-

-By default, 'go run' runs the compiled binary directly: 'a.out arguments...'. -If the -exec flag is given, 'go run' invokes the binary using xprog: -

-
'xprog a.out arguments...'.
-
-

-If the -exec flag is not given, GOOS or GOARCH is different from the system -default, and a program named go_$GOOS_$GOARCH_exec can be found -on the current search path, 'go run' invokes the binary using that program, -for example 'go_nacl_386_exec a.out arguments...'. This allows execution of -cross-compiled programs when a simulator or other execution method is -available. -

-

-For more about build flags, see 'go help build'. -

-

-See also: go build. -

-

Test packages

-

-Usage: -

-
go test [build/test flags] [packages] [build/test flags & test binary flags]
-
-

-'Go test' automates testing the packages named by the import paths. -It prints a summary of the test results in the format: -

-
ok   archive/tar   0.011s
-FAIL archive/zip   0.022s
-ok   compress/gzip 0.033s
-...
-
-

-followed by detailed output for each failed package. -

-

-'Go test' recompiles each package along with any files with names matching -the file pattern "*_test.go". -Files whose names begin with "_" (including "_test.go") or "." are ignored. -These additional files can contain test functions, benchmark functions, and -example functions. See 'go help testfunc' for more. -Each listed package causes the execution of a separate test binary. -

-

-Test files that declare a package with the suffix "_test" will be compiled as a -separate package, and then linked and run with the main test binary. -

-

-The go tool will ignore a directory named "testdata", making it available -to hold ancillary data needed by the tests. -

-

-By default, go test needs no arguments. It compiles and tests the package -with source in the current directory, including tests, and runs the tests. -

-

-The package is built in a temporary directory so it does not interfere with the -non-test installation. -

-

-In addition to the build flags, the flags handled by 'go test' itself are: -

-
-args
-    Pass the remainder of the command line (everything after -args)
-    to the test binary, uninterpreted and unchanged.
-    Because this flag consumes the remainder of the command line,
-    the package list (if present) must appear before this flag.
-
--c
-    Compile the test binary to pkg.test but do not run it
-    (where pkg is the last element of the package's import path).
-    The file name can be changed with the -o flag.
-
--exec xprog
-    Run the test binary using xprog. The behavior is the same as
-    in 'go run'. See 'go help run' for details.
-
--i
-    Install packages that are dependencies of the test.
-    Do not run the test.
-
--o file
-    Compile the test binary to the named file.
-    The test still runs (unless -c or -i is specified).
-
-

-The test binary also accepts flags that control execution of the test; these -flags are also accessible by 'go test'. See 'go help testflag' for details. -

-

-For more about build flags, see 'go help build'. -For more about specifying packages, see 'go help packages'. -

-

-See also: go build, go vet. -

-

Run specified go tool

-

-Usage: -

-
go tool [-n] command [args...]
-
-

-Tool runs the go tool command identified by the arguments. -With no arguments it prints the list of known tools. -

-

-The -n flag causes tool to print the command that would be -executed but not execute it. -

-

-For more about each tool command, see 'go tool command -h'. -

-

Print Go version

-

-Usage: -

-
go version
-
-

-Version prints the Go version, as reported by runtime.Version. -

-

Run go tool vet on packages

-

-Usage: -

-
go vet [-n] [-x] [build flags] [packages]
-
-

-Vet runs the Go vet command on the packages named by the import paths. -

-

-For more about vet, see 'go doc cmd/vet'. -For more about specifying packages, see 'go help packages'. -

-

-To run the vet tool with specific options, run 'go tool vet'. -

-

-The -n flag prints commands that would be executed. -The -x flag prints commands as they are executed. -

-

-For more about build flags, see 'go help build'. -

-

-See also: go fmt, go fix. -

-

Calling between Go and C

-

-There are two different ways to call between Go and C/C++ code. -

-

-The first is the cgo tool, which is part of the Go distribution. For -information on how to use it see the cgo documentation (go doc cmd/cgo). -

-
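As a minimal cgo sketch (assuming a C toolchain is installed and CGO_ENABLED=1;
the package name is illustrative), a Go package can call into the C math
library like this:

	package csqrt

	// #cgo LDFLAGS: -lm
	// #include <math.h>
	import "C"

	// Sqrt wraps the C library's sqrt function.
	func Sqrt(x float64) float64 {
		return float64(C.sqrt(C.double(x)))
	}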

-The second is the SWIG program, which is a general tool for -interfacing between languages. For information on SWIG see -http://swig.org/. When running go build, any file with a .swig -extension will be passed to SWIG. Any file with a .swigcxx extension -will be passed to SWIG with the -c++ option. -

-

-When either cgo or SWIG is used, go build will pass any .c, .m, .s, -or .S files to the C compiler, and any .cc, .cpp, .cxx files to the C++ -compiler. The CC or CXX environment variables may be set to determine -the C or C++ compiler, respectively, to use. -

-

Description of build modes

-

-The 'go build' and 'go install' commands take a -buildmode argument which -indicates which kind of object file is to be built. Currently supported values -are: -

-
-buildmode=archive
-	Build the listed non-main packages into .a files. Packages named
-	main are ignored.
-
--buildmode=c-archive
-	Build the listed main package, plus all packages it imports,
-	into a C archive file. The only callable symbols will be those
-	functions exported using a cgo //export comment. Requires
-	exactly one main package to be listed.
-
--buildmode=c-shared
-	Build the listed main packages, plus all packages that they
-	import, into C shared libraries. The only callable symbols will
-	be those functions exported using a cgo //export comment.
-	Non-main packages are ignored.
-
--buildmode=default
-	Listed main packages are built into executables and listed
-	non-main packages are built into .a files (the default
-	behavior).
-
--buildmode=shared
-	Combine all the listed non-main packages into a single shared
-	library that will be used when building with the -linkshared
-	option. Packages named main are ignored.
-
--buildmode=exe
-	Build the listed main packages and everything they import into
-	executables. Packages not named main are ignored.
-
--buildmode=pie
-	Build the listed main packages and everything they import into
-	position independent executables (PIE). Packages not named
-	main are ignored.
-
--buildmode=plugin
-	Build the listed main packages, plus all packages that they
-	import, into a Go plugin. Packages not named main are ignored.
-
-
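As a sketch of -buildmode=c-shared (the file and symbol names are illustrative),
a main package exporting one function through a cgo //export comment might be:

	package main

	import "C"

	//export Add
	func Add(a, b C.int) C.int {
		return a + b
	}

	// main is required for a main package but is not run when the
	// result is loaded as a C shared library.
	func main() {}

It could then be built with 'go build -buildmode=c-shared -o libadd.so'.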

File types

-

-The go command examines the contents of a restricted set of files -in each directory. It identifies which files to examine based on -the extension of the file name. These extensions are: -

-
.go
-	Go source files.
-.c, .h
-	C source files.
-	If the package uses cgo or SWIG, these will be compiled with the
-	OS-native compiler (typically gcc); otherwise they will
-	trigger an error.
-.cc, .cpp, .cxx, .hh, .hpp, .hxx
-	C++ source files. Only useful with cgo or SWIG, and always
-	compiled with the OS-native compiler.
-.m
-	Objective-C source files. Only useful with cgo, and always
-	compiled with the OS-native compiler.
-.s, .S
-	Assembler source files.
-	If the package uses cgo or SWIG, these will be assembled with the
-	OS-native assembler (typically gcc (sic)); otherwise they
-	will be assembled with the Go assembler.
-.swig, .swigcxx
-	SWIG definition files.
-.syso
-	System object files.
-
-

-Files of each of these types except .syso may contain build -constraints, but the go command stops scanning for build constraints -at the first item in the file that is not a blank line or //-style -line comment. See the go/build package documentation for -more details. -

-

-Non-test Go source files can also include a //go:binary-only-package -comment, indicating that the package sources are included -for documentation only and must not be used to build the -package binary. This enables distribution of Go packages in -their compiled form alone. See the go/build package documentation -for more details. -

-

GOPATH environment variable

-

-The Go path is used to resolve import statements. -It is implemented by and documented in the go/build package. -

-

-The GOPATH environment variable lists places to look for Go code. -On Unix, the value is a colon-separated string. -On Windows, the value is a semicolon-separated string. -On Plan 9, the value is a list. -

-

-If the environment variable is unset, GOPATH defaults -to a subdirectory named "go" in the user's home directory -($HOME/go on Unix, %USERPROFILE%\go on Windows), -unless that directory holds a Go distribution. -Run "go env GOPATH" to see the current GOPATH. -

-

-See https://golang.org/wiki/SettingGOPATH to set a custom GOPATH. -

-

-Each directory listed in GOPATH must have a prescribed structure: -

-

-The src directory holds source code. The path below src -determines the import path or executable name. -

-

-The pkg directory holds installed package objects. -As in the Go tree, each target operating system and -architecture pair has its own subdirectory of pkg -(pkg/GOOS_GOARCH). -

-

-If DIR is a directory listed in the GOPATH, a package with -source in DIR/src/foo/bar can be imported as "foo/bar" and -has its compiled form installed to "DIR/pkg/GOOS_GOARCH/foo/bar.a". -

-

-The bin directory holds compiled commands. -Each command is named for its source directory, but only -the final element, not the entire path. That is, the -command with source in DIR/src/foo/quux is installed into -DIR/bin/quux, not DIR/bin/foo/quux. The "foo/" prefix is stripped -so that you can add DIR/bin to your PATH to get at the -installed commands. If the GOBIN environment variable is -set, commands are installed to the directory it names instead -of DIR/bin. GOBIN must be an absolute path. -

-

-Here's an example directory layout: -

-
GOPATH=/home/user/go
-
-/home/user/go/
-    src/
-        foo/
-            bar/               (go code in package bar)
-                x.go
-            quux/              (go code in package main)
-                y.go
-    bin/
-        quux                   (installed command)
-    pkg/
-        linux_amd64/
-            foo/
-                bar.a          (installed package object)
-
-

-Go searches each directory listed in GOPATH to find source code, -but new packages are always downloaded into the first directory -in the list. -

-

-See https://golang.org/doc/code.html for an example. -

-

Internal Directories

-

-Code in or below a directory named "internal" is importable only -by code in the directory tree rooted at the parent of "internal". -Here's an extended version of the directory layout above: -

-
/home/user/go/
-    src/
-        crash/
-            bang/              (go code in package bang)
-                b.go
-        foo/                   (go code in package foo)
-            f.go
-            bar/               (go code in package bar)
-                x.go
-            internal/
-                baz/           (go code in package baz)
-                    z.go
-            quux/              (go code in package main)
-                y.go
-
-

-The code in z.go is imported as "foo/internal/baz", but that -import statement can only appear in source files in the subtree -rooted at foo. The source files foo/f.go, foo/bar/x.go, and -foo/quux/y.go can all import "foo/internal/baz", but the source file -crash/bang/b.go cannot. -

-

-See https://golang.org/s/go14internal for details. -

-

Vendor Directories

-

-Go 1.6 includes support for using local copies of external dependencies -to satisfy imports of those dependencies, often referred to as vendoring. -

-

-Code below a directory named "vendor" is importable only -by code in the directory tree rooted at the parent of "vendor", -and only using an import path that omits the prefix up to and -including the vendor element. -

-

-Here's the example from the previous section, -but with the "internal" directory renamed to "vendor" -and a new foo/vendor/crash/bang directory added: -

-
/home/user/go/
-    src/
-        crash/
-            bang/              (go code in package bang)
-                b.go
-        foo/                   (go code in package foo)
-            f.go
-            bar/               (go code in package bar)
-                x.go
-            vendor/
-                crash/
-                    bang/      (go code in package bang)
-                        b.go
-                baz/           (go code in package baz)
-                    z.go
-            quux/              (go code in package main)
-                y.go
-
-

-The same visibility rules apply as for internal, but the code -in z.go is imported as "baz", not as "foo/vendor/baz". -

-

-Code in vendor directories deeper in the source tree shadows -code in higher directories. Within the subtree rooted at foo, an import -of "crash/bang" resolves to "foo/vendor/crash/bang", not the -top-level "crash/bang". -

-

-Code in vendor directories is not subject to import path -checking (see 'go help importpath'). -

-

-When 'go get' checks out or updates a git repository, it now also -updates submodules. -

-

-Vendor directories do not affect the placement of new repositories -being checked out for the first time by 'go get': those are always -placed in the main GOPATH, never in a vendor subtree. -

-

-See https://golang.org/s/go15vendor for details. -

-

Environment variables

-

-The go command, and the tools it invokes, examine a few different -environment variables. For many of these, you can see the default -value of on your system by running 'go env NAME', where NAME is the -name of the variable. -

-

-General-purpose environment variables: -

-
GCCGO
-	The gccgo command to run for 'go build -compiler=gccgo'.
-GOARCH
-	The architecture, or processor, for which to compile code.
-	Examples are amd64, 386, arm, ppc64.
-GOBIN
-	The directory where 'go install' will install a command.
-GOOS
-	The operating system for which to compile code.
-	Examples are linux, darwin, windows, netbsd.
-GOPATH
-	For more details see: 'go help gopath'.
-GORACE
-	Options for the race detector.
-	See https://golang.org/doc/articles/race_detector.html.
-GOROOT
-	The root of the go tree.
-
-

-Environment variables for use with cgo: -

-
CC
-	The command to use to compile C code.
-CGO_ENABLED
-	Whether the cgo command is supported.  Either 0 or 1.
-CGO_CFLAGS
-	Flags that cgo will pass to the compiler when compiling
-	C code.
-CGO_CPPFLAGS
-	Flags that cgo will pass to the compiler when compiling
-	C or C++ code.
-CGO_CXXFLAGS
-	Flags that cgo will pass to the compiler when compiling
-	C++ code.
-CGO_FFLAGS
-	Flags that cgo will pass to the compiler when compiling
-	Fortran code.
-CGO_LDFLAGS
-	Flags that cgo will pass to the compiler when linking.
-CXX
-	The command to use to compile C++ code.
-PKG_CONFIG
-	Path to pkg-config tool.
-
-

-Architecture-specific environment variables: -

-
GOARM
-	For GOARCH=arm, the ARM architecture for which to compile.
-	Valid values are 5, 6, 7.
-GO386
-	For GOARCH=386, the floating point instruction set.
-	Valid values are 387, sse2.
-
-

-Special-purpose environment variables: -

-
GOROOT_FINAL
-	The root of the installed Go tree, when it is
-	installed in a location other than where it is built.
-	File names in stack traces are rewritten from GOROOT to
-	GOROOT_FINAL.
-GO_EXTLINK_ENABLED
-	Whether the linker should use external linking mode
-	when using -linkmode=auto with code that uses cgo.
-	Set to 0 to disable external linking mode, 1 to enable it.
-GIT_ALLOW_PROTOCOL
-	Defined by Git. A colon-separated list of schemes that are allowed to be used
-	with git fetch/clone. If set, any scheme not explicitly mentioned will be
-	considered insecure by 'go get'.
-
-

Import path syntax

-

-An import path (see 'go help packages') denotes a package stored in the local -file system. In general, an import path denotes either a standard package (such -as "unicode/utf8") or a package found in one of the work spaces (For more -details see: 'go help gopath'). -

-

Relative import paths

-

-An import path beginning with ./ or ../ is called a relative path. -The toolchain supports relative import paths as a shortcut in two ways. -

-

-First, a relative path can be used as a shorthand on the command line. -If you are working in the directory containing the code imported as -"unicode" and want to run the tests for "unicode/utf8", you can type -"go test ./utf8" instead of needing to specify the full path. -Similarly, in the reverse situation, "go test .." will test "unicode" from -the "unicode/utf8" directory. Relative patterns are also allowed, like -"go test ./..." to test all subdirectories. See 'go help packages' for details -on the pattern syntax. -

-

-Second, if you are compiling a Go program not in a work space, -you can use a relative path in an import statement in that program -to refer to nearby code also not in a work space. -This makes it easy to experiment with small multipackage programs -outside of the usual work spaces, but such programs cannot be -installed with "go install" (there is no work space in which to install them), -so they are rebuilt from scratch each time they are built. -To avoid ambiguity, Go programs cannot use relative import paths -within a work space. -

-

Remote import paths

-

-Certain import paths also -describe how to obtain the source code for the package using -a revision control system. -

-

-A few common code hosting sites have special syntax: -

-
Bitbucket (Git, Mercurial)
-
-	import "bitbucket.org/user/project"
-	import "bitbucket.org/user/project/sub/directory"
-
-GitHub (Git)
-
-	import "github.com/user/project"
-	import "github.com/user/project/sub/directory"
-
-Launchpad (Bazaar)
-
-	import "launchpad.net/project"
-	import "launchpad.net/project/series"
-	import "launchpad.net/project/series/sub/directory"
-
-	import "launchpad.net/~user/project/branch"
-	import "launchpad.net/~user/project/branch/sub/directory"
-
-IBM DevOps Services (Git)
-
-	import "hub.jazz.net/git/user/project"
-	import "hub.jazz.net/git/user/project/sub/directory"
-
-

-For code hosted on other servers, import paths may either be qualified -with the version control type, or the go tool can dynamically fetch -the import path over https/http and discover where the code resides -from a <meta> tag in the HTML. -

-

-To declare the code location, an import path of the form -

-
repository.vcs/path
-
-

-specifies the given repository, with or without the .vcs suffix, -using the named version control system, and then the path inside -that repository. The supported version control systems are: -

-
Bazaar      .bzr
-Git         .git
-Mercurial   .hg
-Subversion  .svn
-
-

-For example, -

-
import "example.org/user/foo.hg"
-
-

-denotes the root directory of the Mercurial repository at -example.org/user/foo or foo.hg, and -

-
import "example.org/repo.git/foo/bar"
-
-

-denotes the foo/bar directory of the Git repository at -example.org/repo or repo.git. -

-

-When a version control system supports multiple protocols, -each is tried in turn when downloading. For example, a Git -download tries https://, then git+ssh://. -

-

-By default, downloads are restricted to known secure protocols -(e.g. https, ssh). To override this setting for Git downloads, the -GIT_ALLOW_PROTOCOL environment variable can be set (For more details see: -'go help environment'). -

-

-If the import path is not a known code hosting site and also lacks a -version control qualifier, the go tool attempts to fetch the import -over https/http and looks for a <meta> tag in the document's HTML -<head>. -

-

-The meta tag has the form: -

-
<meta name="go-import" content="import-prefix vcs repo-root">
-
-

-The import-prefix is the import path corresponding to the repository -root. It must be a prefix or an exact match of the package being -fetched with "go get". If it's not an exact match, another http -request is made at the prefix to verify the <meta> tags match. -

-

-The meta tag should appear as early in the file as possible. -In particular, it should appear before any raw JavaScript or CSS, -to avoid confusing the go command's restricted parser. -

-

-The vcs is one of "git", "hg", "svn", etc. -

-

-The repo-root is the root of the version control system -containing a scheme and not containing a .vcs qualifier. -

-

-For example, -

-
import "example.org/pkg/foo"
-
-

-will result in the following requests: -

-
https://example.org/pkg/foo?go-get=1 (preferred)
-http://example.org/pkg/foo?go-get=1  (fallback, only with -insecure)
-
-

-If that page contains the meta tag -

-
<meta name="go-import" content="example.org git https://code.org/r/p/exproj">
-
-

-the go tool will verify that https://example.org/?go-get=1 contains the -same meta tag and then git clone https://code.org/r/p/exproj into -GOPATH/src/example.org. -

-

-New downloaded packages are written to the first directory listed in the GOPATH -environment variable (For more details see: 'go help gopath'). -

-

-The go command attempts to download the version of the -package appropriate for the Go release being used. -Run 'go help get' for more. -

-

Import path checking

-

-When the custom import path feature described above redirects to a -known code hosting site, each of the resulting packages has two possible -import paths, using the custom domain or the known hosting site. -

-

-A package statement is said to have an "import comment" if it is immediately -followed (before the next newline) by a comment of one of these two forms: -

-
package math // import "path"
-package math /* import "path" */
-
-

-The go command will refuse to install a package with an import comment -unless it is being referred to by that import path. In this way, import comments -let package authors make sure the custom import path is used and not a -direct path to the underlying code hosting site. -

-

-Import path checking is disabled for code found within vendor trees. -This makes it possible to copy code into alternate locations in vendor trees -without needing to update import comments. -

-

-See https://golang.org/s/go14customimport for details. -

-

Description of package lists

-

-Many commands apply to a set of packages: -

-
go action [packages]
-
-

-Usually, [packages] is a list of import paths. -

-

-An import path that is a rooted path or that begins with -a . or .. element is interpreted as a file system path and -denotes the package in that directory. -

-

-Otherwise, the import path P denotes the package found in -the directory DIR/src/P for some DIR listed in the GOPATH -environment variable (For more details see: 'go help gopath'). -

-

-If no import paths are given, the action applies to the -package in the current directory. -

-

-There are four reserved names for paths that should not be used -for packages to be built with the go tool: -

-

-- "main" denotes the top-level package in a stand-alone executable. -

-

-- "all" expands to all package directories found in all the GOPATH -trees. For example, 'go list all' lists all the packages on the local -system. -

-

-- "std" is like all but expands to just the packages in the standard -Go library. -

-

-- "cmd" expands to the Go repository's commands and their -internal libraries. -

-

-Import paths beginning with "cmd/" only match source code in -the Go repository. -

-

-An import path is a pattern if it includes one or more "..." wildcards, -each of which can match any string, including the empty string and -strings containing slashes. Such a pattern expands to all package -directories found in the GOPATH trees with names matching the -patterns. As a special case, x/... matches x as well as x's subdirectories. -For example, net/... expands to net and packages in its subdirectories. -

-

-An import path can also name a package to be downloaded from -a remote repository. Run 'go help importpath' for details. -

-

-Every package in a program must have a unique import path. -By convention, this is arranged by starting each path with a -unique prefix that belongs to you. For example, paths used -internally at Google all begin with 'google', and paths -denoting remote repositories begin with the path to the code, -such as 'github.com/user/repo'. -

-

-Packages in a program need not have unique package names, -but there are two reserved package names with special meaning. -The name main indicates a command, not a library. -Commands are built into binaries and cannot be imported. -The name documentation indicates documentation for -a non-Go program in the directory. Files in package documentation -are ignored by the go command. -

-

-As a special case, if the package list is a list of .go files from a -single directory, the command is applied to a single synthesized -package made up of exactly those files, ignoring any build constraints -in those files and ignoring any other files in the directory. -

-

-Directory and file names that begin with "." or "_" are ignored -by the go tool, as are directories named "testdata". -

-

Description of testing flags

-

-The 'go test' command takes both flags that apply to 'go test' itself -and flags that apply to the resulting test binary. -

-

-Several of the flags control profiling and write an execution profile -suitable for "go tool pprof"; run "go tool pprof -h" for more -information. The --alloc_space, --alloc_objects, and --show_bytes -options of pprof control how the information is presented. -

-

-The following flags are recognized by the 'go test' command and -control the execution of any test: -

-
-bench regexp
-    Run (sub)benchmarks matching a regular expression.
-    The given regular expression is split into smaller ones by
-    top-level '/', where each must match the corresponding part of a
-    benchmark's identifier.
-    By default, no benchmarks run. To run all benchmarks,
-    use '-bench .' or '-bench=.'.
-
--benchtime t
-    Run enough iterations of each benchmark to take t, specified
-    as a time.Duration (for example, -benchtime 1h30s).
-    The default is 1 second (1s).
-
--count n
-    Run each test and benchmark n times (default 1).
-    If -cpu is set, run n times for each GOMAXPROCS value.
-    Examples are always run once.
-
--cover
-    Enable coverage analysis.
-
--covermode set,count,atomic
-    Set the mode for coverage analysis for the package[s]
-    being tested. The default is "set" unless -race is enabled,
-    in which case it is "atomic".
-    The values:
-	set: bool: does this statement run?
-	count: int: how many times does this statement run?
-	atomic: int: count, but correct in multithreaded tests;
-		significantly more expensive.
-    Sets -cover.
-
--coverpkg pkg1,pkg2,pkg3
-    Apply coverage analysis in each test to the given list of packages.
-    The default is for each test to analyze only the package being tested.
-    Packages are specified as import paths.
-    Sets -cover.
-
--cpu 1,2,4
-    Specify a list of GOMAXPROCS values for which the tests or
-    benchmarks should be executed.  The default is the current value
-    of GOMAXPROCS.
-
--parallel n
-    Allow parallel execution of test functions that call t.Parallel.
-    The value of this flag is the maximum number of tests to run
-    simultaneously; by default, it is set to the value of GOMAXPROCS.
-    Note that -parallel only applies within a single test binary.
-    The 'go test' command may run tests for different packages
-    in parallel as well, according to the setting of the -p flag
-    (see 'go help build').
-
--run regexp
-    Run only those tests and examples matching the regular expression.
-    For tests the regular expression is split into smaller ones by
-    top-level '/', where each must match the corresponding part of a
-    test's identifier.
-
--short
-    Tell long-running tests to shorten their run time.
-    It is off by default but set during all.bash so that installing
-    the Go tree can run a sanity check but not spend time running
-    exhaustive tests.
-
--timeout t
-    If a test runs longer than t, panic.
-    The default is 10 minutes (10m).
-
--v
-    Verbose output: log all tests as they are run. Also print all
-    text from Log and Logf calls even if the test succeeds.
-
-
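For illustration, given a test that uses subtests (the names are hypothetical),

	func TestMatrix(t *testing.T) {
		t.Run("small", func(t *testing.T) { t.Log("2x2 case") })
		t.Run("large", func(t *testing.T) { t.Log("100x100 case") })
	}

the command 'go test -run Matrix/small -v' splits the expression at the
top-level '/' and runs only the "small" subtest.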

-The following flags are also recognized by 'go test' and can be used to -profile the tests during execution: -

-
-benchmem
-    Print memory allocation statistics for benchmarks.
-
--blockprofile block.out
-    Write a goroutine blocking profile to the specified file
-    when all tests are complete.
-    Writes test binary as -c would.
-
--blockprofilerate n
-    Control the detail provided in goroutine blocking profiles by
-    calling runtime.SetBlockProfileRate with n.
-    See 'go doc runtime.SetBlockProfileRate'.
-    The profiler aims to sample, on average, one blocking event every
-    n nanoseconds the program spends blocked.  By default,
-    if -test.blockprofile is set without this flag, all blocking events
-    are recorded, equivalent to -test.blockprofilerate=1.
-
--coverprofile cover.out
-    Write a coverage profile to the file after all tests have passed.
-    Sets -cover.
-
--cpuprofile cpu.out
-    Write a CPU profile to the specified file before exiting.
-    Writes test binary as -c would.
-
--memprofile mem.out
-    Write a memory profile to the file after all tests have passed.
-    Writes test binary as -c would.
-
--memprofilerate n
-    Enable more precise (and expensive) memory profiles by setting
-    runtime.MemProfileRate.  See 'go doc runtime.MemProfileRate'.
-    To profile all memory allocations, use -test.memprofilerate=1
-    and pass --alloc_space flag to the pprof tool.
-
--mutexprofile mutex.out
-    Write a mutex contention profile to the specified file
-    when all tests are complete.
-    Writes test binary as -c would.
-
--mutexprofilefraction n
-    Sample 1 in n stack traces of goroutines holding a
-    contended mutex.
-
--outputdir directory
-    Place output files from profiling in the specified directory,
-    by default the directory in which "go test" is running.
-
--trace trace.out
-    Write an execution trace to the specified file before exiting.
-
-
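For illustration, a common workflow is to write a coverage profile and then
inspect it with the cover tool:

	go test -covermode=count -coverprofile=cover.out
	go tool cover -html=cover.out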

-Each of these flags is also recognized with an optional 'test.' prefix, -as in -test.v. When invoking the generated test binary (the result of -'go test -c') directly, however, the prefix is mandatory. -

-

-The 'go test' command rewrites or removes recognized flags, -as appropriate, both before and after the optional package list, -before invoking the test binary. -

-

-For instance, the command -

-
go test -v -myflag testdata -cpuprofile=prof.out -x
-
-

-will compile the test binary and then run it as -

-
pkg.test -test.v -myflag testdata -test.cpuprofile=prof.out
-
-

-(The -x flag is removed because it applies only to the go command's -execution, not to the test itself.) -

-

-The test flags that generate profiles (other than for coverage) also -leave the test binary in pkg.test for use when analyzing the profiles. -

-

-When 'go test' runs a test binary, it does so from within the -corresponding package's source code directory. Depending on the test, -it may be necessary to do the same when invoking a generated test -binary directly. -

-

-The command-line package list, if present, must appear before any -flag not known to the go test command. Continuing the example above, -the package list would have to appear before -myflag, but could appear -on either side of -v. -

-

-To keep an argument for a test binary from being interpreted as a -known flag or a package name, use -args (see 'go help test') which -passes the remainder of the command line through to the test binary -uninterpreted and unaltered. -

-

-For instance, the command -

-
go test -v -args -x -v
-
-

-will compile the test binary and then run it as -

-
pkg.test -test.v -x -v
-
-

-Similarly, -

-
go test -args math
-
-

-will compile the test binary and then run it as -

-
pkg.test math
-
-

-In the first example, the -x and the second -v are passed through to the -test binary unchanged and with no effect on the go command itself. -In the second example, the argument math is passed through to the test -binary, instead of being interpreted as the package list. -

-

Description of testing functions

-

-The 'go test' command expects to find test, benchmark, and example functions -in the "*_test.go" files corresponding to the package under test. -

-

-A test function is one named TestXXX (where XXX is any alphanumeric string -not starting with a lower case letter) and should have the signature, -

-
func TestXXX(t *testing.T) { ... }
-
-

-A benchmark function is one named BenchmarkXXX and should have the signature, -

-
func BenchmarkXXX(b *testing.B) { ... }
-
-
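As a concrete sketch (Abs stands for any function in the package under test),
a matching test and benchmark pair might be:

	func TestAbs(t *testing.T) {
		if got := Abs(-2); got != 2 {
			t.Errorf("Abs(-2) = %d; want 2", got)
		}
	}

	func BenchmarkAbs(b *testing.B) {
		for i := 0; i < b.N; i++ {
			Abs(-2)
		}
	}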

-An example function is similar to a test function but, instead of using -*testing.T to report success or failure, prints output to os.Stdout. -If the last comment in the function starts with "Output:" then the output -is compared exactly against the comment (see examples below). If the last -comment begins with "Unordered output:" then the output is compared to the -comment, however the order of the lines is ignored. An example with no such -comment is compiled but not executed. An example with no text after -"Output:" is compiled, executed, and expected to produce no output. -

-

-Godoc displays the body of ExampleXXX to demonstrate the use -of the function, constant, or variable XXX. An example of a method M with -receiver type T or *T is named ExampleT_M. There may be multiple examples -for a given function, constant, or variable, distinguished by a trailing _xxx, -where xxx is a suffix not beginning with an upper case letter. -

-

-Here is an example of an example: -

-
func ExamplePrintln() {
-	Println("The output of\nthis example.")
-	// Output: The output of
-	// this example.
-}
-
-

-Here is another example where the ordering of the output is ignored: -

-
func ExamplePerm() {
-	for _, value := range Perm(4) {
-		fmt.Println(value)
-	}
-
-	// Unordered output: 4
-	// 2
-	// 1
-	// 3
-	// 0
-}
-
-

-The entire test file is presented as the example when it contains a single -example function, at least one other function, type, variable, or constant -declaration, and no test or benchmark functions. -

-

-See the documentation of the testing package for more information. -

- - - -
-
- - - - - - - - -`)) diff --git a/vendor/golang.org/x/net/http2/h2i/README.md b/vendor/golang.org/x/net/http2/h2i/README.md deleted file mode 100644 index fb5c5ef..0000000 --- a/vendor/golang.org/x/net/http2/h2i/README.md +++ /dev/null @@ -1,97 +0,0 @@ -# h2i - -**h2i** is an interactive HTTP/2 ("h2") console debugger. Miss the good ol' -days of telnetting to your HTTP/1.n servers? We're bringing you -back. - -Features: -- send raw HTTP/2 frames - - PING - - SETTINGS - - HEADERS - - etc -- type in HTTP/1.n and have it auto-HPACK/frame-ify it for HTTP/2 -- pretty print all received HTTP/2 frames from the peer (including HPACK decoding) -- tab completion of commands, options - -Not yet features, but soon: -- unnecessary CONTINUATION frames on short boundaries, to test peer implementations -- request bodies (DATA frames) -- send invalid frames for testing server implementations (supported by underlying Framer) - -Later: -- act like a server - -## Installation - -``` -$ go get golang.org/x/net/http2/h2i -$ h2i -``` - -## Demo - -``` -$ h2i -Usage: h2i - - -insecure - Whether to skip TLS cert validation - -nextproto string - Comma-separated list of NPN/ALPN protocol names to negotiate. (default "h2,h2-14") - -$ h2i google.com -Connecting to google.com:443 ... -Connected to 74.125.224.41:443 -Negotiated protocol "h2-14" -[FrameHeader SETTINGS len=18] - [MAX_CONCURRENT_STREAMS = 100] - [INITIAL_WINDOW_SIZE = 1048576] - [MAX_FRAME_SIZE = 16384] -[FrameHeader WINDOW_UPDATE len=4] - Window-Increment = 983041 - -h2i> PING h2iSayHI -[FrameHeader PING flags=ACK len=8] - Data = "h2iSayHI" -h2i> headers -(as HTTP/1.1)> GET / HTTP/1.1 -(as HTTP/1.1)> Host: ip.appspot.com -(as HTTP/1.1)> User-Agent: h2i/brad-n-blake -(as HTTP/1.1)> -Opening Stream-ID 1: - :authority = ip.appspot.com - :method = GET - :path = / - :scheme = https - user-agent = h2i/brad-n-blake -[FrameHeader HEADERS flags=END_HEADERS stream=1 len=77] - :status = "200" - alternate-protocol = "443:quic,p=1" - content-length = "15" - content-type = "text/html" - date = "Fri, 01 May 2015 23:06:56 GMT" - server = "Google Frontend" -[FrameHeader DATA flags=END_STREAM stream=1 len=15] - "173.164.155.78\n" -[FrameHeader PING len=8] - Data = "\x00\x00\x00\x00\x00\x00\x00\x00" -h2i> ping -[FrameHeader PING flags=ACK len=8] - Data = "h2i_ping" -h2i> ping -[FrameHeader PING flags=ACK len=8] - Data = "h2i_ping" -h2i> ping -[FrameHeader GOAWAY len=22] - Last-Stream-ID = 1; Error-Code = PROTOCOL_ERROR (1) - -ReadFrame: EOF -``` - -## Status - -Quick few hour hack. So much yet to do. Feel free to file issues for -bugs or wishlist items, but [@bmizerany](https://github.com/bmizerany/) -and I aren't yet accepting pull requests until things settle down. - diff --git a/vendor/golang.org/x/net/http2/h2i/h2i.go b/vendor/golang.org/x/net/http2/h2i/h2i.go deleted file mode 100644 index 62e5752..0000000 --- a/vendor/golang.org/x/net/http2/h2i/h2i.go +++ /dev/null @@ -1,522 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !plan9,!solaris - -/* -The h2i command is an interactive HTTP/2 console. 
- -Usage: - $ h2i [flags] - -Interactive commands in the console: (all parts case-insensitive) - - ping [data] - settings ack - settings FOO=n BAR=z - headers (open a new stream by typing HTTP/1.1) -*/ -package main - -import ( - "bufio" - "bytes" - "crypto/tls" - "errors" - "flag" - "fmt" - "io" - "log" - "net" - "net/http" - "os" - "regexp" - "strconv" - "strings" - - "golang.org/x/crypto/ssh/terminal" - "golang.org/x/net/http2" - "golang.org/x/net/http2/hpack" -) - -// Flags -var ( - flagNextProto = flag.String("nextproto", "h2,h2-14", "Comma-separated list of NPN/ALPN protocol names to negotiate.") - flagInsecure = flag.Bool("insecure", false, "Whether to skip TLS cert validation") - flagSettings = flag.String("settings", "empty", "comma-separated list of KEY=value settings for the initial SETTINGS frame. The magic value 'empty' sends an empty initial settings frame, and the magic value 'omit' causes no initial settings frame to be sent.") - flagDial = flag.String("dial", "", "optional ip:port to dial, to connect to a host:port but use a different SNI name (including a SNI name without DNS)") -) - -type command struct { - run func(*h2i, []string) error // required - - // complete optionally specifies tokens (case-insensitive) which are - // valid for this subcommand. - complete func() []string -} - -var commands = map[string]command{ - "ping": {run: (*h2i).cmdPing}, - "settings": { - run: (*h2i).cmdSettings, - complete: func() []string { - return []string{ - "ACK", - http2.SettingHeaderTableSize.String(), - http2.SettingEnablePush.String(), - http2.SettingMaxConcurrentStreams.String(), - http2.SettingInitialWindowSize.String(), - http2.SettingMaxFrameSize.String(), - http2.SettingMaxHeaderListSize.String(), - } - }, - }, - "quit": {run: (*h2i).cmdQuit}, - "headers": {run: (*h2i).cmdHeaders}, -} - -func usage() { - fmt.Fprintf(os.Stderr, "Usage: h2i \n\n") - flag.PrintDefaults() -} - -// withPort adds ":443" if another port isn't already present. -func withPort(host string) string { - if _, _, err := net.SplitHostPort(host); err != nil { - return net.JoinHostPort(host, "443") - } - return host -} - -// withoutPort strips the port from addr if present. -func withoutPort(addr string) string { - if h, _, err := net.SplitHostPort(addr); err == nil { - return h - } - return addr -} - -// h2i is the app's state. 
-type h2i struct { - host string - tc *tls.Conn - framer *http2.Framer - term *terminal.Terminal - - // owned by the command loop: - streamID uint32 - hbuf bytes.Buffer - henc *hpack.Encoder - - // owned by the readFrames loop: - peerSetting map[http2.SettingID]uint32 - hdec *hpack.Decoder -} - -func main() { - flag.Usage = usage - flag.Parse() - if flag.NArg() != 1 { - usage() - os.Exit(2) - } - log.SetFlags(0) - - host := flag.Arg(0) - app := &h2i{ - host: host, - peerSetting: make(map[http2.SettingID]uint32), - } - app.henc = hpack.NewEncoder(&app.hbuf) - - if err := app.Main(); err != nil { - if app.term != nil { - app.logf("%v\n", err) - } else { - fmt.Fprintf(os.Stderr, "%v\n", err) - } - os.Exit(1) - } - fmt.Fprintf(os.Stdout, "\n") -} - -func (app *h2i) Main() error { - cfg := &tls.Config{ - ServerName: withoutPort(app.host), - NextProtos: strings.Split(*flagNextProto, ","), - InsecureSkipVerify: *flagInsecure, - } - - hostAndPort := *flagDial - if hostAndPort == "" { - hostAndPort = withPort(app.host) - } - log.Printf("Connecting to %s ...", hostAndPort) - tc, err := tls.Dial("tcp", hostAndPort, cfg) - if err != nil { - return fmt.Errorf("Error dialing %s: %v", hostAndPort, err) - } - log.Printf("Connected to %v", tc.RemoteAddr()) - defer tc.Close() - - if err := tc.Handshake(); err != nil { - return fmt.Errorf("TLS handshake: %v", err) - } - if !*flagInsecure { - if err := tc.VerifyHostname(app.host); err != nil { - return fmt.Errorf("VerifyHostname: %v", err) - } - } - state := tc.ConnectionState() - log.Printf("Negotiated protocol %q", state.NegotiatedProtocol) - if !state.NegotiatedProtocolIsMutual || state.NegotiatedProtocol == "" { - return fmt.Errorf("Could not negotiate protocol mutually") - } - - if _, err := io.WriteString(tc, http2.ClientPreface); err != nil { - return err - } - - app.framer = http2.NewFramer(tc, tc) - - oldState, err := terminal.MakeRaw(int(os.Stdin.Fd())) - if err != nil { - return err - } - defer terminal.Restore(0, oldState) - - var screen = struct { - io.Reader - io.Writer - }{os.Stdin, os.Stdout} - - app.term = terminal.NewTerminal(screen, "h2i> ") - lastWord := regexp.MustCompile(`.+\W(\w+)$`) - app.term.AutoCompleteCallback = func(line string, pos int, key rune) (newLine string, newPos int, ok bool) { - if key != '\t' { - return - } - if pos != len(line) { - // TODO: we're being lazy for now, only supporting tab completion at the end. - return - } - // Auto-complete for the command itself. 
- if !strings.Contains(line, " ") { - var name string - name, _, ok = lookupCommand(line) - if !ok { - return - } - return name, len(name), true - } - _, c, ok := lookupCommand(line[:strings.IndexByte(line, ' ')]) - if !ok || c.complete == nil { - return - } - if strings.HasSuffix(line, " ") { - app.logf("%s", strings.Join(c.complete(), " ")) - return line, pos, true - } - m := lastWord.FindStringSubmatch(line) - if m == nil { - return line, len(line), true - } - soFar := m[1] - var match []string - for _, cand := range c.complete() { - if len(soFar) > len(cand) || !strings.EqualFold(cand[:len(soFar)], soFar) { - continue - } - match = append(match, cand) - } - if len(match) == 0 { - return - } - if len(match) > 1 { - // TODO: auto-complete any common prefix - app.logf("%s", strings.Join(match, " ")) - return line, pos, true - } - newLine = line[:len(line)-len(soFar)] + match[0] - return newLine, len(newLine), true - - } - - errc := make(chan error, 2) - go func() { errc <- app.readFrames() }() - go func() { errc <- app.readConsole() }() - return <-errc -} - -func (app *h2i) logf(format string, args ...interface{}) { - fmt.Fprintf(app.term, format+"\r\n", args...) -} - -func (app *h2i) readConsole() error { - if s := *flagSettings; s != "omit" { - var args []string - if s != "empty" { - args = strings.Split(s, ",") - } - _, c, ok := lookupCommand("settings") - if !ok { - panic("settings command not found") - } - c.run(app, args) - } - - for { - line, err := app.term.ReadLine() - if err == io.EOF { - return nil - } - if err != nil { - return fmt.Errorf("terminal.ReadLine: %v", err) - } - f := strings.Fields(line) - if len(f) == 0 { - continue - } - cmd, args := f[0], f[1:] - if _, c, ok := lookupCommand(cmd); ok { - err = c.run(app, args) - } else { - app.logf("Unknown command %q", line) - } - if err == errExitApp { - return nil - } - if err != nil { - return err - } - } -} - -func lookupCommand(prefix string) (name string, c command, ok bool) { - prefix = strings.ToLower(prefix) - if c, ok = commands[prefix]; ok { - return prefix, c, ok - } - - for full, candidate := range commands { - if strings.HasPrefix(full, prefix) { - if c.run != nil { - return "", command{}, false // ambiguous - } - c = candidate - name = full - } - } - return name, c, c.run != nil -} - -var errExitApp = errors.New("internal sentinel error value to quit the console reading loop") - -func (a *h2i) cmdQuit(args []string) error { - if len(args) > 0 { - a.logf("the QUIT command takes no argument") - return nil - } - return errExitApp -} - -func (a *h2i) cmdSettings(args []string) error { - if len(args) == 1 && strings.EqualFold(args[0], "ACK") { - return a.framer.WriteSettingsAck() - } - var settings []http2.Setting - for _, arg := range args { - if strings.EqualFold(arg, "ACK") { - a.logf("Error: ACK must be only argument with the SETTINGS command") - return nil - } - eq := strings.Index(arg, "=") - if eq == -1 { - a.logf("Error: invalid argument %q (expected SETTING_NAME=nnnn)", arg) - return nil - } - sid, ok := settingByName(arg[:eq]) - if !ok { - a.logf("Error: unknown setting name %q", arg[:eq]) - return nil - } - val, err := strconv.ParseUint(arg[eq+1:], 10, 32) - if err != nil { - a.logf("Error: invalid argument %q (expected SETTING_NAME=nnnn)", arg) - return nil - } - settings = append(settings, http2.Setting{ - ID: sid, - Val: uint32(val), - }) - } - a.logf("Sending: %v", settings) - return a.framer.WriteSettings(settings...) 
-} - -func settingByName(name string) (http2.SettingID, bool) { - for _, sid := range [...]http2.SettingID{ - http2.SettingHeaderTableSize, - http2.SettingEnablePush, - http2.SettingMaxConcurrentStreams, - http2.SettingInitialWindowSize, - http2.SettingMaxFrameSize, - http2.SettingMaxHeaderListSize, - } { - if strings.EqualFold(sid.String(), name) { - return sid, true - } - } - return 0, false -} - -func (app *h2i) cmdPing(args []string) error { - if len(args) > 1 { - app.logf("invalid PING usage: only accepts 0 or 1 args") - return nil // nil means don't end the program - } - var data [8]byte - if len(args) == 1 { - copy(data[:], args[0]) - } else { - copy(data[:], "h2i_ping") - } - return app.framer.WritePing(false, data) -} - -func (app *h2i) cmdHeaders(args []string) error { - if len(args) > 0 { - app.logf("Error: HEADERS doesn't yet take arguments.") - // TODO: flags for restricting window size, to force CONTINUATION - // frames. - return nil - } - var h1req bytes.Buffer - app.term.SetPrompt("(as HTTP/1.1)> ") - defer app.term.SetPrompt("h2i> ") - for { - line, err := app.term.ReadLine() - if err != nil { - return err - } - h1req.WriteString(line) - h1req.WriteString("\r\n") - if line == "" { - break - } - } - req, err := http.ReadRequest(bufio.NewReader(&h1req)) - if err != nil { - app.logf("Invalid HTTP/1.1 request: %v", err) - return nil - } - if app.streamID == 0 { - app.streamID = 1 - } else { - app.streamID += 2 - } - app.logf("Opening Stream-ID %d:", app.streamID) - hbf := app.encodeHeaders(req) - if len(hbf) > 16<<10 { - app.logf("TODO: h2i doesn't yet write CONTINUATION frames. Copy it from transport.go") - return nil - } - return app.framer.WriteHeaders(http2.HeadersFrameParam{ - StreamID: app.streamID, - BlockFragment: hbf, - EndStream: req.Method == "GET" || req.Method == "HEAD", // good enough for now - EndHeaders: true, // for now - }) -} - -func (app *h2i) readFrames() error { - for { - f, err := app.framer.ReadFrame() - if err != nil { - return fmt.Errorf("ReadFrame: %v", err) - } - app.logf("%v", f) - switch f := f.(type) { - case *http2.PingFrame: - app.logf(" Data = %q", f.Data) - case *http2.SettingsFrame: - f.ForeachSetting(func(s http2.Setting) error { - app.logf(" %v", s) - app.peerSetting[s.ID] = s.Val - return nil - }) - case *http2.WindowUpdateFrame: - app.logf(" Window-Increment = %v", f.Increment) - case *http2.GoAwayFrame: - app.logf(" Last-Stream-ID = %d; Error-Code = %v (%d)", f.LastStreamID, f.ErrCode, f.ErrCode) - case *http2.DataFrame: - app.logf(" %q", f.Data()) - case *http2.HeadersFrame: - if f.HasPriority() { - app.logf(" PRIORITY = %v", f.Priority) - } - if app.hdec == nil { - // TODO: if the user uses h2i to send a SETTINGS frame advertising - // something larger, we'll need to respect SETTINGS_HEADER_TABLE_SIZE - // and stuff here instead of using the 4k default. But for now: - tableSize := uint32(4 << 10) - app.hdec = hpack.NewDecoder(tableSize, app.onNewHeaderField) - } - app.hdec.Write(f.HeaderBlockFragment()) - case *http2.PushPromiseFrame: - if app.hdec == nil { - // TODO: if the user uses h2i to send a SETTINGS frame advertising - // something larger, we'll need to respect SETTINGS_HEADER_TABLE_SIZE - // and stuff here instead of using the 4k default. 
But for now: - tableSize := uint32(4 << 10) - app.hdec = hpack.NewDecoder(tableSize, app.onNewHeaderField) - } - app.hdec.Write(f.HeaderBlockFragment()) - } - } -} - -// called from readLoop -func (app *h2i) onNewHeaderField(f hpack.HeaderField) { - if f.Sensitive { - app.logf(" %s = %q (SENSITIVE)", f.Name, f.Value) - } - app.logf(" %s = %q", f.Name, f.Value) -} - -func (app *h2i) encodeHeaders(req *http.Request) []byte { - app.hbuf.Reset() - - // TODO(bradfitz): figure out :authority-vs-Host stuff between http2 and Go - host := req.Host - if host == "" { - host = req.URL.Host - } - - path := req.RequestURI - if path == "" { - path = "/" - } - - app.writeHeader(":authority", host) // probably not right for all sites - app.writeHeader(":method", req.Method) - app.writeHeader(":path", path) - app.writeHeader(":scheme", "https") - - for k, vv := range req.Header { - lowKey := strings.ToLower(k) - if lowKey == "host" { - continue - } - for _, v := range vv { - app.writeHeader(lowKey, v) - } - } - return app.hbuf.Bytes() -} - -func (app *h2i) writeHeader(name, value string) { - app.henc.WriteField(hpack.HeaderField{Name: name, Value: value}) - app.logf(" %s = %s", name, value) -} diff --git a/vendor/golang.org/x/net/http2/headermap.go b/vendor/golang.org/x/net/http2/headermap.go deleted file mode 100644 index c2805f6..0000000 --- a/vendor/golang.org/x/net/http2/headermap.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "net/http" - "strings" -) - -var ( - commonLowerHeader = map[string]string{} // Go-Canonical-Case -> lower-case - commonCanonHeader = map[string]string{} // lower-case -> Go-Canonical-Case -) - -func init() { - for _, v := range []string{ - "accept", - "accept-charset", - "accept-encoding", - "accept-language", - "accept-ranges", - "age", - "access-control-allow-origin", - "allow", - "authorization", - "cache-control", - "content-disposition", - "content-encoding", - "content-language", - "content-length", - "content-location", - "content-range", - "content-type", - "cookie", - "date", - "etag", - "expect", - "expires", - "from", - "host", - "if-match", - "if-modified-since", - "if-none-match", - "if-unmodified-since", - "last-modified", - "link", - "location", - "max-forwards", - "proxy-authenticate", - "proxy-authorization", - "range", - "referer", - "refresh", - "retry-after", - "server", - "set-cookie", - "strict-transport-security", - "trailer", - "transfer-encoding", - "user-agent", - "vary", - "via", - "www-authenticate", - } { - chk := http.CanonicalHeaderKey(v) - commonLowerHeader[chk] = v - commonCanonHeader[v] = chk - } -} - -func lowerHeader(v string) string { - if s, ok := commonLowerHeader[v]; ok { - return s - } - return strings.ToLower(v) -} diff --git a/vendor/golang.org/x/net/http2/hpack/encode.go b/vendor/golang.org/x/net/http2/hpack/encode.go deleted file mode 100644 index 1565cf2..0000000 --- a/vendor/golang.org/x/net/http2/hpack/encode.go +++ /dev/null @@ -1,240 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package hpack - -import ( - "io" -) - -const ( - uint32Max = ^uint32(0) - initialHeaderTableSize = 4096 -) - -type Encoder struct { - dynTab dynamicTable - // minSize is the minimum table size set by - // SetMaxDynamicTableSize after the previous Header Table Size - // Update. - minSize uint32 - // maxSizeLimit is the maximum table size this encoder - // supports. This will protect the encoder from too large - // size. - maxSizeLimit uint32 - // tableSizeUpdate indicates whether "Header Table Size - // Update" is required. - tableSizeUpdate bool - w io.Writer - buf []byte -} - -// NewEncoder returns a new Encoder which performs HPACK encoding. An -// encoded data is written to w. -func NewEncoder(w io.Writer) *Encoder { - e := &Encoder{ - minSize: uint32Max, - maxSizeLimit: initialHeaderTableSize, - tableSizeUpdate: false, - w: w, - } - e.dynTab.table.init() - e.dynTab.setMaxSize(initialHeaderTableSize) - return e -} - -// WriteField encodes f into a single Write to e's underlying Writer. -// This function may also produce bytes for "Header Table Size Update" -// if necessary. If produced, it is done before encoding f. -func (e *Encoder) WriteField(f HeaderField) error { - e.buf = e.buf[:0] - - if e.tableSizeUpdate { - e.tableSizeUpdate = false - if e.minSize < e.dynTab.maxSize { - e.buf = appendTableSize(e.buf, e.minSize) - } - e.minSize = uint32Max - e.buf = appendTableSize(e.buf, e.dynTab.maxSize) - } - - idx, nameValueMatch := e.searchTable(f) - if nameValueMatch { - e.buf = appendIndexed(e.buf, idx) - } else { - indexing := e.shouldIndex(f) - if indexing { - e.dynTab.add(f) - } - - if idx == 0 { - e.buf = appendNewName(e.buf, f, indexing) - } else { - e.buf = appendIndexedName(e.buf, f, idx, indexing) - } - } - n, err := e.w.Write(e.buf) - if err == nil && n != len(e.buf) { - err = io.ErrShortWrite - } - return err -} - -// searchTable searches f in both stable and dynamic header tables. -// The static header table is searched first. Only when there is no -// exact match for both name and value, the dynamic header table is -// then searched. If there is no match, i is 0. If both name and value -// match, i is the matched index and nameValueMatch becomes true. If -// only name matches, i points to that index and nameValueMatch -// becomes false. -func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) { - i, nameValueMatch = staticTable.search(f) - if nameValueMatch { - return i, true - } - - j, nameValueMatch := e.dynTab.table.search(f) - if nameValueMatch || (i == 0 && j != 0) { - return j + uint64(staticTable.len()), nameValueMatch - } - - return i, false -} - -// SetMaxDynamicTableSize changes the dynamic header table size to v. -// The actual size is bounded by the value passed to -// SetMaxDynamicTableSizeLimit. -func (e *Encoder) SetMaxDynamicTableSize(v uint32) { - if v > e.maxSizeLimit { - v = e.maxSizeLimit - } - if v < e.minSize { - e.minSize = v - } - e.tableSizeUpdate = true - e.dynTab.setMaxSize(v) -} - -// SetMaxDynamicTableSizeLimit changes the maximum value that can be -// specified in SetMaxDynamicTableSize to v. By default, it is set to -// 4096, which is the same size of the default dynamic header table -// size described in HPACK specification. If the current maximum -// dynamic header table size is strictly greater than v, "Header Table -// Size Update" will be done in the next WriteField call and the -// maximum dynamic header table size is truncated to v. 
-func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) { - e.maxSizeLimit = v - if e.dynTab.maxSize > v { - e.tableSizeUpdate = true - e.dynTab.setMaxSize(v) - } -} - -// shouldIndex reports whether f should be indexed. -func (e *Encoder) shouldIndex(f HeaderField) bool { - return !f.Sensitive && f.Size() <= e.dynTab.maxSize -} - -// appendIndexed appends index i, as encoded in "Indexed Header Field" -// representation, to dst and returns the extended buffer. -func appendIndexed(dst []byte, i uint64) []byte { - first := len(dst) - dst = appendVarInt(dst, 7, i) - dst[first] |= 0x80 - return dst -} - -// appendNewName appends f, as encoded in one of "Literal Header field -// - New Name" representation variants, to dst and returns the -// extended buffer. -// -// If f.Sensitive is true, "Never Indexed" representation is used. If -// f.Sensitive is false and indexing is true, "Inremental Indexing" -// representation is used. -func appendNewName(dst []byte, f HeaderField, indexing bool) []byte { - dst = append(dst, encodeTypeByte(indexing, f.Sensitive)) - dst = appendHpackString(dst, f.Name) - return appendHpackString(dst, f.Value) -} - -// appendIndexedName appends f and index i referring indexed name -// entry, as encoded in one of "Literal Header field - Indexed Name" -// representation variants, to dst and returns the extended buffer. -// -// If f.Sensitive is true, "Never Indexed" representation is used. If -// f.Sensitive is false and indexing is true, "Incremental Indexing" -// representation is used. -func appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte { - first := len(dst) - var n byte - if indexing { - n = 6 - } else { - n = 4 - } - dst = appendVarInt(dst, n, i) - dst[first] |= encodeTypeByte(indexing, f.Sensitive) - return appendHpackString(dst, f.Value) -} - -// appendTableSize appends v, as encoded in "Header Table Size Update" -// representation, to dst and returns the extended buffer. -func appendTableSize(dst []byte, v uint32) []byte { - first := len(dst) - dst = appendVarInt(dst, 5, uint64(v)) - dst[first] |= 0x20 - return dst -} - -// appendVarInt appends i, as encoded in variable integer form using n -// bit prefix, to dst and returns the extended buffer. -// -// See -// http://http2.github.io/http2-spec/compression.html#integer.representation -func appendVarInt(dst []byte, n byte, i uint64) []byte { - k := uint64((1 << n) - 1) - if i < k { - return append(dst, byte(i)) - } - dst = append(dst, byte(k)) - i -= k - for ; i >= 128; i >>= 7 { - dst = append(dst, byte(0x80|(i&0x7f))) - } - return append(dst, byte(i)) -} - -// appendHpackString appends s, as encoded in "String Literal" -// representation, to dst and returns the extended buffer. -// -// s will be encoded in Huffman codes only when it produces strictly -// shorter byte string. -func appendHpackString(dst []byte, s string) []byte { - huffmanLength := HuffmanEncodeLength(s) - if huffmanLength < uint64(len(s)) { - first := len(dst) - dst = appendVarInt(dst, 7, huffmanLength) - dst = AppendHuffmanString(dst, s) - dst[first] |= 0x80 - } else { - dst = appendVarInt(dst, 7, uint64(len(s))) - dst = append(dst, s...) - } - return dst -} - -// encodeTypeByte returns type byte. If sensitive is true, type byte -// for "Never Indexed" representation is returned. If sensitive is -// false and indexing is true, type byte for "Incremental Indexing" -// representation is returned. Otherwise, type byte for "Without -// Indexing" is returned. 
-func encodeTypeByte(indexing, sensitive bool) byte { - if sensitive { - return 0x10 - } - if indexing { - return 0x40 - } - return 0 -} diff --git a/vendor/golang.org/x/net/http2/hpack/hpack.go b/vendor/golang.org/x/net/http2/hpack/hpack.go deleted file mode 100644 index 166788c..0000000 --- a/vendor/golang.org/x/net/http2/hpack/hpack.go +++ /dev/null @@ -1,496 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package hpack implements HPACK, a compression format for -// efficiently representing HTTP header fields in the context of HTTP/2. -// -// See http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09 -package hpack - -import ( - "bytes" - "errors" - "fmt" -) - -// A DecodingError is something the spec defines as a decoding error. -type DecodingError struct { - Err error -} - -func (de DecodingError) Error() string { - return fmt.Sprintf("decoding error: %v", de.Err) -} - -// An InvalidIndexError is returned when an encoder references a table -// entry before the static table or after the end of the dynamic table. -type InvalidIndexError int - -func (e InvalidIndexError) Error() string { - return fmt.Sprintf("invalid indexed representation index %d", int(e)) -} - -// A HeaderField is a name-value pair. Both the name and value are -// treated as opaque sequences of octets. -type HeaderField struct { - Name, Value string - - // Sensitive means that this header field should never be - // indexed. - Sensitive bool -} - -// IsPseudo reports whether the header field is an http2 pseudo header. -// That is, it reports whether it starts with a colon. -// It is not otherwise guaranteed to be a valid pseudo header field, -// though. -func (hf HeaderField) IsPseudo() bool { - return len(hf.Name) != 0 && hf.Name[0] == ':' -} - -func (hf HeaderField) String() string { - var suffix string - if hf.Sensitive { - suffix = " (sensitive)" - } - return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix) -} - -// Size returns the size of an entry per RFC 7541 section 4.1. -func (hf HeaderField) Size() uint32 { - // http://http2.github.io/http2-spec/compression.html#rfc.section.4.1 - // "The size of the dynamic table is the sum of the size of - // its entries. The size of an entry is the sum of its name's - // length in octets (as defined in Section 5.2), its value's - // length in octets (see Section 5.2), plus 32. The size of - // an entry is calculated using the length of the name and - // value without any Huffman encoding applied." - - // This can overflow if somebody makes a large HeaderField - // Name and/or Value by hand, but we don't care, because that - // won't happen on the wire because the encoding doesn't allow - // it. - return uint32(len(hf.Name) + len(hf.Value) + 32) -} - -// A Decoder is the decoding context for incremental processing of -// header blocks. -type Decoder struct { - dynTab dynamicTable - emit func(f HeaderField) - - emitEnabled bool // whether calls to emit are enabled - maxStrLen int // 0 means unlimited - - // buf is the unparsed buffer. It's only written to - // saveBuf if it was truncated in the middle of a header - // block. Because it's usually not owned, we can only - // process it under Write. - buf []byte // not owned; only valid during Write - - // saveBuf is previous data passed to Write which we weren't able - // to fully parse before. Unlike buf, we own this data. 
- saveBuf bytes.Buffer -} - -// NewDecoder returns a new decoder with the provided maximum dynamic -// table size. The emitFunc will be called for each valid field -// parsed, in the same goroutine as calls to Write, before Write returns. -func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decoder { - d := &Decoder{ - emit: emitFunc, - emitEnabled: true, - } - d.dynTab.table.init() - d.dynTab.allowedMaxSize = maxDynamicTableSize - d.dynTab.setMaxSize(maxDynamicTableSize) - return d -} - -// ErrStringLength is returned by Decoder.Write when the max string length -// (as configured by Decoder.SetMaxStringLength) would be violated. -var ErrStringLength = errors.New("hpack: string too long") - -// SetMaxStringLength sets the maximum size of a HeaderField name or -// value string. If a string exceeds this length (even after any -// decompression), Write will return ErrStringLength. -// A value of 0 means unlimited and is the default from NewDecoder. -func (d *Decoder) SetMaxStringLength(n int) { - d.maxStrLen = n -} - -// SetEmitFunc changes the callback used when new header fields -// are decoded. -// It must be non-nil. It does not affect EmitEnabled. -func (d *Decoder) SetEmitFunc(emitFunc func(f HeaderField)) { - d.emit = emitFunc -} - -// SetEmitEnabled controls whether the emitFunc provided to NewDecoder -// should be called. The default is true. -// -// This facility exists to let servers enforce MAX_HEADER_LIST_SIZE -// while still decoding and keeping in-sync with decoder state, but -// without doing unnecessary decompression or generating unnecessary -// garbage for header fields past the limit. -func (d *Decoder) SetEmitEnabled(v bool) { d.emitEnabled = v } - -// EmitEnabled reports whether calls to the emitFunc provided to NewDecoder -// are currently enabled. The default is true. -func (d *Decoder) EmitEnabled() bool { return d.emitEnabled } - -// TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their -// underlying buffers for garbage reasons. - -func (d *Decoder) SetMaxDynamicTableSize(v uint32) { - d.dynTab.setMaxSize(v) -} - -// SetAllowedMaxDynamicTableSize sets the upper bound that the encoded -// stream (via dynamic table size updates) may set the maximum size -// to. -func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) { - d.dynTab.allowedMaxSize = v -} - -type dynamicTable struct { - // http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2 - table headerFieldTable - size uint32 // in bytes - maxSize uint32 // current maxSize - allowedMaxSize uint32 // maxSize may go up to this, inclusive -} - -func (dt *dynamicTable) setMaxSize(v uint32) { - dt.maxSize = v - dt.evict() -} - -func (dt *dynamicTable) add(f HeaderField) { - dt.table.addEntry(f) - dt.size += f.Size() - dt.evict() -} - -// If we're too big, evict old stuff. -func (dt *dynamicTable) evict() { - var n int - for dt.size > dt.maxSize && n < dt.table.len() { - dt.size -= dt.table.ents[n].Size() - n++ - } - dt.table.evictOldest(n) -} - -func (d *Decoder) maxTableIndex() int { - // This should never overflow. RFC 7540 Section 6.5.2 limits the size of - // the dynamic table to 2^32 bytes, where each entry will occupy more than - // one byte. Further, the staticTable has a fixed, small length. - return d.dynTab.table.len() + staticTable.len() -} - -func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) { - // See Section 2.3.3. 
- if i == 0 { - return - } - if i <= uint64(staticTable.len()) { - return staticTable.ents[i-1], true - } - if i > uint64(d.maxTableIndex()) { - return - } - // In the dynamic table, newer entries have lower indices. - // However, dt.ents[0] is the oldest entry. Hence, dt.ents is - // the reversed dynamic table. - dt := d.dynTab.table - return dt.ents[dt.len()-(int(i)-staticTable.len())], true -} - -// Decode decodes an entire block. -// -// TODO: remove this method and make it incremental later? This is -// easier for debugging now. -func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) { - var hf []HeaderField - saveFunc := d.emit - defer func() { d.emit = saveFunc }() - d.emit = func(f HeaderField) { hf = append(hf, f) } - if _, err := d.Write(p); err != nil { - return nil, err - } - if err := d.Close(); err != nil { - return nil, err - } - return hf, nil -} - -func (d *Decoder) Close() error { - if d.saveBuf.Len() > 0 { - d.saveBuf.Reset() - return DecodingError{errors.New("truncated headers")} - } - return nil -} - -func (d *Decoder) Write(p []byte) (n int, err error) { - if len(p) == 0 { - // Prevent state machine CPU attacks (making us redo - // work up to the point of finding out we don't have - // enough data) - return - } - // Only copy the data if we have to. Optimistically assume - // that p will contain a complete header block. - if d.saveBuf.Len() == 0 { - d.buf = p - } else { - d.saveBuf.Write(p) - d.buf = d.saveBuf.Bytes() - d.saveBuf.Reset() - } - - for len(d.buf) > 0 { - err = d.parseHeaderFieldRepr() - if err == errNeedMore { - // Extra paranoia, making sure saveBuf won't - // get too large. All the varint and string - // reading code earlier should already catch - // overlong things and return ErrStringLength, - // but keep this as a last resort. - const varIntOverhead = 8 // conservative - if d.maxStrLen != 0 && int64(len(d.buf)) > 2*(int64(d.maxStrLen)+varIntOverhead) { - return 0, ErrStringLength - } - d.saveBuf.Write(d.buf) - return len(p), nil - } - if err != nil { - break - } - } - return len(p), err -} - -// errNeedMore is an internal sentinel error value that means the -// buffer is truncated and we need to read more data before we can -// continue parsing. -var errNeedMore = errors.New("need more data") - -type indexType int - -const ( - indexedTrue indexType = iota - indexedFalse - indexedNever -) - -func (v indexType) indexed() bool { return v == indexedTrue } -func (v indexType) sensitive() bool { return v == indexedNever } - -// returns errNeedMore if there isn't enough data available. -// any other error is fatal. -// consumes d.buf iff it returns nil. -// precondition: must be called with len(d.buf) > 0 -func (d *Decoder) parseHeaderFieldRepr() error { - b := d.buf[0] - switch { - case b&128 != 0: - // Indexed representation. - // High bit set? 
- // http://http2.github.io/http2-spec/compression.html#rfc.section.6.1 - return d.parseFieldIndexed() - case b&192 == 64: - // 6.2.1 Literal Header Field with Incremental Indexing - // 0b10xxxxxx: top two bits are 10 - // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.1 - return d.parseFieldLiteral(6, indexedTrue) - case b&240 == 0: - // 6.2.2 Literal Header Field without Indexing - // 0b0000xxxx: top four bits are 0000 - // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.2 - return d.parseFieldLiteral(4, indexedFalse) - case b&240 == 16: - // 6.2.3 Literal Header Field never Indexed - // 0b0001xxxx: top four bits are 0001 - // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.3 - return d.parseFieldLiteral(4, indexedNever) - case b&224 == 32: - // 6.3 Dynamic Table Size Update - // Top three bits are '001'. - // http://http2.github.io/http2-spec/compression.html#rfc.section.6.3 - return d.parseDynamicTableSizeUpdate() - } - - return DecodingError{errors.New("invalid encoding")} -} - -// (same invariants and behavior as parseHeaderFieldRepr) -func (d *Decoder) parseFieldIndexed() error { - buf := d.buf - idx, buf, err := readVarInt(7, buf) - if err != nil { - return err - } - hf, ok := d.at(idx) - if !ok { - return DecodingError{InvalidIndexError(idx)} - } - d.buf = buf - return d.callEmit(HeaderField{Name: hf.Name, Value: hf.Value}) -} - -// (same invariants and behavior as parseHeaderFieldRepr) -func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error { - buf := d.buf - nameIdx, buf, err := readVarInt(n, buf) - if err != nil { - return err - } - - var hf HeaderField - wantStr := d.emitEnabled || it.indexed() - if nameIdx > 0 { - ihf, ok := d.at(nameIdx) - if !ok { - return DecodingError{InvalidIndexError(nameIdx)} - } - hf.Name = ihf.Name - } else { - hf.Name, buf, err = d.readString(buf, wantStr) - if err != nil { - return err - } - } - hf.Value, buf, err = d.readString(buf, wantStr) - if err != nil { - return err - } - d.buf = buf - if it.indexed() { - d.dynTab.add(hf) - } - hf.Sensitive = it.sensitive() - return d.callEmit(hf) -} - -func (d *Decoder) callEmit(hf HeaderField) error { - if d.maxStrLen != 0 { - if len(hf.Name) > d.maxStrLen || len(hf.Value) > d.maxStrLen { - return ErrStringLength - } - } - if d.emitEnabled { - d.emit(hf) - } - return nil -} - -// (same invariants and behavior as parseHeaderFieldRepr) -func (d *Decoder) parseDynamicTableSizeUpdate() error { - // RFC 7541, sec 4.2: This dynamic table size update MUST occur at the - // beginning of the first header block following the change to the dynamic table size. - if d.dynTab.size > 0 { - return DecodingError{errors.New("dynamic table size update MUST occur at the beginning of a header block")} - } - - buf := d.buf - size, buf, err := readVarInt(5, buf) - if err != nil { - return err - } - if size > uint64(d.dynTab.allowedMaxSize) { - return DecodingError{errors.New("dynamic table size update too large")} - } - d.dynTab.setMaxSize(uint32(size)) - d.buf = buf - return nil -} - -var errVarintOverflow = DecodingError{errors.New("varint integer overflow")} - -// readVarInt reads an unsigned variable length integer off the -// beginning of p. n is the parameter as described in -// http://http2.github.io/http2-spec/compression.html#rfc.section.5.1. -// -// n must always be between 1 and 8. -// -// The returned remain buffer is either a smaller suffix of p, or err != nil. -// The error is errNeedMore if p doesn't contain a complete integer. 
-func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) { - if n < 1 || n > 8 { - panic("bad n") - } - if len(p) == 0 { - return 0, p, errNeedMore - } - i = uint64(p[0]) - if n < 8 { - i &= (1 << uint64(n)) - 1 - } - if i < (1< 0 { - b := p[0] - p = p[1:] - i += uint64(b&127) << m - if b&128 == 0 { - return i, p, nil - } - m += 7 - if m >= 63 { // TODO: proper overflow check. making this up. - return 0, origP, errVarintOverflow - } - } - return 0, origP, errNeedMore -} - -// readString decodes an hpack string from p. -// -// wantStr is whether s will be used. If false, decompression and -// []byte->string garbage are skipped if s will be ignored -// anyway. This does mean that huffman decoding errors for non-indexed -// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server -// is returning an error anyway, and because they're not indexed, the error -// won't affect the decoding state. -func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) { - if len(p) == 0 { - return "", p, errNeedMore - } - isHuff := p[0]&128 != 0 - strLen, p, err := readVarInt(7, p) - if err != nil { - return "", p, err - } - if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) { - return "", nil, ErrStringLength - } - if uint64(len(p)) < strLen { - return "", p, errNeedMore - } - if !isHuff { - if wantStr { - s = string(p[:strLen]) - } - return s, p[strLen:], nil - } - - if wantStr { - buf := bufPool.Get().(*bytes.Buffer) - buf.Reset() // don't trust others - defer bufPool.Put(buf) - if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil { - buf.Reset() - return "", nil, err - } - s = buf.String() - buf.Reset() // be nice to GC - } - return s, p[strLen:], nil -} diff --git a/vendor/golang.org/x/net/http2/hpack/huffman.go b/vendor/golang.org/x/net/http2/hpack/huffman.go deleted file mode 100644 index 8850e39..0000000 --- a/vendor/golang.org/x/net/http2/hpack/huffman.go +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package hpack - -import ( - "bytes" - "errors" - "io" - "sync" -) - -var bufPool = sync.Pool{ - New: func() interface{} { return new(bytes.Buffer) }, -} - -// HuffmanDecode decodes the string in v and writes the expanded -// result to w, returning the number of bytes written to w and the -// Write call's return value. At most one Write call is made. -func HuffmanDecode(w io.Writer, v []byte) (int, error) { - buf := bufPool.Get().(*bytes.Buffer) - buf.Reset() - defer bufPool.Put(buf) - if err := huffmanDecode(buf, 0, v); err != nil { - return 0, err - } - return w.Write(buf.Bytes()) -} - -// HuffmanDecodeToString decodes the string in v. -func HuffmanDecodeToString(v []byte) (string, error) { - buf := bufPool.Get().(*bytes.Buffer) - buf.Reset() - defer bufPool.Put(buf) - if err := huffmanDecode(buf, 0, v); err != nil { - return "", err - } - return buf.String(), nil -} - -// ErrInvalidHuffman is returned for errors found decoding -// Huffman-encoded strings. -var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data") - -// huffmanDecode decodes v to buf. -// If maxLen is greater than 0, attempts to write more to buf than -// maxLen bytes will return ErrStringLength. -func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error { - n := rootHuffmanNode - // cur is the bit buffer that has not been fed into n. 
- // cbits is the number of low order bits in cur that are valid. - // sbits is the number of bits of the symbol prefix being decoded. - cur, cbits, sbits := uint(0), uint8(0), uint8(0) - for _, b := range v { - cur = cur<<8 | uint(b) - cbits += 8 - sbits += 8 - for cbits >= 8 { - idx := byte(cur >> (cbits - 8)) - n = n.children[idx] - if n == nil { - return ErrInvalidHuffman - } - if n.children == nil { - if maxLen != 0 && buf.Len() == maxLen { - return ErrStringLength - } - buf.WriteByte(n.sym) - cbits -= n.codeLen - n = rootHuffmanNode - sbits = cbits - } else { - cbits -= 8 - } - } - } - for cbits > 0 { - n = n.children[byte(cur<<(8-cbits))] - if n == nil { - return ErrInvalidHuffman - } - if n.children != nil || n.codeLen > cbits { - break - } - if maxLen != 0 && buf.Len() == maxLen { - return ErrStringLength - } - buf.WriteByte(n.sym) - cbits -= n.codeLen - n = rootHuffmanNode - sbits = cbits - } - if sbits > 7 { - // Either there was an incomplete symbol, or overlong padding. - // Both are decoding errors per RFC 7541 section 5.2. - return ErrInvalidHuffman - } - if mask := uint(1< 8 { - codeLen -= 8 - i := uint8(code >> codeLen) - if cur.children[i] == nil { - cur.children[i] = newInternalNode() - } - cur = cur.children[i] - } - shift := 8 - codeLen - start, end := int(uint8(code<> (nbits - rembits)) - dst[len(dst)-1] |= t - } - - return dst -} - -// HuffmanEncodeLength returns the number of bytes required to encode -// s in Huffman codes. The result is round up to byte boundary. -func HuffmanEncodeLength(s string) uint64 { - n := uint64(0) - for i := 0; i < len(s); i++ { - n += uint64(huffmanCodeLen[s[i]]) - } - return (n + 7) / 8 -} - -// appendByteToHuffmanCode appends Huffman code for c to dst and -// returns the extended buffer and the remaining bits in the last -// element. The appending is not byte aligned and the remaining bits -// in the last element of dst is given in rembits. -func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) { - code := huffmanCodes[c] - nbits := huffmanCodeLen[c] - - for { - if rembits > nbits { - t := uint8(code << (rembits - nbits)) - dst[len(dst)-1] |= t - rembits -= nbits - break - } - - t := uint8(code >> (nbits - rembits)) - dst[len(dst)-1] |= t - - nbits -= rembits - rembits = 8 - - if nbits == 0 { - break - } - - dst = append(dst, 0) - } - - return dst, rembits -} diff --git a/vendor/golang.org/x/net/http2/hpack/tables.go b/vendor/golang.org/x/net/http2/hpack/tables.go deleted file mode 100644 index a66cfbe..0000000 --- a/vendor/golang.org/x/net/http2/hpack/tables.go +++ /dev/null @@ -1,479 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package hpack - -import ( - "fmt" -) - -// headerFieldTable implements a list of HeaderFields. -// This is used to implement the static and dynamic tables. -type headerFieldTable struct { - // For static tables, entries are never evicted. - // - // For dynamic tables, entries are evicted from ents[0] and added to the end. - // Each entry has a unique id that starts at one and increments for each - // entry that is added. This unique id is stable across evictions, meaning - // it can be used as a pointer to a specific entry. As in hpack, unique ids - // are 1-based. The unique id for ents[k] is k + evictCount + 1. - // - // Zero is not a valid unique id. - // - // evictCount should not overflow in any remotely practical situation. 
In - // practice, we will have one dynamic table per HTTP/2 connection. If we - // assume a very powerful server that handles 1M QPS per connection and each - // request adds (then evicts) 100 entries from the table, it would still take - // 2M years for evictCount to overflow. - ents []HeaderField - evictCount uint64 - - // byName maps a HeaderField name to the unique id of the newest entry with - // the same name. See above for a definition of "unique id". - byName map[string]uint64 - - // byNameValue maps a HeaderField name/value pair to the unique id of the newest - // entry with the same name and value. See above for a definition of "unique id". - byNameValue map[pairNameValue]uint64 -} - -type pairNameValue struct { - name, value string -} - -func (t *headerFieldTable) init() { - t.byName = make(map[string]uint64) - t.byNameValue = make(map[pairNameValue]uint64) -} - -// len reports the number of entries in the table. -func (t *headerFieldTable) len() int { - return len(t.ents) -} - -// addEntry adds a new entry. -func (t *headerFieldTable) addEntry(f HeaderField) { - id := uint64(t.len()) + t.evictCount + 1 - t.byName[f.Name] = id - t.byNameValue[pairNameValue{f.Name, f.Value}] = id - t.ents = append(t.ents, f) -} - -// evictOldest evicts the n oldest entries in the table. -func (t *headerFieldTable) evictOldest(n int) { - if n > t.len() { - panic(fmt.Sprintf("evictOldest(%v) on table with %v entries", n, t.len())) - } - for k := 0; k < n; k++ { - f := t.ents[k] - id := t.evictCount + uint64(k) + 1 - if t.byName[f.Name] == id { - delete(t.byName, f.Name) - } - if p := (pairNameValue{f.Name, f.Value}); t.byNameValue[p] == id { - delete(t.byNameValue, p) - } - } - copy(t.ents, t.ents[n:]) - for k := t.len() - n; k < t.len(); k++ { - t.ents[k] = HeaderField{} // so strings can be garbage collected - } - t.ents = t.ents[:t.len()-n] - if t.evictCount+uint64(n) < t.evictCount { - panic("evictCount overflow") - } - t.evictCount += uint64(n) -} - -// search finds f in the table. If there is no match, i is 0. -// If both name and value match, i is the matched index and nameValueMatch -// becomes true. If only name matches, i points to that index and -// nameValueMatch becomes false. -// -// The returned index is a 1-based HPACK index. For dynamic tables, HPACK says -// that index 1 should be the newest entry, but t.ents[0] is the oldest entry, -// meaning t.ents is reversed for dynamic tables. Hence, when t is a dynamic -// table, the return value i actually refers to the entry t.ents[t.len()-i]. -// -// All tables are assumed to be a dynamic tables except for the global -// staticTable pointer. -// -// See Section 2.3.3. -func (t *headerFieldTable) search(f HeaderField) (i uint64, nameValueMatch bool) { - if !f.Sensitive { - if id := t.byNameValue[pairNameValue{f.Name, f.Value}]; id != 0 { - return t.idToIndex(id), true - } - } - if id := t.byName[f.Name]; id != 0 { - return t.idToIndex(id), false - } - return 0, false -} - -// idToIndex converts a unique id to an HPACK index. -// See Section 2.3.3. 
-func (t *headerFieldTable) idToIndex(id uint64) uint64 { - if id <= t.evictCount { - panic(fmt.Sprintf("id (%v) <= evictCount (%v)", id, t.evictCount)) - } - k := id - t.evictCount - 1 // convert id to an index t.ents[k] - if t != staticTable { - return uint64(t.len()) - k // dynamic table - } - return k + 1 -} - -// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B -var staticTable = newStaticTable() -var staticTableEntries = [...]HeaderField{ - {Name: ":authority"}, - {Name: ":method", Value: "GET"}, - {Name: ":method", Value: "POST"}, - {Name: ":path", Value: "/"}, - {Name: ":path", Value: "/index.html"}, - {Name: ":scheme", Value: "http"}, - {Name: ":scheme", Value: "https"}, - {Name: ":status", Value: "200"}, - {Name: ":status", Value: "204"}, - {Name: ":status", Value: "206"}, - {Name: ":status", Value: "304"}, - {Name: ":status", Value: "400"}, - {Name: ":status", Value: "404"}, - {Name: ":status", Value: "500"}, - {Name: "accept-charset"}, - {Name: "accept-encoding", Value: "gzip, deflate"}, - {Name: "accept-language"}, - {Name: "accept-ranges"}, - {Name: "accept"}, - {Name: "access-control-allow-origin"}, - {Name: "age"}, - {Name: "allow"}, - {Name: "authorization"}, - {Name: "cache-control"}, - {Name: "content-disposition"}, - {Name: "content-encoding"}, - {Name: "content-language"}, - {Name: "content-length"}, - {Name: "content-location"}, - {Name: "content-range"}, - {Name: "content-type"}, - {Name: "cookie"}, - {Name: "date"}, - {Name: "etag"}, - {Name: "expect"}, - {Name: "expires"}, - {Name: "from"}, - {Name: "host"}, - {Name: "if-match"}, - {Name: "if-modified-since"}, - {Name: "if-none-match"}, - {Name: "if-range"}, - {Name: "if-unmodified-since"}, - {Name: "last-modified"}, - {Name: "link"}, - {Name: "location"}, - {Name: "max-forwards"}, - {Name: "proxy-authenticate"}, - {Name: "proxy-authorization"}, - {Name: "range"}, - {Name: "referer"}, - {Name: "refresh"}, - {Name: "retry-after"}, - {Name: "server"}, - {Name: "set-cookie"}, - {Name: "strict-transport-security"}, - {Name: "transfer-encoding"}, - {Name: "user-agent"}, - {Name: "vary"}, - {Name: "via"}, - {Name: "www-authenticate"}, -} - -func newStaticTable() *headerFieldTable { - t := &headerFieldTable{} - t.init() - for _, e := range staticTableEntries[:] { - t.addEntry(e) - } - return t -} - -var huffmanCodes = [256]uint32{ - 0x1ff8, - 0x7fffd8, - 0xfffffe2, - 0xfffffe3, - 0xfffffe4, - 0xfffffe5, - 0xfffffe6, - 0xfffffe7, - 0xfffffe8, - 0xffffea, - 0x3ffffffc, - 0xfffffe9, - 0xfffffea, - 0x3ffffffd, - 0xfffffeb, - 0xfffffec, - 0xfffffed, - 0xfffffee, - 0xfffffef, - 0xffffff0, - 0xffffff1, - 0xffffff2, - 0x3ffffffe, - 0xffffff3, - 0xffffff4, - 0xffffff5, - 0xffffff6, - 0xffffff7, - 0xffffff8, - 0xffffff9, - 0xffffffa, - 0xffffffb, - 0x14, - 0x3f8, - 0x3f9, - 0xffa, - 0x1ff9, - 0x15, - 0xf8, - 0x7fa, - 0x3fa, - 0x3fb, - 0xf9, - 0x7fb, - 0xfa, - 0x16, - 0x17, - 0x18, - 0x0, - 0x1, - 0x2, - 0x19, - 0x1a, - 0x1b, - 0x1c, - 0x1d, - 0x1e, - 0x1f, - 0x5c, - 0xfb, - 0x7ffc, - 0x20, - 0xffb, - 0x3fc, - 0x1ffa, - 0x21, - 0x5d, - 0x5e, - 0x5f, - 0x60, - 0x61, - 0x62, - 0x63, - 0x64, - 0x65, - 0x66, - 0x67, - 0x68, - 0x69, - 0x6a, - 0x6b, - 0x6c, - 0x6d, - 0x6e, - 0x6f, - 0x70, - 0x71, - 0x72, - 0xfc, - 0x73, - 0xfd, - 0x1ffb, - 0x7fff0, - 0x1ffc, - 0x3ffc, - 0x22, - 0x7ffd, - 0x3, - 0x23, - 0x4, - 0x24, - 0x5, - 0x25, - 0x26, - 0x27, - 0x6, - 0x74, - 0x75, - 0x28, - 0x29, - 0x2a, - 0x7, - 0x2b, - 0x76, - 0x2c, - 0x8, - 0x9, - 0x2d, - 0x77, - 0x78, - 0x79, - 0x7a, - 0x7b, - 0x7ffe, - 0x7fc, - 
0x3ffd, - 0x1ffd, - 0xffffffc, - 0xfffe6, - 0x3fffd2, - 0xfffe7, - 0xfffe8, - 0x3fffd3, - 0x3fffd4, - 0x3fffd5, - 0x7fffd9, - 0x3fffd6, - 0x7fffda, - 0x7fffdb, - 0x7fffdc, - 0x7fffdd, - 0x7fffde, - 0xffffeb, - 0x7fffdf, - 0xffffec, - 0xffffed, - 0x3fffd7, - 0x7fffe0, - 0xffffee, - 0x7fffe1, - 0x7fffe2, - 0x7fffe3, - 0x7fffe4, - 0x1fffdc, - 0x3fffd8, - 0x7fffe5, - 0x3fffd9, - 0x7fffe6, - 0x7fffe7, - 0xffffef, - 0x3fffda, - 0x1fffdd, - 0xfffe9, - 0x3fffdb, - 0x3fffdc, - 0x7fffe8, - 0x7fffe9, - 0x1fffde, - 0x7fffea, - 0x3fffdd, - 0x3fffde, - 0xfffff0, - 0x1fffdf, - 0x3fffdf, - 0x7fffeb, - 0x7fffec, - 0x1fffe0, - 0x1fffe1, - 0x3fffe0, - 0x1fffe2, - 0x7fffed, - 0x3fffe1, - 0x7fffee, - 0x7fffef, - 0xfffea, - 0x3fffe2, - 0x3fffe3, - 0x3fffe4, - 0x7ffff0, - 0x3fffe5, - 0x3fffe6, - 0x7ffff1, - 0x3ffffe0, - 0x3ffffe1, - 0xfffeb, - 0x7fff1, - 0x3fffe7, - 0x7ffff2, - 0x3fffe8, - 0x1ffffec, - 0x3ffffe2, - 0x3ffffe3, - 0x3ffffe4, - 0x7ffffde, - 0x7ffffdf, - 0x3ffffe5, - 0xfffff1, - 0x1ffffed, - 0x7fff2, - 0x1fffe3, - 0x3ffffe6, - 0x7ffffe0, - 0x7ffffe1, - 0x3ffffe7, - 0x7ffffe2, - 0xfffff2, - 0x1fffe4, - 0x1fffe5, - 0x3ffffe8, - 0x3ffffe9, - 0xffffffd, - 0x7ffffe3, - 0x7ffffe4, - 0x7ffffe5, - 0xfffec, - 0xfffff3, - 0xfffed, - 0x1fffe6, - 0x3fffe9, - 0x1fffe7, - 0x1fffe8, - 0x7ffff3, - 0x3fffea, - 0x3fffeb, - 0x1ffffee, - 0x1ffffef, - 0xfffff4, - 0xfffff5, - 0x3ffffea, - 0x7ffff4, - 0x3ffffeb, - 0x7ffffe6, - 0x3ffffec, - 0x3ffffed, - 0x7ffffe7, - 0x7ffffe8, - 0x7ffffe9, - 0x7ffffea, - 0x7ffffeb, - 0xffffffe, - 0x7ffffec, - 0x7ffffed, - 0x7ffffee, - 0x7ffffef, - 0x7fffff0, - 0x3ffffee, -} - -var huffmanCodeLen = [256]uint8{ - 13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28, - 28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28, - 6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6, - 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10, - 13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6, - 15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5, - 6, 7, 6, 5, 5, 6, 7, 7, 7, 7, 7, 15, 11, 14, 13, 28, - 20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23, - 24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24, - 22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23, - 21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23, - 26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25, - 19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27, - 20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23, - 26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26, -} diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go deleted file mode 100644 index c824282..0000000 --- a/vendor/golang.org/x/net/http2/http2.go +++ /dev/null @@ -1,391 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package http2 implements the HTTP/2 protocol. -// -// This package is low-level and intended to be used directly by very -// few people. Most users will use it indirectly through the automatic -// use by the net/http package (from Go 1.6 and later). -// For use in earlier Go versions see ConfigureServer. (Transport support -// requires Go 1.6 or later) -// -// See https://http2.github.io/ for more information on HTTP/2. -// -// See https://http2.golang.org/ for a test server running this code. 
-// -package http2 // import "golang.org/x/net/http2" - -import ( - "bufio" - "crypto/tls" - "errors" - "fmt" - "io" - "net/http" - "os" - "sort" - "strconv" - "strings" - "sync" - - "golang.org/x/net/http/httpguts" -) - -var ( - VerboseLogs bool - logFrameWrites bool - logFrameReads bool - inTests bool -) - -func init() { - e := os.Getenv("GODEBUG") - if strings.Contains(e, "http2debug=1") { - VerboseLogs = true - } - if strings.Contains(e, "http2debug=2") { - VerboseLogs = true - logFrameWrites = true - logFrameReads = true - } -} - -const ( - // ClientPreface is the string that must be sent by new - // connections from clients. - ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n" - - // SETTINGS_MAX_FRAME_SIZE default - // http://http2.github.io/http2-spec/#rfc.section.6.5.2 - initialMaxFrameSize = 16384 - - // NextProtoTLS is the NPN/ALPN protocol negotiated during - // HTTP/2's TLS setup. - NextProtoTLS = "h2" - - // http://http2.github.io/http2-spec/#SettingValues - initialHeaderTableSize = 4096 - - initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size - - defaultMaxReadFrameSize = 1 << 20 -) - -var ( - clientPreface = []byte(ClientPreface) -) - -type streamState int - -// HTTP/2 stream states. -// -// See http://tools.ietf.org/html/rfc7540#section-5.1. -// -// For simplicity, the server code merges "reserved (local)" into -// "half-closed (remote)". This is one less state transition to track. -// The only downside is that we send PUSH_PROMISEs slightly less -// liberally than allowable. More discussion here: -// https://lists.w3.org/Archives/Public/ietf-http-wg/2016JulSep/0599.html -// -// "reserved (remote)" is omitted since the client code does not -// support server push. -const ( - stateIdle streamState = iota - stateOpen - stateHalfClosedLocal - stateHalfClosedRemote - stateClosed -) - -var stateName = [...]string{ - stateIdle: "Idle", - stateOpen: "Open", - stateHalfClosedLocal: "HalfClosedLocal", - stateHalfClosedRemote: "HalfClosedRemote", - stateClosed: "Closed", -} - -func (st streamState) String() string { - return stateName[st] -} - -// Setting is a setting parameter: which setting it is, and its value. -type Setting struct { - // ID is which setting is being set. - // See http://http2.github.io/http2-spec/#SettingValues - ID SettingID - - // Val is the value. - Val uint32 -} - -func (s Setting) String() string { - return fmt.Sprintf("[%v = %d]", s.ID, s.Val) -} - -// Valid reports whether the setting is valid. 
-func (s Setting) Valid() error { - // Limits and error codes from 6.5.2 Defined SETTINGS Parameters - switch s.ID { - case SettingEnablePush: - if s.Val != 1 && s.Val != 0 { - return ConnectionError(ErrCodeProtocol) - } - case SettingInitialWindowSize: - if s.Val > 1<<31-1 { - return ConnectionError(ErrCodeFlowControl) - } - case SettingMaxFrameSize: - if s.Val < 16384 || s.Val > 1<<24-1 { - return ConnectionError(ErrCodeProtocol) - } - } - return nil -} - -// A SettingID is an HTTP/2 setting as defined in -// http://http2.github.io/http2-spec/#iana-settings -type SettingID uint16 - -const ( - SettingHeaderTableSize SettingID = 0x1 - SettingEnablePush SettingID = 0x2 - SettingMaxConcurrentStreams SettingID = 0x3 - SettingInitialWindowSize SettingID = 0x4 - SettingMaxFrameSize SettingID = 0x5 - SettingMaxHeaderListSize SettingID = 0x6 -) - -var settingName = map[SettingID]string{ - SettingHeaderTableSize: "HEADER_TABLE_SIZE", - SettingEnablePush: "ENABLE_PUSH", - SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", - SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", - SettingMaxFrameSize: "MAX_FRAME_SIZE", - SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", -} - -func (s SettingID) String() string { - if v, ok := settingName[s]; ok { - return v - } - return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s)) -} - -var ( - errInvalidHeaderFieldName = errors.New("http2: invalid header field name") - errInvalidHeaderFieldValue = errors.New("http2: invalid header field value") -) - -// validWireHeaderFieldName reports whether v is a valid header field -// name (key). See httpguts.ValidHeaderName for the base rules. -// -// Further, http2 says: -// "Just as in HTTP/1.x, header field names are strings of ASCII -// characters that are compared in a case-insensitive -// fashion. However, header field names MUST be converted to -// lowercase prior to their encoding in HTTP/2. " -func validWireHeaderFieldName(v string) bool { - if len(v) == 0 { - return false - } - for _, r := range v { - if !httpguts.IsTokenRune(r) { - return false - } - if 'A' <= r && r <= 'Z' { - return false - } - } - return true -} - -var httpCodeStringCommon = map[int]string{} // n -> strconv.Itoa(n) - -func init() { - for i := 100; i <= 999; i++ { - if v := http.StatusText(i); v != "" { - httpCodeStringCommon[i] = strconv.Itoa(i) - } - } -} - -func httpCodeString(code int) string { - if s, ok := httpCodeStringCommon[code]; ok { - return s - } - return strconv.Itoa(code) -} - -// from pkg io -type stringWriter interface { - WriteString(s string) (n int, err error) -} - -// A gate lets two goroutines coordinate their activities. -type gate chan struct{} - -func (g gate) Done() { g <- struct{}{} } -func (g gate) Wait() { <-g } - -// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed). -type closeWaiter chan struct{} - -// Init makes a closeWaiter usable. -// It exists because so a closeWaiter value can be placed inside a -// larger struct and have the Mutex and Cond's memory in the same -// allocation. -func (cw *closeWaiter) Init() { - *cw = make(chan struct{}) -} - -// Close marks the closeWaiter as closed and unblocks any waiters. -func (cw closeWaiter) Close() { - close(cw) -} - -// Wait waits for the closeWaiter to become closed. -func (cw closeWaiter) Wait() { - <-cw -} - -// bufferedWriter is a buffered writer that writes to w. -// Its buffered writer is lazily allocated as needed, to minimize -// idle memory usage with many connections. 
-type bufferedWriter struct { - w io.Writer // immutable - bw *bufio.Writer // non-nil when data is buffered -} - -func newBufferedWriter(w io.Writer) *bufferedWriter { - return &bufferedWriter{w: w} -} - -// bufWriterPoolBufferSize is the size of bufio.Writer's -// buffers created using bufWriterPool. -// -// TODO: pick a less arbitrary value? this is a bit under -// (3 x typical 1500 byte MTU) at least. Other than that, -// not much thought went into it. -const bufWriterPoolBufferSize = 4 << 10 - -var bufWriterPool = sync.Pool{ - New: func() interface{} { - return bufio.NewWriterSize(nil, bufWriterPoolBufferSize) - }, -} - -func (w *bufferedWriter) Available() int { - if w.bw == nil { - return bufWriterPoolBufferSize - } - return w.bw.Available() -} - -func (w *bufferedWriter) Write(p []byte) (n int, err error) { - if w.bw == nil { - bw := bufWriterPool.Get().(*bufio.Writer) - bw.Reset(w.w) - w.bw = bw - } - return w.bw.Write(p) -} - -func (w *bufferedWriter) Flush() error { - bw := w.bw - if bw == nil { - return nil - } - err := bw.Flush() - bw.Reset(nil) - bufWriterPool.Put(bw) - w.bw = nil - return err -} - -func mustUint31(v int32) uint32 { - if v < 0 || v > 2147483647 { - panic("out of range") - } - return uint32(v) -} - -// bodyAllowedForStatus reports whether a given response status code -// permits a body. See RFC 7230, section 3.3. -func bodyAllowedForStatus(status int) bool { - switch { - case status >= 100 && status <= 199: - return false - case status == 204: - return false - case status == 304: - return false - } - return true -} - -type httpError struct { - msg string - timeout bool -} - -func (e *httpError) Error() string { return e.msg } -func (e *httpError) Timeout() bool { return e.timeout } -func (e *httpError) Temporary() bool { return true } - -var errTimeout error = &httpError{msg: "http2: timeout awaiting response headers", timeout: true} - -type connectionStater interface { - ConnectionState() tls.ConnectionState -} - -var sorterPool = sync.Pool{New: func() interface{} { return new(sorter) }} - -type sorter struct { - v []string // owned by sorter -} - -func (s *sorter) Len() int { return len(s.v) } -func (s *sorter) Swap(i, j int) { s.v[i], s.v[j] = s.v[j], s.v[i] } -func (s *sorter) Less(i, j int) bool { return s.v[i] < s.v[j] } - -// Keys returns the sorted keys of h. -// -// The returned slice is only valid until s used again or returned to -// its pool. -func (s *sorter) Keys(h http.Header) []string { - keys := s.v[:0] - for k := range h { - keys = append(keys, k) - } - s.v = keys - sort.Sort(s) - return keys -} - -func (s *sorter) SortStrings(ss []string) { - // Our sorter works on s.v, which sorter owns, so - // stash it away while we sort the user's buffer. - save := s.v - s.v = ss - sort.Sort(s) - s.v = save -} - -// validPseudoPath reports whether v is a valid :path pseudo-header -// value. It must be either: -// -// *) a non-empty string starting with '/' -// *) the string '*', for OPTIONS requests. -// -// For now this is only used a quick check for deciding when to clean -// up Opaque URLs before sending requests from the Transport. -// See golang.org/issue/16847 -// -// We used to enforce that the path also didn't start with "//", but -// Google's GFE accepts such paths and Chrome sends them, so ignore -// that part of the spec. See golang.org/issue/19103. 
-func validPseudoPath(v string) bool { - return (len(v) > 0 && v[0] == '/') || v == "*" -} diff --git a/vendor/golang.org/x/net/http2/not_go16.go b/vendor/golang.org/x/net/http2/not_go16.go deleted file mode 100644 index 508cebc..0000000 --- a/vendor/golang.org/x/net/http2/not_go16.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.6 - -package http2 - -import ( - "net/http" - "time" -) - -func configureTransport(t1 *http.Transport) (*Transport, error) { - return nil, errTransportVersion -} - -func transportExpectContinueTimeout(t1 *http.Transport) time.Duration { - return 0 - -} diff --git a/vendor/golang.org/x/net/http2/not_go17.go b/vendor/golang.org/x/net/http2/not_go17.go deleted file mode 100644 index 140434a..0000000 --- a/vendor/golang.org/x/net/http2/not_go17.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.7 - -package http2 - -import ( - "crypto/tls" - "net" - "net/http" - "time" -) - -type contextContext interface { - Done() <-chan struct{} - Err() error -} - -type fakeContext struct{} - -func (fakeContext) Done() <-chan struct{} { return nil } -func (fakeContext) Err() error { panic("should not be called") } - -func reqContext(r *http.Request) fakeContext { - return fakeContext{} -} - -func setResponseUncompressed(res *http.Response) { - // Nothing. -} - -type clientTrace struct{} - -func requestTrace(*http.Request) *clientTrace { return nil } -func traceGotConn(*http.Request, *ClientConn) {} -func traceFirstResponseByte(*clientTrace) {} -func traceWroteHeaders(*clientTrace) {} -func traceWroteRequest(*clientTrace, error) {} -func traceGot100Continue(trace *clientTrace) {} -func traceWait100Continue(trace *clientTrace) {} - -func nop() {} - -func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) { - return nil, nop -} - -func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) { - return ctx, nop -} - -func requestWithContext(req *http.Request, ctx contextContext) *http.Request { - return req -} - -// temporary copy of Go 1.6's private tls.Config.clone: -func cloneTLSConfig(c *tls.Config) *tls.Config { - return &tls.Config{ - Rand: c.Rand, - Time: c.Time, - Certificates: c.Certificates, - NameToCertificate: c.NameToCertificate, - GetCertificate: c.GetCertificate, - RootCAs: c.RootCAs, - NextProtos: c.NextProtos, - ServerName: c.ServerName, - ClientAuth: c.ClientAuth, - ClientCAs: c.ClientCAs, - InsecureSkipVerify: c.InsecureSkipVerify, - CipherSuites: c.CipherSuites, - PreferServerCipherSuites: c.PreferServerCipherSuites, - SessionTicketsDisabled: c.SessionTicketsDisabled, - SessionTicketKey: c.SessionTicketKey, - ClientSessionCache: c.ClientSessionCache, - MinVersion: c.MinVersion, - MaxVersion: c.MaxVersion, - CurvePreferences: c.CurvePreferences, - } -} - -func (cc *ClientConn) Ping(ctx contextContext) error { - return cc.ping(ctx) -} - -func (t *Transport) idleConnTimeout() time.Duration { return 0 } diff --git a/vendor/golang.org/x/net/http2/not_go18.go b/vendor/golang.org/x/net/http2/not_go18.go deleted file mode 100644 index 6f8d3f8..0000000 --- a/vendor/golang.org/x/net/http2/not_go18.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.8 - -package http2 - -import ( - "io" - "net/http" -) - -func configureServer18(h1 *http.Server, h2 *Server) error { - // No IdleTimeout to sync prior to Go 1.8. - return nil -} - -func shouldLogPanic(panicValue interface{}) bool { - return panicValue != nil -} - -func reqGetBody(req *http.Request) func() (io.ReadCloser, error) { - return nil -} - -func reqBodyIsNoBody(io.ReadCloser) bool { return false } - -func go18httpNoBody() io.ReadCloser { return nil } // for tests only diff --git a/vendor/golang.org/x/net/http2/not_go19.go b/vendor/golang.org/x/net/http2/not_go19.go deleted file mode 100644 index 5ae0772..0000000 --- a/vendor/golang.org/x/net/http2/not_go19.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.9 - -package http2 - -import ( - "net/http" -) - -func configureServer19(s *http.Server, conf *Server) error { - // not supported prior to go1.9 - return nil -} diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go deleted file mode 100644 index a614009..0000000 --- a/vendor/golang.org/x/net/http2/pipe.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "errors" - "io" - "sync" -) - -// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like -// io.Pipe except there are no PipeReader/PipeWriter halves, and the -// underlying buffer is an interface. (io.Pipe is always unbuffered) -type pipe struct { - mu sync.Mutex - c sync.Cond // c.L lazily initialized to &p.mu - b pipeBuffer // nil when done reading - err error // read error once empty. non-nil means closed. - breakErr error // immediate read error (caller doesn't see rest of b) - donec chan struct{} // closed on error - readFn func() // optional code to run in Read before error -} - -type pipeBuffer interface { - Len() int - io.Writer - io.Reader -} - -func (p *pipe) Len() int { - p.mu.Lock() - defer p.mu.Unlock() - if p.b == nil { - return 0 - } - return p.b.Len() -} - -// Read waits until data is available and copies bytes -// from the buffer into p. -func (p *pipe) Read(d []byte) (n int, err error) { - p.mu.Lock() - defer p.mu.Unlock() - if p.c.L == nil { - p.c.L = &p.mu - } - for { - if p.breakErr != nil { - return 0, p.breakErr - } - if p.b != nil && p.b.Len() > 0 { - return p.b.Read(d) - } - if p.err != nil { - if p.readFn != nil { - p.readFn() // e.g. copy trailers - p.readFn = nil // not sticky like p.err - } - p.b = nil - return 0, p.err - } - p.c.Wait() - } -} - -var errClosedPipeWrite = errors.New("write on closed buffer") - -// Write copies bytes from p into the buffer and wakes a reader. -// It is an error to write more data than the buffer can hold. -func (p *pipe) Write(d []byte) (n int, err error) { - p.mu.Lock() - defer p.mu.Unlock() - if p.c.L == nil { - p.c.L = &p.mu - } - defer p.c.Signal() - if p.err != nil { - return 0, errClosedPipeWrite - } - if p.breakErr != nil { - return len(d), nil // discard when there is no reader - } - return p.b.Write(d) -} - -// CloseWithError causes the next Read (waking up a current blocked -// Read if needed) to return the provided err after all data has been -// read. 
-// -// The error must be non-nil. -func (p *pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) } - -// BreakWithError causes the next Read (waking up a current blocked -// Read if needed) to return the provided err immediately, without -// waiting for unread data. -func (p *pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) } - -// closeWithErrorAndCode is like CloseWithError but also sets some code to run -// in the caller's goroutine before returning the error. -func (p *pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) } - -func (p *pipe) closeWithError(dst *error, err error, fn func()) { - if err == nil { - panic("err must be non-nil") - } - p.mu.Lock() - defer p.mu.Unlock() - if p.c.L == nil { - p.c.L = &p.mu - } - defer p.c.Signal() - if *dst != nil { - // Already been done. - return - } - p.readFn = fn - if dst == &p.breakErr { - p.b = nil - } - *dst = err - p.closeDoneLocked() -} - -// requires p.mu be held. -func (p *pipe) closeDoneLocked() { - if p.donec == nil { - return - } - // Close if unclosed. This isn't racy since we always - // hold p.mu while closing. - select { - case <-p.donec: - default: - close(p.donec) - } -} - -// Err returns the error (if any) first set by BreakWithError or CloseWithError. -func (p *pipe) Err() error { - p.mu.Lock() - defer p.mu.Unlock() - if p.breakErr != nil { - return p.breakErr - } - return p.err -} - -// Done returns a channel which is closed if and when this pipe is closed -// with CloseWithError. -func (p *pipe) Done() <-chan struct{} { - p.mu.Lock() - defer p.mu.Unlock() - if p.donec == nil { - p.donec = make(chan struct{}) - if p.err != nil || p.breakErr != nil { - // Already hit an error. - p.closeDoneLocked() - } - } - return p.donec -} diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go deleted file mode 100644 index 7938991..0000000 --- a/vendor/golang.org/x/net/http2/server.go +++ /dev/null @@ -1,2878 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// TODO: turn off the serve goroutine when idle, so -// an idle conn only has the readFrames goroutine active. (which could -// also be optimized probably to pin less memory in crypto/tls). This -// would involve tracking when the serve goroutine is active (atomic -// int32 read/CAS probably?) and starting it up when frames arrive, -// and shutting it down when all handlers exit. the occasional PING -// packets could use time.AfterFunc to call sc.wakeStartServeLoop() -// (which is a no-op if already running) and then queue the PING write -// as normal. The serve loop would then exit in most cases (if no -// Handlers running) and not be woken up again until the PING packet -// returns. - -// TODO (maybe): add a mechanism for Handlers to going into -// half-closed-local mode (rw.(io.Closer) test?) but not exit their -// handler, and continue to be able to read from the -// Request.Body. This would be a somewhat semantic change from HTTP/1 -// (or at least what we expose in net/http), so I'd probably want to -// add it there too. For now, this package says that returning from -// the Handler ServeHTTP function means you're both done reading and -// done writing, without a way to stop just one or the other. 
- -package http2 - -import ( - "bufio" - "bytes" - "crypto/tls" - "errors" - "fmt" - "io" - "log" - "math" - "net" - "net/http" - "net/textproto" - "net/url" - "os" - "reflect" - "runtime" - "strconv" - "strings" - "sync" - "time" - - "golang.org/x/net/http/httpguts" - "golang.org/x/net/http2/hpack" -) - -const ( - prefaceTimeout = 10 * time.Second - firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway - handlerChunkWriteSize = 4 << 10 - defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? -) - -var ( - errClientDisconnected = errors.New("client disconnected") - errClosedBody = errors.New("body closed by handler") - errHandlerComplete = errors.New("http2: request body closed due to handler exiting") - errStreamClosed = errors.New("http2: stream closed") -) - -var responseWriterStatePool = sync.Pool{ - New: func() interface{} { - rws := &responseWriterState{} - rws.bw = bufio.NewWriterSize(chunkWriter{rws}, handlerChunkWriteSize) - return rws - }, -} - -// Test hooks. -var ( - testHookOnConn func() - testHookGetServerConn func(*serverConn) - testHookOnPanicMu *sync.Mutex // nil except in tests - testHookOnPanic func(sc *serverConn, panicVal interface{}) (rePanic bool) -) - -// Server is an HTTP/2 server. -type Server struct { - // MaxHandlers limits the number of http.Handler ServeHTTP goroutines - // which may run at a time over all connections. - // Negative or zero no limit. - // TODO: implement - MaxHandlers int - - // MaxConcurrentStreams optionally specifies the number of - // concurrent streams that each client may have open at a - // time. This is unrelated to the number of http.Handler goroutines - // which may be active globally, which is MaxHandlers. - // If zero, MaxConcurrentStreams defaults to at least 100, per - // the HTTP/2 spec's recommendations. - MaxConcurrentStreams uint32 - - // MaxReadFrameSize optionally specifies the largest frame - // this server is willing to read. A valid value is between - // 16k and 16M, inclusive. If zero or otherwise invalid, a - // default value is used. - MaxReadFrameSize uint32 - - // PermitProhibitedCipherSuites, if true, permits the use of - // cipher suites prohibited by the HTTP/2 spec. - PermitProhibitedCipherSuites bool - - // IdleTimeout specifies how long until idle clients should be - // closed with a GOAWAY frame. PING frames are not considered - // activity for the purposes of IdleTimeout. - IdleTimeout time.Duration - - // MaxUploadBufferPerConnection is the size of the initial flow - // control window for each connections. The HTTP/2 spec does not - // allow this to be smaller than 65535 or larger than 2^32-1. - // If the value is outside this range, a default value will be - // used instead. - MaxUploadBufferPerConnection int32 - - // MaxUploadBufferPerStream is the size of the initial flow control - // window for each stream. The HTTP/2 spec does not allow this to - // be larger than 2^32-1. If the value is zero or larger than the - // maximum, a default value will be used instead. - MaxUploadBufferPerStream int32 - - // NewWriteScheduler constructs a write scheduler for a connection. - // If nil, a default scheduler is chosen. - NewWriteScheduler func() WriteScheduler - - // Internal state. This is a pointer (rather than embedded directly) - // so that we don't embed a Mutex in this struct, which will make the - // struct non-copyable, which might break some callers. 
- state *serverInternalState -} - -func (s *Server) initialConnRecvWindowSize() int32 { - if s.MaxUploadBufferPerConnection > initialWindowSize { - return s.MaxUploadBufferPerConnection - } - return 1 << 20 -} - -func (s *Server) initialStreamRecvWindowSize() int32 { - if s.MaxUploadBufferPerStream > 0 { - return s.MaxUploadBufferPerStream - } - return 1 << 20 -} - -func (s *Server) maxReadFrameSize() uint32 { - if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize { - return v - } - return defaultMaxReadFrameSize -} - -func (s *Server) maxConcurrentStreams() uint32 { - if v := s.MaxConcurrentStreams; v > 0 { - return v - } - return defaultMaxStreams -} - -type serverInternalState struct { - mu sync.Mutex - activeConns map[*serverConn]struct{} -} - -func (s *serverInternalState) registerConn(sc *serverConn) { - if s == nil { - return // if the Server was used without calling ConfigureServer - } - s.mu.Lock() - s.activeConns[sc] = struct{}{} - s.mu.Unlock() -} - -func (s *serverInternalState) unregisterConn(sc *serverConn) { - if s == nil { - return // if the Server was used without calling ConfigureServer - } - s.mu.Lock() - delete(s.activeConns, sc) - s.mu.Unlock() -} - -func (s *serverInternalState) startGracefulShutdown() { - if s == nil { - return // if the Server was used without calling ConfigureServer - } - s.mu.Lock() - for sc := range s.activeConns { - sc.startGracefulShutdown() - } - s.mu.Unlock() -} - -// ConfigureServer adds HTTP/2 support to a net/http Server. -// -// The configuration conf may be nil. -// -// ConfigureServer must be called before s begins serving. -func ConfigureServer(s *http.Server, conf *Server) error { - if s == nil { - panic("nil *http.Server") - } - if conf == nil { - conf = new(Server) - } - conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})} - if err := configureServer18(s, conf); err != nil { - return err - } - if err := configureServer19(s, conf); err != nil { - return err - } - - if s.TLSConfig == nil { - s.TLSConfig = new(tls.Config) - } else if s.TLSConfig.CipherSuites != nil { - // If they already provided a CipherSuite list, return - // an error if it has a bad order or is missing - // ECDHE_RSA_WITH_AES_128_GCM_SHA256 or ECDHE_ECDSA_WITH_AES_128_GCM_SHA256. - haveRequired := false - sawBad := false - for i, cs := range s.TLSConfig.CipherSuites { - switch cs { - case tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - // Alternative MTI cipher to not discourage ECDSA-only servers. - // See http://golang.org/cl/30721 for further information. - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: - haveRequired = true - } - if isBadCipher(cs) { - sawBad = true - } else if sawBad { - return fmt.Errorf("http2: TLSConfig.CipherSuites index %d contains an HTTP/2-approved cipher suite (%#04x), but it comes after unapproved cipher suites. With this configuration, clients that don't support previous, approved cipher suites may be given an unapproved one and reject the connection.", i, cs) - } - } - if !haveRequired { - return fmt.Errorf("http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher.") - } - } - - // Note: not setting MinVersion to tls.VersionTLS12, - // as we don't want to interfere with HTTP/1.1 traffic - // on the user's server. We enforce TLS 1.2 later once - // we accept a connection. Ideally this should be done - // during next-proto selection, but using TLS <1.2 with - // HTTP/2 is still the client's bug. 
- - s.TLSConfig.PreferServerCipherSuites = true - - haveNPN := false - for _, p := range s.TLSConfig.NextProtos { - if p == NextProtoTLS { - haveNPN = true - break - } - } - if !haveNPN { - s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS) - } - - if s.TLSNextProto == nil { - s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){} - } - protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) { - if testHookOnConn != nil { - testHookOnConn() - } - conf.ServeConn(c, &ServeConnOpts{ - Handler: h, - BaseConfig: hs, - }) - } - s.TLSNextProto[NextProtoTLS] = protoHandler - return nil -} - -// ServeConnOpts are options for the Server.ServeConn method. -type ServeConnOpts struct { - // BaseConfig optionally sets the base configuration - // for values. If nil, defaults are used. - BaseConfig *http.Server - - // Handler specifies which handler to use for processing - // requests. If nil, BaseConfig.Handler is used. If BaseConfig - // or BaseConfig.Handler is nil, http.DefaultServeMux is used. - Handler http.Handler -} - -func (o *ServeConnOpts) baseConfig() *http.Server { - if o != nil && o.BaseConfig != nil { - return o.BaseConfig - } - return new(http.Server) -} - -func (o *ServeConnOpts) handler() http.Handler { - if o != nil { - if o.Handler != nil { - return o.Handler - } - if o.BaseConfig != nil && o.BaseConfig.Handler != nil { - return o.BaseConfig.Handler - } - } - return http.DefaultServeMux -} - -// ServeConn serves HTTP/2 requests on the provided connection and -// blocks until the connection is no longer readable. -// -// ServeConn starts speaking HTTP/2 assuming that c has not had any -// reads or writes. It writes its initial settings frame and expects -// to be able to read the preface and settings frame from the -// client. If c has a ConnectionState method like a *tls.Conn, the -// ConnectionState is used to verify the TLS ciphersuite and to set -// the Request.TLS field in Handlers. -// -// ServeConn does not support h2c by itself. Any h2c support must be -// implemented in terms of providing a suitably-behaving net.Conn. -// -// The opts parameter is optional. If nil, default values are used. -func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { - baseCtx, cancel := serverConnBaseContext(c, opts) - defer cancel() - - sc := &serverConn{ - srv: s, - hs: opts.baseConfig(), - conn: c, - baseCtx: baseCtx, - remoteAddrStr: c.RemoteAddr().String(), - bw: newBufferedWriter(c), - handler: opts.handler(), - streams: make(map[uint32]*stream), - readFrameCh: make(chan readFrameResult), - wantWriteFrameCh: make(chan FrameWriteRequest, 8), - serveMsgCh: make(chan interface{}, 8), - wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync - bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way - doneServing: make(chan struct{}), - clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value" - advMaxStreams: s.maxConcurrentStreams(), - initialStreamSendWindowSize: initialWindowSize, - maxFrameSize: initialMaxFrameSize, - headerTableSize: initialHeaderTableSize, - serveG: newGoroutineLock(), - pushEnabled: true, - } - - s.state.registerConn(sc) - defer s.state.unregisterConn(sc) - - // The net/http package sets the write deadline from the - // http.Server.WriteTimeout during the TLS handshake, but then - // passes the connection off to us with the deadline already set. - // Write deadlines are set per stream in serverConn.newStream. 
- // Disarm the net.Conn write deadline here. - if sc.hs.WriteTimeout != 0 { - sc.conn.SetWriteDeadline(time.Time{}) - } - - if s.NewWriteScheduler != nil { - sc.writeSched = s.NewWriteScheduler() - } else { - sc.writeSched = NewRandomWriteScheduler() - } - - // These start at the RFC-specified defaults. If there is a higher - // configured value for inflow, that will be updated when we send a - // WINDOW_UPDATE shortly after sending SETTINGS. - sc.flow.add(initialWindowSize) - sc.inflow.add(initialWindowSize) - sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) - - fr := NewFramer(sc.bw, c) - fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) - fr.MaxHeaderListSize = sc.maxHeaderListSize() - fr.SetMaxReadFrameSize(s.maxReadFrameSize()) - sc.framer = fr - - if tc, ok := c.(connectionStater); ok { - sc.tlsState = new(tls.ConnectionState) - *sc.tlsState = tc.ConnectionState() - // 9.2 Use of TLS Features - // An implementation of HTTP/2 over TLS MUST use TLS - // 1.2 or higher with the restrictions on feature set - // and cipher suite described in this section. Due to - // implementation limitations, it might not be - // possible to fail TLS negotiation. An endpoint MUST - // immediately terminate an HTTP/2 connection that - // does not meet the TLS requirements described in - // this section with a connection error (Section - // 5.4.1) of type INADEQUATE_SECURITY. - if sc.tlsState.Version < tls.VersionTLS12 { - sc.rejectConn(ErrCodeInadequateSecurity, "TLS version too low") - return - } - - if sc.tlsState.ServerName == "" { - // Client must use SNI, but we don't enforce that anymore, - // since it was causing problems when connecting to bare IP - // addresses during development. - // - // TODO: optionally enforce? Or enforce at the time we receive - // a new request, and verify the ServerName matches the :authority? - // But that precludes proxy situations, perhaps. - // - // So for now, do nothing here again. - } - - if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { - // "Endpoints MAY choose to generate a connection error - // (Section 5.4.1) of type INADEQUATE_SECURITY if one of - // the prohibited cipher suites are negotiated." - // - // We choose that. In my opinion, the spec is weak - // here. It also says both parties must support at least - // TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 so there's no - // excuses here. If we really must, we could allow an - // "AllowInsecureWeakCiphers" option on the server later. - // Let's see how it plays out first. - sc.rejectConn(ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite)) - return - } - } - - if hook := testHookGetServerConn; hook != nil { - hook(sc) - } - sc.serve() -} - -func (sc *serverConn) rejectConn(err ErrCode, debug string) { - sc.vlogf("http2: server rejecting conn: %v, %s", err, debug) - // ignoring errors. hanging up anyway. 
- sc.framer.WriteGoAway(0, err, []byte(debug)) - sc.bw.Flush() - sc.conn.Close() -} - -type serverConn struct { - // Immutable: - srv *Server - hs *http.Server - conn net.Conn - bw *bufferedWriter // writing to conn - handler http.Handler - baseCtx contextContext - framer *Framer - doneServing chan struct{} // closed when serverConn.serve ends - readFrameCh chan readFrameResult // written by serverConn.readFrames - wantWriteFrameCh chan FrameWriteRequest // from handlers -> serve - wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes - bodyReadCh chan bodyReadMsg // from handlers -> serve - serveMsgCh chan interface{} // misc messages & code to send to / run on the serve loop - flow flow // conn-wide (not stream-specific) outbound flow control - inflow flow // conn-wide inbound flow control - tlsState *tls.ConnectionState // shared by all handlers, like net/http - remoteAddrStr string - writeSched WriteScheduler - - // Everything following is owned by the serve loop; use serveG.check(): - serveG goroutineLock // used to verify funcs are on serve() - pushEnabled bool - sawFirstSettings bool // got the initial SETTINGS frame after the preface - needToSendSettingsAck bool - unackedSettings int // how many SETTINGS have we sent without ACKs? - clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit) - advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client - curClientStreams uint32 // number of open streams initiated by the client - curPushedStreams uint32 // number of open streams initiated by server push - maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests - maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes - streams map[uint32]*stream - initialStreamSendWindowSize int32 - maxFrameSize int32 - headerTableSize uint32 - peerMaxHeaderListSize uint32 // zero means unknown (default) - canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case - writingFrame bool // started writing a frame (on serve goroutine or separate) - writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh - needsFrameFlush bool // last frame write wasn't a flush - inGoAway bool // we've started to or sent GOAWAY - inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop - needToSendGoAway bool // we need to schedule a GOAWAY frame write - goAwayCode ErrCode - shutdownTimer *time.Timer // nil until used - idleTimer *time.Timer // nil if unused - - // Owned by the writeFrameAsync goroutine: - headerWriteBuf bytes.Buffer - hpackEncoder *hpack.Encoder - - // Used by startGracefulShutdown. - shutdownOnce sync.Once -} - -func (sc *serverConn) maxHeaderListSize() uint32 { - n := sc.hs.MaxHeaderBytes - if n <= 0 { - n = http.DefaultMaxHeaderBytes - } - // http2's count is in a slightly different unit and includes 32 bytes per pair. - // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. - const perFieldOverhead = 32 // per http2 spec - const typicalHeaders = 10 // conservative - return uint32(n + typicalHeaders*perFieldOverhead) -} - -func (sc *serverConn) curOpenStreams() uint32 { - sc.serveG.check() - return sc.curClientStreams + sc.curPushedStreams -} - -// stream represents a stream. This is the minimal metadata needed by -// the serve goroutine. 
Most of the actual stream state is owned by -// the http.Handler's goroutine in the responseWriter. Because the -// responseWriter's responseWriterState is recycled at the end of a -// handler, this struct intentionally has no pointer to the -// *responseWriter{,State} itself, as the Handler ending nils out the -// responseWriter's state field. -type stream struct { - // immutable: - sc *serverConn - id uint32 - body *pipe // non-nil if expecting DATA frames - cw closeWaiter // closed wait stream transitions to closed state - ctx contextContext - cancelCtx func() - - // owned by serverConn's serve loop: - bodyBytes int64 // body bytes seen so far - declBodyBytes int64 // or -1 if undeclared - flow flow // limits writing from Handler to client - inflow flow // what the client is allowed to POST/etc to us - parent *stream // or nil - numTrailerValues int64 - weight uint8 - state streamState - resetQueued bool // RST_STREAM queued for write; set by sc.resetStream - gotTrailerHeader bool // HEADER frame for trailers was seen - wroteHeaders bool // whether we wrote headers (not status 100) - writeDeadline *time.Timer // nil if unused - - trailer http.Header // accumulated trailers - reqTrailer http.Header // handler's Request.Trailer -} - -func (sc *serverConn) Framer() *Framer { return sc.framer } -func (sc *serverConn) CloseConn() error { return sc.conn.Close() } -func (sc *serverConn) Flush() error { return sc.bw.Flush() } -func (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) { - return sc.hpackEncoder, &sc.headerWriteBuf -} - -func (sc *serverConn) state(streamID uint32) (streamState, *stream) { - sc.serveG.check() - // http://tools.ietf.org/html/rfc7540#section-5.1 - if st, ok := sc.streams[streamID]; ok { - return st.state, st - } - // "The first use of a new stream identifier implicitly closes all - // streams in the "idle" state that might have been initiated by - // that peer with a lower-valued stream identifier. For example, if - // a client sends a HEADERS frame on stream 7 without ever sending a - // frame on stream 5, then stream 5 transitions to the "closed" - // state when the first frame for stream 7 is sent or received." - if streamID%2 == 1 { - if streamID <= sc.maxClientStreamID { - return stateClosed, nil - } - } else { - if streamID <= sc.maxPushPromiseID { - return stateClosed, nil - } - } - return stateIdle, nil -} - -// setConnState calls the net/http ConnState hook for this connection, if configured. -// Note that the net/http package does StateNew and StateClosed for us. -// There is currently no plan for StateHijacked or hijacking HTTP/2 connections. -func (sc *serverConn) setConnState(state http.ConnState) { - if sc.hs.ConnState != nil { - sc.hs.ConnState(sc.conn, state) - } -} - -func (sc *serverConn) vlogf(format string, args ...interface{}) { - if VerboseLogs { - sc.logf(format, args...) - } -} - -func (sc *serverConn) logf(format string, args ...interface{}) { - if lg := sc.hs.ErrorLog; lg != nil { - lg.Printf(format, args...) - } else { - log.Printf(format, args...) - } -} - -// errno returns v's underlying uintptr, else 0. -// -// TODO: remove this helper function once http2 can use build -// tags. See comment in isClosedConnError. -func errno(v error) uintptr { - if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr { - return uintptr(rv.Uint()) - } - return 0 -} - -// isClosedConnError reports whether err is an error from use of a closed -// network connection. 
-func isClosedConnError(err error) bool { - if err == nil { - return false - } - - // TODO: remove this string search and be more like the Windows - // case below. That might involve modifying the standard library - // to return better error types. - str := err.Error() - if strings.Contains(str, "use of closed network connection") { - return true - } - - // TODO(bradfitz): x/tools/cmd/bundle doesn't really support - // build tags, so I can't make an http2_windows.go file with - // Windows-specific stuff. Fix that and move this, once we - // have a way to bundle this into std's net/http somehow. - if runtime.GOOS == "windows" { - if oe, ok := err.(*net.OpError); ok && oe.Op == "read" { - if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" { - const WSAECONNABORTED = 10053 - const WSAECONNRESET = 10054 - if n := errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED { - return true - } - } - } - } - return false -} - -func (sc *serverConn) condlogf(err error, format string, args ...interface{}) { - if err == nil { - return - } - if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) || err == errPrefaceTimeout { - // Boring, expected errors. - sc.vlogf(format, args...) - } else { - sc.logf(format, args...) - } -} - -func (sc *serverConn) canonicalHeader(v string) string { - sc.serveG.check() - cv, ok := commonCanonHeader[v] - if ok { - return cv - } - cv, ok = sc.canonHeader[v] - if ok { - return cv - } - if sc.canonHeader == nil { - sc.canonHeader = make(map[string]string) - } - cv = http.CanonicalHeaderKey(v) - sc.canonHeader[v] = cv - return cv -} - -type readFrameResult struct { - f Frame // valid until readMore is called - err error - - // readMore should be called once the consumer no longer needs or - // retains f. After readMore, f is invalid and more frames can be - // read. - readMore func() -} - -// readFrames is the loop that reads incoming frames. -// It takes care to only read one frame at a time, blocking until the -// consumer is done with the frame. -// It's run on its own goroutine. -func (sc *serverConn) readFrames() { - gate := make(gate) - gateDone := gate.Done - for { - f, err := sc.framer.ReadFrame() - select { - case sc.readFrameCh <- readFrameResult{f, err, gateDone}: - case <-sc.doneServing: - return - } - select { - case <-gate: - case <-sc.doneServing: - return - } - if terminalReadFrameError(err) { - return - } - } -} - -// frameWriteResult is the message passed from writeFrameAsync to the serve goroutine. -type frameWriteResult struct { - wr FrameWriteRequest // what was written (or attempted) - err error // result of the writeFrame call -} - -// writeFrameAsync runs in its own goroutine and writes a single frame -// and then reports when it's done. -// At most one goroutine can be running writeFrameAsync at a time per -// serverConn. -func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest) { - err := wr.write.writeFrame(sc) - sc.wroteFrameCh <- frameWriteResult{wr, err} -} - -func (sc *serverConn) closeAllStreamsOnConnClose() { - sc.serveG.check() - for _, st := range sc.streams { - sc.closeStream(st, errClientDisconnected) - } -} - -func (sc *serverConn) stopShutdownTimer() { - sc.serveG.check() - if t := sc.shutdownTimer; t != nil { - t.Stop() - } -} - -func (sc *serverConn) notePanic() { - // Note: this is for serverConn.serve panicking, not http.Handler code. 
- if testHookOnPanicMu != nil { - testHookOnPanicMu.Lock() - defer testHookOnPanicMu.Unlock() - } - if testHookOnPanic != nil { - if e := recover(); e != nil { - if testHookOnPanic(sc, e) { - panic(e) - } - } - } -} - -func (sc *serverConn) serve() { - sc.serveG.check() - defer sc.notePanic() - defer sc.conn.Close() - defer sc.closeAllStreamsOnConnClose() - defer sc.stopShutdownTimer() - defer close(sc.doneServing) // unblocks handlers trying to send - - if VerboseLogs { - sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs) - } - - sc.writeFrame(FrameWriteRequest{ - write: writeSettings{ - {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, - {SettingMaxConcurrentStreams, sc.advMaxStreams}, - {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, - {SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())}, - }, - }) - sc.unackedSettings++ - - // Each connection starts with intialWindowSize inflow tokens. - // If a higher value is configured, we add more tokens. - if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 { - sc.sendWindowUpdate(nil, int(diff)) - } - - if err := sc.readPreface(); err != nil { - sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err) - return - } - // Now that we've got the preface, get us out of the - // "StateNew" state. We can't go directly to idle, though. - // Active means we read some data and anticipate a request. We'll - // do another Active when we get a HEADERS frame. - sc.setConnState(http.StateActive) - sc.setConnState(http.StateIdle) - - if sc.srv.IdleTimeout != 0 { - sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) - defer sc.idleTimer.Stop() - } - - go sc.readFrames() // closed by defer sc.conn.Close above - - settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer) - defer settingsTimer.Stop() - - loopNum := 0 - for { - loopNum++ - select { - case wr := <-sc.wantWriteFrameCh: - if se, ok := wr.write.(StreamError); ok { - sc.resetStream(se) - break - } - sc.writeFrame(wr) - case res := <-sc.wroteFrameCh: - sc.wroteFrame(res) - case res := <-sc.readFrameCh: - if !sc.processFrameFromReader(res) { - return - } - res.readMore() - if settingsTimer != nil { - settingsTimer.Stop() - settingsTimer = nil - } - case m := <-sc.bodyReadCh: - sc.noteBodyRead(m.st, m.n) - case msg := <-sc.serveMsgCh: - switch v := msg.(type) { - case func(int): - v(loopNum) // for testing - case *serverMessage: - switch v { - case settingsTimerMsg: - sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr()) - return - case idleTimerMsg: - sc.vlogf("connection is idle") - sc.goAway(ErrCodeNo) - case shutdownTimerMsg: - sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) - return - case gracefulShutdownMsg: - sc.startGracefulShutdownInternal() - default: - panic("unknown timer") - } - case *startPushRequest: - sc.startPush(v) - default: - panic(fmt.Sprintf("unexpected type %T", v)) - } - } - - // Start the shutdown timer after sending a GOAWAY. When sending GOAWAY - // with no error code (graceful shutdown), don't start the timer until - // all open streams have been completed. 
- sentGoAway := sc.inGoAway && !sc.needToSendGoAway && !sc.writingFrame - gracefulShutdownComplete := sc.goAwayCode == ErrCodeNo && sc.curOpenStreams() == 0 - if sentGoAway && sc.shutdownTimer == nil && (sc.goAwayCode != ErrCodeNo || gracefulShutdownComplete) { - sc.shutDownIn(goAwayTimeout) - } - } -} - -func (sc *serverConn) awaitGracefulShutdown(sharedCh <-chan struct{}, privateCh chan struct{}) { - select { - case <-sc.doneServing: - case <-sharedCh: - close(privateCh) - } -} - -type serverMessage int - -// Message values sent to serveMsgCh. -var ( - settingsTimerMsg = new(serverMessage) - idleTimerMsg = new(serverMessage) - shutdownTimerMsg = new(serverMessage) - gracefulShutdownMsg = new(serverMessage) -) - -func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) } -func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) } -func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) } - -func (sc *serverConn) sendServeMsg(msg interface{}) { - sc.serveG.checkNotOn() // NOT - select { - case sc.serveMsgCh <- msg: - case <-sc.doneServing: - } -} - -var errPrefaceTimeout = errors.New("timeout waiting for client preface") - -// readPreface reads the ClientPreface greeting from the peer or -// returns errPrefaceTimeout on timeout, or an error if the greeting -// is invalid. -func (sc *serverConn) readPreface() error { - errc := make(chan error, 1) - go func() { - // Read the client preface - buf := make([]byte, len(ClientPreface)) - if _, err := io.ReadFull(sc.conn, buf); err != nil { - errc <- err - } else if !bytes.Equal(buf, clientPreface) { - errc <- fmt.Errorf("bogus greeting %q", buf) - } else { - errc <- nil - } - }() - timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server? - defer timer.Stop() - select { - case <-timer.C: - return errPrefaceTimeout - case err := <-errc: - if err == nil { - if VerboseLogs { - sc.vlogf("http2: server: client %v said hello", sc.conn.RemoteAddr()) - } - } - return err - } -} - -var errChanPool = sync.Pool{ - New: func() interface{} { return make(chan error, 1) }, -} - -var writeDataPool = sync.Pool{ - New: func() interface{} { return new(writeData) }, -} - -// writeDataFromHandler writes DATA response frames from a handler on -// the given stream. -func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error { - ch := errChanPool.Get().(chan error) - writeArg := writeDataPool.Get().(*writeData) - *writeArg = writeData{stream.id, data, endStream} - err := sc.writeFrameFromHandler(FrameWriteRequest{ - write: writeArg, - stream: stream, - done: ch, - }) - if err != nil { - return err - } - var frameWriteDone bool // the frame write is done (successfully or not) - select { - case err = <-ch: - frameWriteDone = true - case <-sc.doneServing: - return errClientDisconnected - case <-stream.cw: - // If both ch and stream.cw were ready (as might - // happen on the final Write after an http.Handler - // ends), prefer the write result. Otherwise this - // might just be us successfully closing the stream. - // The writeFrameAsync and serve goroutines guarantee - // that the ch send will happen before the stream.cw - // close. - select { - case err = <-ch: - frameWriteDone = true - default: - return errStreamClosed - } - } - errChanPool.Put(ch) - if frameWriteDone { - writeDataPool.Put(writeArg) - } - return err -} - -// writeFrameFromHandler sends wr to sc.wantWriteFrameCh, but aborts -// if the connection has gone away. 
-// -// This must not be run from the serve goroutine itself, else it might -// deadlock writing to sc.wantWriteFrameCh (which is only mildly -// buffered and is read by serve itself). If you're on the serve -// goroutine, call writeFrame instead. -func (sc *serverConn) writeFrameFromHandler(wr FrameWriteRequest) error { - sc.serveG.checkNotOn() // NOT - select { - case sc.wantWriteFrameCh <- wr: - return nil - case <-sc.doneServing: - // Serve loop is gone. - // Client has closed their connection to the server. - return errClientDisconnected - } -} - -// writeFrame schedules a frame to write and sends it if there's nothing -// already being written. -// -// There is no pushback here (the serve goroutine never blocks). It's -// the http.Handlers that block, waiting for their previous frames to -// make it onto the wire -// -// If you're not on the serve goroutine, use writeFrameFromHandler instead. -func (sc *serverConn) writeFrame(wr FrameWriteRequest) { - sc.serveG.check() - - // If true, wr will not be written and wr.done will not be signaled. - var ignoreWrite bool - - // We are not allowed to write frames on closed streams. RFC 7540 Section - // 5.1.1 says: "An endpoint MUST NOT send frames other than PRIORITY on - // a closed stream." Our server never sends PRIORITY, so that exception - // does not apply. - // - // The serverConn might close an open stream while the stream's handler - // is still running. For example, the server might close a stream when it - // receives bad data from the client. If this happens, the handler might - // attempt to write a frame after the stream has been closed (since the - // handler hasn't yet been notified of the close). In this case, we simply - // ignore the frame. The handler will notice that the stream is closed when - // it waits for the frame to be written. - // - // As an exception to this rule, we allow sending RST_STREAM after close. - // This allows us to immediately reject new streams without tracking any - // state for those streams (except for the queued RST_STREAM frame). This - // may result in duplicate RST_STREAMs in some cases, but the client should - // ignore those. - if wr.StreamID() != 0 { - _, isReset := wr.write.(StreamError) - if state, _ := sc.state(wr.StreamID()); state == stateClosed && !isReset { - ignoreWrite = true - } - } - - // Don't send a 100-continue response if we've already sent headers. - // See golang.org/issue/14030. - switch wr.write.(type) { - case *writeResHeaders: - wr.stream.wroteHeaders = true - case write100ContinueHeadersFrame: - if wr.stream.wroteHeaders { - // We do not need to notify wr.done because this frame is - // never written with wr.done != nil. - if wr.done != nil { - panic("wr.done != nil for write100ContinueHeadersFrame") - } - ignoreWrite = true - } - } - - if !ignoreWrite { - sc.writeSched.Push(wr) - } - sc.scheduleFrameWrite() -} - -// startFrameWrite starts a goroutine to write wr (in a separate -// goroutine since that might block on the network), and updates the -// serve goroutine's state about the world, updated from info in wr. -func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) { - sc.serveG.check() - if sc.writingFrame { - panic("internal error: can only be writing one frame at a time") - } - - st := wr.stream - if st != nil { - switch st.state { - case stateHalfClosedLocal: - switch wr.write.(type) { - case StreamError, handlerPanicRST, writeWindowUpdate: - // RFC 7540 Section 5.1 allows sending RST_STREAM, PRIORITY, and WINDOW_UPDATE - // in this state. 
(We never send PRIORITY from the server, so that is not checked.) - default: - panic(fmt.Sprintf("internal error: attempt to send frame on a half-closed-local stream: %v", wr)) - } - case stateClosed: - panic(fmt.Sprintf("internal error: attempt to send frame on a closed stream: %v", wr)) - } - } - if wpp, ok := wr.write.(*writePushPromise); ok { - var err error - wpp.promisedID, err = wpp.allocatePromisedID() - if err != nil { - sc.writingFrameAsync = false - wr.replyToWriter(err) - return - } - } - - sc.writingFrame = true - sc.needsFrameFlush = true - if wr.write.staysWithinBuffer(sc.bw.Available()) { - sc.writingFrameAsync = false - err := wr.write.writeFrame(sc) - sc.wroteFrame(frameWriteResult{wr, err}) - } else { - sc.writingFrameAsync = true - go sc.writeFrameAsync(wr) - } -} - -// errHandlerPanicked is the error given to any callers blocked in a read from -// Request.Body when the main goroutine panics. Since most handlers read in the -// the main ServeHTTP goroutine, this will show up rarely. -var errHandlerPanicked = errors.New("http2: handler panicked") - -// wroteFrame is called on the serve goroutine with the result of -// whatever happened on writeFrameAsync. -func (sc *serverConn) wroteFrame(res frameWriteResult) { - sc.serveG.check() - if !sc.writingFrame { - panic("internal error: expected to be already writing a frame") - } - sc.writingFrame = false - sc.writingFrameAsync = false - - wr := res.wr - - if writeEndsStream(wr.write) { - st := wr.stream - if st == nil { - panic("internal error: expecting non-nil stream") - } - switch st.state { - case stateOpen: - // Here we would go to stateHalfClosedLocal in - // theory, but since our handler is done and - // the net/http package provides no mechanism - // for closing a ResponseWriter while still - // reading data (see possible TODO at top of - // this file), we go into closed state here - // anyway, after telling the peer we're - // hanging up on them. We'll transition to - // stateClosed after the RST_STREAM frame is - // written. - st.state = stateHalfClosedLocal - // Section 8.1: a server MAY request that the client abort - // transmission of a request without error by sending a - // RST_STREAM with an error code of NO_ERROR after sending - // a complete response. - sc.resetStream(streamError(st.id, ErrCodeNo)) - case stateHalfClosedRemote: - sc.closeStream(st, errHandlerComplete) - } - } else { - switch v := wr.write.(type) { - case StreamError: - // st may be unknown if the RST_STREAM was generated to reject bad input. - if st, ok := sc.streams[v.StreamID]; ok { - sc.closeStream(st, v) - } - case handlerPanicRST: - sc.closeStream(wr.stream, errHandlerPanicked) - } - } - - // Reply (if requested) to unblock the ServeHTTP goroutine. - wr.replyToWriter(res.err) - - sc.scheduleFrameWrite() -} - -// scheduleFrameWrite tickles the frame writing scheduler. -// -// If a frame is already being written, nothing happens. This will be called again -// when the frame is done being written. -// -// If a frame isn't being written we need to send one, the best frame -// to send is selected, preferring first things that aren't -// stream-specific (e.g. ACKing settings), and then finding the -// highest priority stream. -// -// If a frame isn't being written and there's nothing else to send, we -// flush the write buffer. 
-func (sc *serverConn) scheduleFrameWrite() { - sc.serveG.check() - if sc.writingFrame || sc.inFrameScheduleLoop { - return - } - sc.inFrameScheduleLoop = true - for !sc.writingFrameAsync { - if sc.needToSendGoAway { - sc.needToSendGoAway = false - sc.startFrameWrite(FrameWriteRequest{ - write: &writeGoAway{ - maxStreamID: sc.maxClientStreamID, - code: sc.goAwayCode, - }, - }) - continue - } - if sc.needToSendSettingsAck { - sc.needToSendSettingsAck = false - sc.startFrameWrite(FrameWriteRequest{write: writeSettingsAck{}}) - continue - } - if !sc.inGoAway || sc.goAwayCode == ErrCodeNo { - if wr, ok := sc.writeSched.Pop(); ok { - sc.startFrameWrite(wr) - continue - } - } - if sc.needsFrameFlush { - sc.startFrameWrite(FrameWriteRequest{write: flushFrameWriter{}}) - sc.needsFrameFlush = false // after startFrameWrite, since it sets this true - continue - } - break - } - sc.inFrameScheduleLoop = false -} - -// startGracefulShutdown gracefully shuts down a connection. This -// sends GOAWAY with ErrCodeNo to tell the client we're gracefully -// shutting down. The connection isn't closed until all current -// streams are done. -// -// startGracefulShutdown returns immediately; it does not wait until -// the connection has shut down. -func (sc *serverConn) startGracefulShutdown() { - sc.serveG.checkNotOn() // NOT - sc.shutdownOnce.Do(func() { sc.sendServeMsg(gracefulShutdownMsg) }) -} - -// After sending GOAWAY, the connection will close after goAwayTimeout. -// If we close the connection immediately after sending GOAWAY, there may -// be unsent data in our kernel receive buffer, which will cause the kernel -// to send a TCP RST on close() instead of a FIN. This RST will abort the -// connection immediately, whether or not the client had received the GOAWAY. -// -// Ideally we should delay for at least 1 RTT + epsilon so the client has -// a chance to read the GOAWAY and stop sending messages. Measuring RTT -// is hard, so we approximate with 1 second. See golang.org/issue/18701. -// -// This is a var so it can be shorter in tests, where all requests uses the -// loopback interface making the expected RTT very small. -// -// TODO: configurable? -var goAwayTimeout = 1 * time.Second - -func (sc *serverConn) startGracefulShutdownInternal() { - sc.goAway(ErrCodeNo) -} - -func (sc *serverConn) goAway(code ErrCode) { - sc.serveG.check() - if sc.inGoAway { - return - } - sc.inGoAway = true - sc.needToSendGoAway = true - sc.goAwayCode = code - sc.scheduleFrameWrite() -} - -func (sc *serverConn) shutDownIn(d time.Duration) { - sc.serveG.check() - sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer) -} - -func (sc *serverConn) resetStream(se StreamError) { - sc.serveG.check() - sc.writeFrame(FrameWriteRequest{write: se}) - if st, ok := sc.streams[se.StreamID]; ok { - st.resetQueued = true - } -} - -// processFrameFromReader processes the serve loop's read from readFrameCh from the -// frame-reading goroutine. -// processFrameFromReader returns whether the connection should be kept open. -func (sc *serverConn) processFrameFromReader(res readFrameResult) bool { - sc.serveG.check() - err := res.err - if err != nil { - if err == ErrFrameTooLarge { - sc.goAway(ErrCodeFrameSize) - return true // goAway will close the loop - } - clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) - if clientGone { - // TODO: could we also get into this state if - // the peer does a half close - // (e.g. 
CloseWrite) because they're done - // sending frames but they're still wanting - // our open replies? Investigate. - // TODO: add CloseWrite to crypto/tls.Conn first - // so we have a way to test this? I suppose - // just for testing we could have a non-TLS mode. - return false - } - } else { - f := res.f - if VerboseLogs { - sc.vlogf("http2: server read frame %v", summarizeFrame(f)) - } - err = sc.processFrame(f) - if err == nil { - return true - } - } - - switch ev := err.(type) { - case StreamError: - sc.resetStream(ev) - return true - case goAwayFlowError: - sc.goAway(ErrCodeFlowControl) - return true - case ConnectionError: - sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev) - sc.goAway(ErrCode(ev)) - return true // goAway will handle shutdown - default: - if res.err != nil { - sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err) - } else { - sc.logf("http2: server closing client connection: %v", err) - } - return false - } -} - -func (sc *serverConn) processFrame(f Frame) error { - sc.serveG.check() - - // First frame received must be SETTINGS. - if !sc.sawFirstSettings { - if _, ok := f.(*SettingsFrame); !ok { - return ConnectionError(ErrCodeProtocol) - } - sc.sawFirstSettings = true - } - - switch f := f.(type) { - case *SettingsFrame: - return sc.processSettings(f) - case *MetaHeadersFrame: - return sc.processHeaders(f) - case *WindowUpdateFrame: - return sc.processWindowUpdate(f) - case *PingFrame: - return sc.processPing(f) - case *DataFrame: - return sc.processData(f) - case *RSTStreamFrame: - return sc.processResetStream(f) - case *PriorityFrame: - return sc.processPriority(f) - case *GoAwayFrame: - return sc.processGoAway(f) - case *PushPromiseFrame: - // A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE - // frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR. - return ConnectionError(ErrCodeProtocol) - default: - sc.vlogf("http2: server ignoring frame: %v", f.Header()) - return nil - } -} - -func (sc *serverConn) processPing(f *PingFrame) error { - sc.serveG.check() - if f.IsAck() { - // 6.7 PING: " An endpoint MUST NOT respond to PING frames - // containing this flag." - return nil - } - if f.StreamID != 0 { - // "PING frames are not associated with any individual - // stream. If a PING frame is received with a stream - // identifier field value other than 0x0, the recipient MUST - // respond with a connection error (Section 5.4.1) of type - // PROTOCOL_ERROR." - return ConnectionError(ErrCodeProtocol) - } - if sc.inGoAway && sc.goAwayCode != ErrCodeNo { - return nil - } - sc.writeFrame(FrameWriteRequest{write: writePingAck{f}}) - return nil -} - -func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error { - sc.serveG.check() - switch { - case f.StreamID != 0: // stream-level flow control - state, st := sc.state(f.StreamID) - if state == stateIdle { - // Section 5.1: "Receiving any frame other than HEADERS - // or PRIORITY on a stream in this state MUST be - // treated as a connection error (Section 5.4.1) of - // type PROTOCOL_ERROR." - return ConnectionError(ErrCodeProtocol) - } - if st == nil { - // "WINDOW_UPDATE can be sent by a peer that has sent a - // frame bearing the END_STREAM flag. This means that a - // receiver could receive a WINDOW_UPDATE frame on a "half - // closed (remote)" or "closed" stream. A receiver MUST - // NOT treat this as an error, see Section 5.1." 
- return nil - } - if !st.flow.add(int32(f.Increment)) { - return streamError(f.StreamID, ErrCodeFlowControl) - } - default: // connection-level flow control - if !sc.flow.add(int32(f.Increment)) { - return goAwayFlowError{} - } - } - sc.scheduleFrameWrite() - return nil -} - -func (sc *serverConn) processResetStream(f *RSTStreamFrame) error { - sc.serveG.check() - - state, st := sc.state(f.StreamID) - if state == stateIdle { - // 6.4 "RST_STREAM frames MUST NOT be sent for a - // stream in the "idle" state. If a RST_STREAM frame - // identifying an idle stream is received, the - // recipient MUST treat this as a connection error - // (Section 5.4.1) of type PROTOCOL_ERROR. - return ConnectionError(ErrCodeProtocol) - } - if st != nil { - st.cancelCtx() - sc.closeStream(st, streamError(f.StreamID, f.ErrCode)) - } - return nil -} - -func (sc *serverConn) closeStream(st *stream, err error) { - sc.serveG.check() - if st.state == stateIdle || st.state == stateClosed { - panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state)) - } - st.state = stateClosed - if st.writeDeadline != nil { - st.writeDeadline.Stop() - } - if st.isPushed() { - sc.curPushedStreams-- - } else { - sc.curClientStreams-- - } - delete(sc.streams, st.id) - if len(sc.streams) == 0 { - sc.setConnState(http.StateIdle) - if sc.srv.IdleTimeout != 0 { - sc.idleTimer.Reset(sc.srv.IdleTimeout) - } - if h1ServerKeepAlivesDisabled(sc.hs) { - sc.startGracefulShutdownInternal() - } - } - if p := st.body; p != nil { - // Return any buffered unread bytes worth of conn-level flow control. - // See golang.org/issue/16481 - sc.sendWindowUpdate(nil, p.Len()) - - p.CloseWithError(err) - } - st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc - sc.writeSched.CloseStream(st.id) -} - -func (sc *serverConn) processSettings(f *SettingsFrame) error { - sc.serveG.check() - if f.IsAck() { - sc.unackedSettings-- - if sc.unackedSettings < 0 { - // Why is the peer ACKing settings we never sent? - // The spec doesn't mention this case, but - // hang up on them anyway. - return ConnectionError(ErrCodeProtocol) - } - return nil - } - if err := f.ForeachSetting(sc.processSetting); err != nil { - return err - } - sc.needToSendSettingsAck = true - sc.scheduleFrameWrite() - return nil -} - -func (sc *serverConn) processSetting(s Setting) error { - sc.serveG.check() - if err := s.Valid(); err != nil { - return err - } - if VerboseLogs { - sc.vlogf("http2: server processing setting %v", s) - } - switch s.ID { - case SettingHeaderTableSize: - sc.headerTableSize = s.Val - sc.hpackEncoder.SetMaxDynamicTableSize(s.Val) - case SettingEnablePush: - sc.pushEnabled = s.Val != 0 - case SettingMaxConcurrentStreams: - sc.clientMaxStreams = s.Val - case SettingInitialWindowSize: - return sc.processSettingInitialWindowSize(s.Val) - case SettingMaxFrameSize: - sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31 - case SettingMaxHeaderListSize: - sc.peerMaxHeaderListSize = s.Val - default: - // Unknown setting: "An endpoint that receives a SETTINGS - // frame with any unknown or unsupported identifier MUST - // ignore that setting." - if VerboseLogs { - sc.vlogf("http2: server ignoring unknown setting %v", s) - } - } - return nil -} - -func (sc *serverConn) processSettingInitialWindowSize(val uint32) error { - sc.serveG.check() - // Note: val already validated to be within range by - // processSetting's Valid call. - - // "A SETTINGS frame can alter the initial flow control window - // size for all current streams. 
When the value of - // SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST - // adjust the size of all stream flow control windows that it - // maintains by the difference between the new value and the - // old value." - old := sc.initialStreamSendWindowSize - sc.initialStreamSendWindowSize = int32(val) - growth := int32(val) - old // may be negative - for _, st := range sc.streams { - if !st.flow.add(growth) { - // 6.9.2 Initial Flow Control Window Size - // "An endpoint MUST treat a change to - // SETTINGS_INITIAL_WINDOW_SIZE that causes any flow - // control window to exceed the maximum size as a - // connection error (Section 5.4.1) of type - // FLOW_CONTROL_ERROR." - return ConnectionError(ErrCodeFlowControl) - } - } - return nil -} - -func (sc *serverConn) processData(f *DataFrame) error { - sc.serveG.check() - if sc.inGoAway && sc.goAwayCode != ErrCodeNo { - return nil - } - data := f.Data() - - // "If a DATA frame is received whose stream is not in "open" - // or "half closed (local)" state, the recipient MUST respond - // with a stream error (Section 5.4.2) of type STREAM_CLOSED." - id := f.Header().StreamID - state, st := sc.state(id) - if id == 0 || state == stateIdle { - // Section 5.1: "Receiving any frame other than HEADERS - // or PRIORITY on a stream in this state MUST be - // treated as a connection error (Section 5.4.1) of - // type PROTOCOL_ERROR." - return ConnectionError(ErrCodeProtocol) - } - if st == nil || state != stateOpen || st.gotTrailerHeader || st.resetQueued { - // This includes sending a RST_STREAM if the stream is - // in stateHalfClosedLocal (which currently means that - // the http.Handler returned, so it's done reading & - // done writing). Try to stop the client from sending - // more DATA. - - // But still enforce their connection-level flow control, - // and return any flow control bytes since we're not going - // to consume them. - if sc.inflow.available() < int32(f.Length) { - return streamError(id, ErrCodeFlowControl) - } - // Deduct the flow control from inflow, since we're - // going to immediately add it back in - // sendWindowUpdate, which also schedules sending the - // frames. - sc.inflow.take(int32(f.Length)) - sc.sendWindowUpdate(nil, int(f.Length)) // conn-level - - if st != nil && st.resetQueued { - // Already have a stream error in flight. Don't send another. - return nil - } - return streamError(id, ErrCodeStreamClosed) - } - if st.body == nil { - panic("internal error: should have a body in this state") - } - - // Sender sending more than they'd declared? - if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes { - st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes)) - // RFC 7540, sec 8.1.2.6: A request or response is also malformed if the - // value of a content-length header field does not equal the sum of the - // DATA frame payload lengths that form the body. - return streamError(id, ErrCodeProtocol) - } - if f.Length > 0 { - // Check whether the client has flow control quota. - if st.inflow.available() < int32(f.Length) { - return streamError(id, ErrCodeFlowControl) - } - st.inflow.take(int32(f.Length)) - - if len(data) > 0 { - wrote, err := st.body.Write(data) - if err != nil { - return streamError(id, ErrCodeStreamClosed) - } - if wrote != len(data) { - panic("internal error: bad Writer") - } - st.bodyBytes += int64(len(data)) - } - - // Return any padded flow control now, since we won't - // refund it later on body reads. 
- if pad := int32(f.Length) - int32(len(data)); pad > 0 { - sc.sendWindowUpdate32(nil, pad) - sc.sendWindowUpdate32(st, pad) - } - } - if f.StreamEnded() { - st.endStream() - } - return nil -} - -func (sc *serverConn) processGoAway(f *GoAwayFrame) error { - sc.serveG.check() - if f.ErrCode != ErrCodeNo { - sc.logf("http2: received GOAWAY %+v, starting graceful shutdown", f) - } else { - sc.vlogf("http2: received GOAWAY %+v, starting graceful shutdown", f) - } - sc.startGracefulShutdownInternal() - // http://tools.ietf.org/html/rfc7540#section-6.8 - // We should not create any new streams, which means we should disable push. - sc.pushEnabled = false - return nil -} - -// isPushed reports whether the stream is server-initiated. -func (st *stream) isPushed() bool { - return st.id%2 == 0 -} - -// endStream closes a Request.Body's pipe. It is called when a DATA -// frame says a request body is over (or after trailers). -func (st *stream) endStream() { - sc := st.sc - sc.serveG.check() - - if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes { - st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes", - st.declBodyBytes, st.bodyBytes)) - } else { - st.body.closeWithErrorAndCode(io.EOF, st.copyTrailersToHandlerRequest) - st.body.CloseWithError(io.EOF) - } - st.state = stateHalfClosedRemote -} - -// copyTrailersToHandlerRequest is run in the Handler's goroutine in -// its Request.Body.Read just before it gets io.EOF. -func (st *stream) copyTrailersToHandlerRequest() { - for k, vv := range st.trailer { - if _, ok := st.reqTrailer[k]; ok { - // Only copy it over it was pre-declared. - st.reqTrailer[k] = vv - } - } -} - -// onWriteTimeout is run on its own goroutine (from time.AfterFunc) -// when the stream's WriteTimeout has fired. -func (st *stream) onWriteTimeout() { - st.sc.writeFrameFromHandler(FrameWriteRequest{write: streamError(st.id, ErrCodeInternal)}) -} - -func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { - sc.serveG.check() - id := f.StreamID - if sc.inGoAway { - // Ignore. - return nil - } - // http://tools.ietf.org/html/rfc7540#section-5.1.1 - // Streams initiated by a client MUST use odd-numbered stream - // identifiers. [...] An endpoint that receives an unexpected - // stream identifier MUST respond with a connection error - // (Section 5.4.1) of type PROTOCOL_ERROR. - if id%2 != 1 { - return ConnectionError(ErrCodeProtocol) - } - // A HEADERS frame can be used to create a new stream or - // send a trailer for an open one. If we already have a stream - // open, let it process its own HEADERS frame (trailers at this - // point, if it's valid). - if st := sc.streams[f.StreamID]; st != nil { - if st.resetQueued { - // We're sending RST_STREAM to close the stream, so don't bother - // processing this frame. - return nil - } - return st.processTrailerHeaders(f) - } - - // [...] The identifier of a newly established stream MUST be - // numerically greater than all streams that the initiating - // endpoint has opened or reserved. [...] An endpoint that - // receives an unexpected stream identifier MUST respond with - // a connection error (Section 5.4.1) of type PROTOCOL_ERROR. - if id <= sc.maxClientStreamID { - return ConnectionError(ErrCodeProtocol) - } - sc.maxClientStreamID = id - - if sc.idleTimer != nil { - sc.idleTimer.Stop() - } - - // http://tools.ietf.org/html/rfc7540#section-5.1.2 - // [...] Endpoints MUST NOT exceed the limit set by their peer. 
An - // endpoint that receives a HEADERS frame that causes their - // advertised concurrent stream limit to be exceeded MUST treat - // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR - // or REFUSED_STREAM. - if sc.curClientStreams+1 > sc.advMaxStreams { - if sc.unackedSettings == 0 { - // They should know better. - return streamError(id, ErrCodeProtocol) - } - // Assume it's a network race, where they just haven't - // received our last SETTINGS update. But actually - // this can't happen yet, because we don't yet provide - // a way for users to adjust server parameters at - // runtime. - return streamError(id, ErrCodeRefusedStream) - } - - initialState := stateOpen - if f.StreamEnded() { - initialState = stateHalfClosedRemote - } - st := sc.newStream(id, 0, initialState) - - if f.HasPriority() { - if err := checkPriority(f.StreamID, f.Priority); err != nil { - return err - } - sc.writeSched.AdjustStream(st.id, f.Priority) - } - - rw, req, err := sc.newWriterAndRequest(st, f) - if err != nil { - return err - } - st.reqTrailer = req.Trailer - if st.reqTrailer != nil { - st.trailer = make(http.Header) - } - st.body = req.Body.(*requestBody).pipe // may be nil - st.declBodyBytes = req.ContentLength - - handler := sc.handler.ServeHTTP - if f.Truncated { - // Their header list was too long. Send a 431 error. - handler = handleHeaderListTooLong - } else if err := checkValidHTTP2RequestHeaders(req.Header); err != nil { - handler = new400Handler(err) - } - - // The net/http package sets the read deadline from the - // http.Server.ReadTimeout during the TLS handshake, but then - // passes the connection off to us with the deadline already - // set. Disarm it here after the request headers are read, - // similar to how the http1 server works. Here it's - // technically more like the http1 Server's ReadHeaderTimeout - // (in Go 1.8), though. That's a more sane option anyway. - if sc.hs.ReadTimeout != 0 { - sc.conn.SetReadDeadline(time.Time{}) - } - - go sc.runHandler(rw, req, handler) - return nil -} - -func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error { - sc := st.sc - sc.serveG.check() - if st.gotTrailerHeader { - return ConnectionError(ErrCodeProtocol) - } - st.gotTrailerHeader = true - if !f.StreamEnded() { - return streamError(st.id, ErrCodeProtocol) - } - - if len(f.PseudoFields()) > 0 { - return streamError(st.id, ErrCodeProtocol) - } - if st.trailer != nil { - for _, hf := range f.RegularFields() { - key := sc.canonicalHeader(hf.Name) - if !httpguts.ValidTrailerHeader(key) { - // TODO: send more details to the peer somehow. But http2 has - // no way to send debug data at a stream level. Discuss with - // HTTP folk. - return streamError(st.id, ErrCodeProtocol) - } - st.trailer[key] = append(st.trailer[key], hf.Value) - } - } - st.endStream() - return nil -} - -func checkPriority(streamID uint32, p PriorityParam) error { - if streamID == p.StreamDep { - // Section 5.3.1: "A stream cannot depend on itself. An endpoint MUST treat - // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR." - // Section 5.3.3 says that a stream can depend on one of its dependencies, - // so it's only self-dependencies that are forbidden. 
- return streamError(streamID, ErrCodeProtocol) - } - return nil -} - -func (sc *serverConn) processPriority(f *PriorityFrame) error { - if sc.inGoAway { - return nil - } - if err := checkPriority(f.StreamID, f.PriorityParam); err != nil { - return err - } - sc.writeSched.AdjustStream(f.StreamID, f.PriorityParam) - return nil -} - -func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream { - sc.serveG.check() - if id == 0 { - panic("internal error: cannot create stream with id 0") - } - - ctx, cancelCtx := contextWithCancel(sc.baseCtx) - st := &stream{ - sc: sc, - id: id, - state: state, - ctx: ctx, - cancelCtx: cancelCtx, - } - st.cw.Init() - st.flow.conn = &sc.flow // link to conn-level counter - st.flow.add(sc.initialStreamSendWindowSize) - st.inflow.conn = &sc.inflow // link to conn-level counter - st.inflow.add(sc.srv.initialStreamRecvWindowSize()) - if sc.hs.WriteTimeout != 0 { - st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) - } - - sc.streams[id] = st - sc.writeSched.OpenStream(st.id, OpenStreamOptions{PusherID: pusherID}) - if st.isPushed() { - sc.curPushedStreams++ - } else { - sc.curClientStreams++ - } - if sc.curOpenStreams() == 1 { - sc.setConnState(http.StateActive) - } - - return st -} - -func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) { - sc.serveG.check() - - rp := requestParam{ - method: f.PseudoValue("method"), - scheme: f.PseudoValue("scheme"), - authority: f.PseudoValue("authority"), - path: f.PseudoValue("path"), - } - - isConnect := rp.method == "CONNECT" - if isConnect { - if rp.path != "" || rp.scheme != "" || rp.authority == "" { - return nil, nil, streamError(f.StreamID, ErrCodeProtocol) - } - } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") { - // See 8.1.2.6 Malformed Requests and Responses: - // - // Malformed requests or responses that are detected - // MUST be treated as a stream error (Section 5.4.2) - // of type PROTOCOL_ERROR." 
- // - // 8.1.2.3 Request Pseudo-Header Fields - // "All HTTP/2 requests MUST include exactly one valid - // value for the :method, :scheme, and :path - // pseudo-header fields" - return nil, nil, streamError(f.StreamID, ErrCodeProtocol) - } - - bodyOpen := !f.StreamEnded() - if rp.method == "HEAD" && bodyOpen { - // HEAD requests can't have bodies - return nil, nil, streamError(f.StreamID, ErrCodeProtocol) - } - - rp.header = make(http.Header) - for _, hf := range f.RegularFields() { - rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value) - } - if rp.authority == "" { - rp.authority = rp.header.Get("Host") - } - - rw, req, err := sc.newWriterAndRequestNoBody(st, rp) - if err != nil { - return nil, nil, err - } - if bodyOpen { - if vv, ok := rp.header["Content-Length"]; ok { - req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64) - } else { - req.ContentLength = -1 - } - req.Body.(*requestBody).pipe = &pipe{ - b: &dataBuffer{expected: req.ContentLength}, - } - } - return rw, req, nil -} - -type requestParam struct { - method string - scheme, authority, path string - header http.Header -} - -func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) { - sc.serveG.check() - - var tlsState *tls.ConnectionState // nil if not scheme https - if rp.scheme == "https" { - tlsState = sc.tlsState - } - - needsContinue := rp.header.Get("Expect") == "100-continue" - if needsContinue { - rp.header.Del("Expect") - } - // Merge Cookie headers into one "; "-delimited value. - if cookies := rp.header["Cookie"]; len(cookies) > 1 { - rp.header.Set("Cookie", strings.Join(cookies, "; ")) - } - - // Setup Trailers - var trailer http.Header - for _, v := range rp.header["Trailer"] { - for _, key := range strings.Split(v, ",") { - key = http.CanonicalHeaderKey(strings.TrimSpace(key)) - switch key { - case "Transfer-Encoding", "Trailer", "Content-Length": - // Bogus. (copy of http1 rules) - // Ignore. - default: - if trailer == nil { - trailer = make(http.Header) - } - trailer[key] = nil - } - } - } - delete(rp.header, "Trailer") - - var url_ *url.URL - var requestURI string - if rp.method == "CONNECT" { - url_ = &url.URL{Host: rp.authority} - requestURI = rp.authority // mimic HTTP/1 server behavior - } else { - var err error - url_, err = url.ParseRequestURI(rp.path) - if err != nil { - return nil, nil, streamError(st.id, ErrCodeProtocol) - } - requestURI = rp.path - } - - body := &requestBody{ - conn: sc, - stream: st, - needsContinue: needsContinue, - } - req := &http.Request{ - Method: rp.method, - URL: url_, - RemoteAddr: sc.remoteAddrStr, - Header: rp.header, - RequestURI: requestURI, - Proto: "HTTP/2.0", - ProtoMajor: 2, - ProtoMinor: 0, - TLS: tlsState, - Host: rp.authority, - Body: body, - Trailer: trailer, - } - req = requestWithContext(req, st.ctx) - - rws := responseWriterStatePool.Get().(*responseWriterState) - bwSave := rws.bw - *rws = responseWriterState{} // zero all the fields - rws.conn = sc - rws.bw = bwSave - rws.bw.Reset(chunkWriter{rws}) - rws.stream = st - rws.req = req - rws.body = body - - rw := &responseWriter{rws: rws} - return rw, req, nil -} - -// Run on its own goroutine. 
-func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { - didPanic := true - defer func() { - rw.rws.stream.cancelCtx() - if didPanic { - e := recover() - sc.writeFrameFromHandler(FrameWriteRequest{ - write: handlerPanicRST{rw.rws.stream.id}, - stream: rw.rws.stream, - }) - // Same as net/http: - if shouldLogPanic(e) { - const size = 64 << 10 - buf := make([]byte, size) - buf = buf[:runtime.Stack(buf, false)] - sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf) - } - return - } - rw.handlerDone() - }() - handler(rw, req) - didPanic = false -} - -func handleHeaderListTooLong(w http.ResponseWriter, r *http.Request) { - // 10.5.1 Limits on Header Block Size: - // .. "A server that receives a larger header block than it is - // willing to handle can send an HTTP 431 (Request Header Fields Too - // Large) status code" - const statusRequestHeaderFieldsTooLarge = 431 // only in Go 1.6+ - w.WriteHeader(statusRequestHeaderFieldsTooLarge) - io.WriteString(w, "

<h1>HTTP Error 431</h1><p>Request Header Field(s) Too Large</p>
") -} - -// called from handler goroutines. -// h may be nil. -func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) error { - sc.serveG.checkNotOn() // NOT on - var errc chan error - if headerData.h != nil { - // If there's a header map (which we don't own), so we have to block on - // waiting for this frame to be written, so an http.Flush mid-handler - // writes out the correct value of keys, before a handler later potentially - // mutates it. - errc = errChanPool.Get().(chan error) - } - if err := sc.writeFrameFromHandler(FrameWriteRequest{ - write: headerData, - stream: st, - done: errc, - }); err != nil { - return err - } - if errc != nil { - select { - case err := <-errc: - errChanPool.Put(errc) - return err - case <-sc.doneServing: - return errClientDisconnected - case <-st.cw: - return errStreamClosed - } - } - return nil -} - -// called from handler goroutines. -func (sc *serverConn) write100ContinueHeaders(st *stream) { - sc.writeFrameFromHandler(FrameWriteRequest{ - write: write100ContinueHeadersFrame{st.id}, - stream: st, - }) -} - -// A bodyReadMsg tells the server loop that the http.Handler read n -// bytes of the DATA from the client on the given stream. -type bodyReadMsg struct { - st *stream - n int -} - -// called from handler goroutines. -// Notes that the handler for the given stream ID read n bytes of its body -// and schedules flow control tokens to be sent. -func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) { - sc.serveG.checkNotOn() // NOT on - if n > 0 { - select { - case sc.bodyReadCh <- bodyReadMsg{st, n}: - case <-sc.doneServing: - } - } -} - -func (sc *serverConn) noteBodyRead(st *stream, n int) { - sc.serveG.check() - sc.sendWindowUpdate(nil, n) // conn-level - if st.state != stateHalfClosedRemote && st.state != stateClosed { - // Don't send this WINDOW_UPDATE if the stream is closed - // remotely. - sc.sendWindowUpdate(st, n) - } -} - -// st may be nil for conn-level -func (sc *serverConn) sendWindowUpdate(st *stream, n int) { - sc.serveG.check() - // "The legal range for the increment to the flow control - // window is 1 to 2^31-1 (2,147,483,647) octets." - // A Go Read call on 64-bit machines could in theory read - // a larger Read than this. Very unlikely, but we handle it here - // rather than elsewhere for now. - const maxUint31 = 1<<31 - 1 - for n >= maxUint31 { - sc.sendWindowUpdate32(st, maxUint31) - n -= maxUint31 - } - sc.sendWindowUpdate32(st, int32(n)) -} - -// st may be nil for conn-level -func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) { - sc.serveG.check() - if n == 0 { - return - } - if n < 0 { - panic("negative update") - } - var streamID uint32 - if st != nil { - streamID = st.id - } - sc.writeFrame(FrameWriteRequest{ - write: writeWindowUpdate{streamID: streamID, n: uint32(n)}, - stream: st, - }) - var ok bool - if st == nil { - ok = sc.inflow.add(n) - } else { - ok = st.inflow.add(n) - } - if !ok { - panic("internal error; sent too many window updates without decrements?") - } -} - -// requestBody is the Handler's Request.Body type. -// Read and Close may be called concurrently. 
-type requestBody struct { - stream *stream - conn *serverConn - closed bool // for use by Close only - sawEOF bool // for use by Read only - pipe *pipe // non-nil if we have a HTTP entity message body - needsContinue bool // need to send a 100-continue -} - -func (b *requestBody) Close() error { - if b.pipe != nil && !b.closed { - b.pipe.BreakWithError(errClosedBody) - } - b.closed = true - return nil -} - -func (b *requestBody) Read(p []byte) (n int, err error) { - if b.needsContinue { - b.needsContinue = false - b.conn.write100ContinueHeaders(b.stream) - } - if b.pipe == nil || b.sawEOF { - return 0, io.EOF - } - n, err = b.pipe.Read(p) - if err == io.EOF { - b.sawEOF = true - } - if b.conn == nil && inTests { - return - } - b.conn.noteBodyReadFromHandler(b.stream, n, err) - return -} - -// responseWriter is the http.ResponseWriter implementation. It's -// intentionally small (1 pointer wide) to minimize garbage. The -// responseWriterState pointer inside is zeroed at the end of a -// request (in handlerDone) and calls on the responseWriter thereafter -// simply crash (caller's mistake), but the much larger responseWriterState -// and buffers are reused between multiple requests. -type responseWriter struct { - rws *responseWriterState -} - -// Optional http.ResponseWriter interfaces implemented. -var ( - _ http.CloseNotifier = (*responseWriter)(nil) - _ http.Flusher = (*responseWriter)(nil) - _ stringWriter = (*responseWriter)(nil) -) - -type responseWriterState struct { - // immutable within a request: - stream *stream - req *http.Request - body *requestBody // to close at end of request, if DATA frames didn't - conn *serverConn - - // TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc - bw *bufio.Writer // writing to a chunkWriter{this *responseWriterState} - - // mutated by http.Handler goroutine: - handlerHeader http.Header // nil until called - snapHeader http.Header // snapshot of handlerHeader at WriteHeader time - trailers []string // set in writeChunk - status int // status code passed to WriteHeader - wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet. - sentHeader bool // have we sent the header frame? - handlerDone bool // handler has finished - dirty bool // a Write failed; don't reuse this responseWriterState - - sentContentLen int64 // non-zero if handler set a Content-Length header - wroteBytes int64 - - closeNotifierMu sync.Mutex // guards closeNotifierCh - closeNotifierCh chan bool // nil until first used -} - -type chunkWriter struct{ rws *responseWriterState } - -func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) } - -func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) != 0 } - -// declareTrailer is called for each Trailer header when the -// response header is written. It notes that a header will need to be -// written in the trailers at the end of the response. -func (rws *responseWriterState) declareTrailer(k string) { - k = http.CanonicalHeaderKey(k) - if !httpguts.ValidTrailerHeader(k) { - // Forbidden by RFC 7230, section 4.1.2. - rws.conn.logf("ignoring invalid trailer %q", k) - return - } - if !strSliceContains(rws.trailers, k) { - rws.trailers = append(rws.trailers, k) - } -} - -// writeChunk writes chunks from the bufio.Writer. But because -// bufio.Writer may bypass its chunking, sometimes p may be -// arbitrarily large. 
-// -// writeChunk is also responsible (on the first chunk) for sending the -// HEADER response. -func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { - if !rws.wroteHeader { - rws.writeHeader(200) - } - - isHeadResp := rws.req.Method == "HEAD" - if !rws.sentHeader { - rws.sentHeader = true - var ctype, clen string - if clen = rws.snapHeader.Get("Content-Length"); clen != "" { - rws.snapHeader.Del("Content-Length") - clen64, err := strconv.ParseInt(clen, 10, 64) - if err == nil && clen64 >= 0 { - rws.sentContentLen = clen64 - } else { - clen = "" - } - } - if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) { - clen = strconv.Itoa(len(p)) - } - _, hasContentType := rws.snapHeader["Content-Type"] - if !hasContentType && bodyAllowedForStatus(rws.status) && len(p) > 0 { - if cto := rws.snapHeader.Get("X-Content-Type-Options"); strings.EqualFold("nosniff", cto) { - // nosniff is an explicit directive not to guess a content-type. - // Content-sniffing is no less susceptible to polyglot attacks via - // hosted content when done on the server. - ctype = "application/octet-stream" - rws.conn.logf("http2: WriteHeader called with X-Content-Type-Options:nosniff but no Content-Type") - } else { - ctype = http.DetectContentType(p) - } - } - var date string - if _, ok := rws.snapHeader["Date"]; !ok { - // TODO(bradfitz): be faster here, like net/http? measure. - date = time.Now().UTC().Format(http.TimeFormat) - } - - for _, v := range rws.snapHeader["Trailer"] { - foreachHeaderElement(v, rws.declareTrailer) - } - - // "Connection" headers aren't allowed in HTTP/2 (RFC 7540, 8.1.2.2), - // but respect "Connection" == "close" to mean sending a GOAWAY and tearing - // down the TCP connection when idle, like we do for HTTP/1. - // TODO: remove more Connection-specific header fields here, in addition - // to "Connection". - if _, ok := rws.snapHeader["Connection"]; ok { - v := rws.snapHeader.Get("Connection") - delete(rws.snapHeader, "Connection") - if v == "close" { - rws.conn.startGracefulShutdown() - } - } - - endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp - err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ - streamID: rws.stream.id, - httpResCode: rws.status, - h: rws.snapHeader, - endStream: endStream, - contentType: ctype, - contentLength: clen, - date: date, - }) - if err != nil { - rws.dirty = true - return 0, err - } - if endStream { - return 0, nil - } - } - if isHeadResp { - return len(p), nil - } - if len(p) == 0 && !rws.handlerDone { - return 0, nil - } - - if rws.handlerDone { - rws.promoteUndeclaredTrailers() - } - - endStream := rws.handlerDone && !rws.hasTrailers() - if len(p) > 0 || endStream { - // only send a 0 byte DATA frame if we're ending the stream. - if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil { - rws.dirty = true - return 0, err - } - } - - if rws.handlerDone && rws.hasTrailers() { - err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ - streamID: rws.stream.id, - h: rws.handlerHeader, - trailers: rws.trailers, - endStream: true, - }) - if err != nil { - rws.dirty = true - } - return len(p), err - } - return len(p), nil -} - -// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys -// that, if present, signals that the map entry is actually for -// the response trailers, and not the response headers. The prefix -// is stripped after the ServeHTTP call finishes and the values are -// sent in the trailers. 
-// -// This mechanism is intended only for trailers that are not known -// prior to the headers being written. If the set of trailers is fixed -// or known before the header is written, the normal Go trailers mechanism -// is preferred: -// https://golang.org/pkg/net/http/#ResponseWriter -// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers -const TrailerPrefix = "Trailer:" - -// promoteUndeclaredTrailers permits http.Handlers to set trailers -// after the header has already been flushed. Because the Go -// ResponseWriter interface has no way to set Trailers (only the -// Header), and because we didn't want to expand the ResponseWriter -// interface, and because nobody used trailers, and because RFC 7230 -// says you SHOULD (but not must) predeclare any trailers in the -// header, the official ResponseWriter rules said trailers in Go must -// be predeclared, and then we reuse the same ResponseWriter.Header() -// map to mean both Headers and Trailers. When it's time to write the -// Trailers, we pick out the fields of Headers that were declared as -// trailers. That worked for a while, until we found the first major -// user of Trailers in the wild: gRPC (using them only over http2), -// and gRPC libraries permit setting trailers mid-stream without -// predeclarnig them. So: change of plans. We still permit the old -// way, but we also permit this hack: if a Header() key begins with -// "Trailer:", the suffix of that key is a Trailer. Because ':' is an -// invalid token byte anyway, there is no ambiguity. (And it's already -// filtered out) It's mildly hacky, but not terrible. -// -// This method runs after the Handler is done and promotes any Header -// fields to be trailers. -func (rws *responseWriterState) promoteUndeclaredTrailers() { - for k, vv := range rws.handlerHeader { - if !strings.HasPrefix(k, TrailerPrefix) { - continue - } - trailerKey := strings.TrimPrefix(k, TrailerPrefix) - rws.declareTrailer(trailerKey) - rws.handlerHeader[http.CanonicalHeaderKey(trailerKey)] = vv - } - - if len(rws.trailers) > 1 { - sorter := sorterPool.Get().(*sorter) - sorter.SortStrings(rws.trailers) - sorterPool.Put(sorter) - } -} - -func (w *responseWriter) Flush() { - rws := w.rws - if rws == nil { - panic("Header called after Handler finished") - } - if rws.bw.Buffered() > 0 { - if err := rws.bw.Flush(); err != nil { - // Ignore the error. The frame writer already knows. - return - } - } else { - // The bufio.Writer won't call chunkWriter.Write - // (writeChunk with zero bytes, so we have to do it - // ourselves to force the HTTP response header and/or - // final DATA frame (with END_STREAM) to be sent. - rws.writeChunk(nil) - } -} - -func (w *responseWriter) CloseNotify() <-chan bool { - rws := w.rws - if rws == nil { - panic("CloseNotify called after Handler finished") - } - rws.closeNotifierMu.Lock() - ch := rws.closeNotifierCh - if ch == nil { - ch = make(chan bool, 1) - rws.closeNotifierCh = ch - cw := rws.stream.cw - go func() { - cw.Wait() // wait for close - ch <- true - }() - } - rws.closeNotifierMu.Unlock() - return ch -} - -func (w *responseWriter) Header() http.Header { - rws := w.rws - if rws == nil { - panic("Header called after Handler finished") - } - if rws.handlerHeader == nil { - rws.handlerHeader = make(http.Header) - } - return rws.handlerHeader -} - -// checkWriteHeaderCode is a copy of net/http's checkWriteHeaderCode. -func checkWriteHeaderCode(code int) { - // Issue 22880: require valid WriteHeader status codes. 
- // For now we only enforce that it's three digits. - // In the future we might block things over 599 (600 and above aren't defined - // at http://httpwg.org/specs/rfc7231.html#status.codes) - // and we might block under 200 (once we have more mature 1xx support). - // But for now any three digits. - // - // We used to send "HTTP/1.1 000 0" on the wire in responses but there's - // no equivalent bogus thing we can realistically send in HTTP/2, - // so we'll consistently panic instead and help people find their bugs - // early. (We can't return an error from WriteHeader even if we wanted to.) - if code < 100 || code > 999 { - panic(fmt.Sprintf("invalid WriteHeader code %v", code)) - } -} - -func (w *responseWriter) WriteHeader(code int) { - rws := w.rws - if rws == nil { - panic("WriteHeader called after Handler finished") - } - rws.writeHeader(code) -} - -func (rws *responseWriterState) writeHeader(code int) { - if !rws.wroteHeader { - checkWriteHeaderCode(code) - rws.wroteHeader = true - rws.status = code - if len(rws.handlerHeader) > 0 { - rws.snapHeader = cloneHeader(rws.handlerHeader) - } - } -} - -func cloneHeader(h http.Header) http.Header { - h2 := make(http.Header, len(h)) - for k, vv := range h { - vv2 := make([]string, len(vv)) - copy(vv2, vv) - h2[k] = vv2 - } - return h2 -} - -// The Life Of A Write is like this: -// -// * Handler calls w.Write or w.WriteString -> -// * -> rws.bw (*bufio.Writer) -> -// * (Handler might call Flush) -// * -> chunkWriter{rws} -// * -> responseWriterState.writeChunk(p []byte) -// * -> responseWriterState.writeChunk (most of the magic; see comment there) -func (w *responseWriter) Write(p []byte) (n int, err error) { - return w.write(len(p), p, "") -} - -func (w *responseWriter) WriteString(s string) (n int, err error) { - return w.write(len(s), nil, s) -} - -// either dataB or dataS is non-zero. -func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) { - rws := w.rws - if rws == nil { - panic("Write called after Handler finished") - } - if !rws.wroteHeader { - w.WriteHeader(200) - } - if !bodyAllowedForStatus(rws.status) { - return 0, http.ErrBodyNotAllowed - } - rws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) // only one can be set - if rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen { - // TODO: send a RST_STREAM - return 0, errors.New("http2: handler wrote more than declared Content-Length") - } - - if dataB != nil { - return rws.bw.Write(dataB) - } else { - return rws.bw.WriteString(dataS) - } -} - -func (w *responseWriter) handlerDone() { - rws := w.rws - dirty := rws.dirty - rws.handlerDone = true - w.Flush() - w.rws = nil - if !dirty { - // Only recycle the pool if all prior Write calls to - // the serverConn goroutine completed successfully. If - // they returned earlier due to resets from the peer - // there might still be write goroutines outstanding - // from the serverConn referencing the rws memory. See - // issue 20704. - responseWriterStatePool.Put(rws) - } -} - -// Push errors. -var ( - ErrRecursivePush = errors.New("http2: recursive push not allowed") - ErrPushLimitReached = errors.New("http2: push would exceed peer's SETTINGS_MAX_CONCURRENT_STREAMS") -) - -// pushOptions is the internal version of http.PushOptions, which we -// cannot include here because it's only defined in Go 1.8 and later. 
-type pushOptions struct { - Method string - Header http.Header -} - -func (w *responseWriter) push(target string, opts pushOptions) error { - st := w.rws.stream - sc := st.sc - sc.serveG.checkNotOn() - - // No recursive pushes: "PUSH_PROMISE frames MUST only be sent on a peer-initiated stream." - // http://tools.ietf.org/html/rfc7540#section-6.6 - if st.isPushed() { - return ErrRecursivePush - } - - // Default options. - if opts.Method == "" { - opts.Method = "GET" - } - if opts.Header == nil { - opts.Header = http.Header{} - } - wantScheme := "http" - if w.rws.req.TLS != nil { - wantScheme = "https" - } - - // Validate the request. - u, err := url.Parse(target) - if err != nil { - return err - } - if u.Scheme == "" { - if !strings.HasPrefix(target, "/") { - return fmt.Errorf("target must be an absolute URL or an absolute path: %q", target) - } - u.Scheme = wantScheme - u.Host = w.rws.req.Host - } else { - if u.Scheme != wantScheme { - return fmt.Errorf("cannot push URL with scheme %q from request with scheme %q", u.Scheme, wantScheme) - } - if u.Host == "" { - return errors.New("URL must have a host") - } - } - for k := range opts.Header { - if strings.HasPrefix(k, ":") { - return fmt.Errorf("promised request headers cannot include pseudo header %q", k) - } - // These headers are meaningful only if the request has a body, - // but PUSH_PROMISE requests cannot have a body. - // http://tools.ietf.org/html/rfc7540#section-8.2 - // Also disallow Host, since the promised URL must be absolute. - switch strings.ToLower(k) { - case "content-length", "content-encoding", "trailer", "te", "expect", "host": - return fmt.Errorf("promised request headers cannot include %q", k) - } - } - if err := checkValidHTTP2RequestHeaders(opts.Header); err != nil { - return err - } - - // The RFC effectively limits promised requests to GET and HEAD: - // "Promised requests MUST be cacheable [GET, HEAD, or POST], and MUST be safe [GET or HEAD]" - // http://tools.ietf.org/html/rfc7540#section-8.2 - if opts.Method != "GET" && opts.Method != "HEAD" { - return fmt.Errorf("method %q must be GET or HEAD", opts.Method) - } - - msg := &startPushRequest{ - parent: st, - method: opts.Method, - url: u, - header: cloneHeader(opts.Header), - done: errChanPool.Get().(chan error), - } - - select { - case <-sc.doneServing: - return errClientDisconnected - case <-st.cw: - return errStreamClosed - case sc.serveMsgCh <- msg: - } - - select { - case <-sc.doneServing: - return errClientDisconnected - case <-st.cw: - return errStreamClosed - case err := <-msg.done: - errChanPool.Put(msg.done) - return err - } -} - -type startPushRequest struct { - parent *stream - method string - url *url.URL - header http.Header - done chan error -} - -func (sc *serverConn) startPush(msg *startPushRequest) { - sc.serveG.check() - - // http://tools.ietf.org/html/rfc7540#section-6.6. - // PUSH_PROMISE frames MUST only be sent on a peer-initiated stream that - // is in either the "open" or "half-closed (remote)" state. - if msg.parent.state != stateOpen && msg.parent.state != stateHalfClosedRemote { - // responseWriter.Push checks that the stream is peer-initiaed. - msg.done <- errStreamClosed - return - } - - // http://tools.ietf.org/html/rfc7540#section-6.6. - if !sc.pushEnabled { - msg.done <- http.ErrNotSupported - return - } - - // PUSH_PROMISE frames must be sent in increasing order by stream ID, so - // we allocate an ID for the promised stream lazily, when the PUSH_PROMISE - // is written. Once the ID is allocated, we start the request handler. 
- allocatePromisedID := func() (uint32, error) { - sc.serveG.check() - - // Check this again, just in case. Technically, we might have received - // an updated SETTINGS by the time we got around to writing this frame. - if !sc.pushEnabled { - return 0, http.ErrNotSupported - } - // http://tools.ietf.org/html/rfc7540#section-6.5.2. - if sc.curPushedStreams+1 > sc.clientMaxStreams { - return 0, ErrPushLimitReached - } - - // http://tools.ietf.org/html/rfc7540#section-5.1.1. - // Streams initiated by the server MUST use even-numbered identifiers. - // A server that is unable to establish a new stream identifier can send a GOAWAY - // frame so that the client is forced to open a new connection for new streams. - if sc.maxPushPromiseID+2 >= 1<<31 { - sc.startGracefulShutdownInternal() - return 0, ErrPushLimitReached - } - sc.maxPushPromiseID += 2 - promisedID := sc.maxPushPromiseID - - // http://tools.ietf.org/html/rfc7540#section-8.2. - // Strictly speaking, the new stream should start in "reserved (local)", then - // transition to "half closed (remote)" after sending the initial HEADERS, but - // we start in "half closed (remote)" for simplicity. - // See further comments at the definition of stateHalfClosedRemote. - promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote) - rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{ - method: msg.method, - scheme: msg.url.Scheme, - authority: msg.url.Host, - path: msg.url.RequestURI(), - header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE - }) - if err != nil { - // Should not happen, since we've already validated msg.url. - panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err)) - } - - go sc.runHandler(rw, req, sc.handler.ServeHTTP) - return promisedID, nil - } - - sc.writeFrame(FrameWriteRequest{ - write: &writePushPromise{ - streamID: msg.parent.id, - method: msg.method, - url: msg.url, - h: msg.header, - allocatePromisedID: allocatePromisedID, - }, - stream: msg.parent, - done: msg.done, - }) -} - -// foreachHeaderElement splits v according to the "#rule" construction -// in RFC 7230 section 7 and calls fn for each non-empty element. -func foreachHeaderElement(v string, fn func(string)) { - v = textproto.TrimString(v) - if v == "" { - return - } - if !strings.Contains(v, ",") { - fn(v) - return - } - for _, f := range strings.Split(v, ",") { - if f = textproto.TrimString(f); f != "" { - fn(f) - } - } -} - -// From http://httpwg.org/specs/rfc7540.html#rfc.section.8.1.2.2 -var connHeaders = []string{ - "Connection", - "Keep-Alive", - "Proxy-Connection", - "Transfer-Encoding", - "Upgrade", -} - -// checkValidHTTP2RequestHeaders checks whether h is a valid HTTP/2 request, -// per RFC 7540 Section 8.1.2.2. -// The returned error is reported to users. -func checkValidHTTP2RequestHeaders(h http.Header) error { - for _, k := range connHeaders { - if _, ok := h[k]; ok { - return fmt.Errorf("request header %q is not valid in HTTP/2", k) - } - } - te := h["Te"] - if len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) { - return errors.New(`request header "TE" may only be "trailers" in HTTP/2`) - } - return nil -} - -func new400Handler(err error) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - http.Error(w, err.Error(), http.StatusBadRequest) - } -} - -// h1ServerKeepAlivesDisabled reports whether hs has its keep-alives -// disabled. 
See comments on h1ServerShutdownChan above for why -// the code is written this way. -func h1ServerKeepAlivesDisabled(hs *http.Server) bool { - var x interface{} = hs - type I interface { - doKeepAlives() bool - } - if hs, ok := x.(I); ok { - return !hs.doKeepAlives() - } - return false -} diff --git a/vendor/golang.org/x/net/http2/testdata/draft-ietf-httpbis-http2.xml b/vendor/golang.org/x/net/http2/testdata/draft-ietf-httpbis-http2.xml deleted file mode 100644 index 31a84be..0000000 --- a/vendor/golang.org/x/net/http2/testdata/draft-ietf-httpbis-http2.xml +++ /dev/null @@ -1,5021 +0,0 @@ - - - - - - - - - - - - - - - - - - - Hypertext Transfer Protocol version 2 - - - Twist -
- mbelshe@chromium.org -
-
- - - Google, Inc -
- fenix@google.com -
-
- - - Mozilla -
- - 331 E Evelyn Street - Mountain View - CA - 94041 - US - - martin.thomson@gmail.com -
-
- - - Applications - HTTPbis - HTTP - SPDY - Web - - - - This specification describes an optimized expression of the semantics of the Hypertext - Transfer Protocol (HTTP). HTTP/2 enables a more efficient use of network resources and a - reduced perception of latency by introducing header field compression and allowing multiple - concurrent messages on the same connection. It also introduces unsolicited push of - representations from servers to clients. - - - This specification is an alternative to, but does not obsolete, the HTTP/1.1 message syntax. - HTTP's existing semantics remain unchanged. - - - - - - Discussion of this draft takes place on the HTTPBIS working group mailing list - (ietf-http-wg@w3.org), which is archived at . - - - Working Group information can be found at ; that specific to HTTP/2 are at . - - - The changes in this draft are summarized in . - - - -
- - -
- - - The Hypertext Transfer Protocol (HTTP) is a wildly successful protocol. However, the - HTTP/1.1 message format () has - several characteristics that have a negative overall effect on application performance - today. - - - In particular, HTTP/1.0 allowed only one request to be outstanding at a time on a given - TCP connection. HTTP/1.1 added request pipelining, but this only partially addressed - request concurrency and still suffers from head-of-line blocking. Therefore, HTTP/1.1 - clients that need to make many requests typically use multiple connections to a server in - order to achieve concurrency and thereby reduce latency. - - - Furthermore, HTTP header fields are often repetitive and verbose, causing unnecessary - network traffic, as well as causing the initial TCP congestion - window to quickly fill. This can result in excessive latency when multiple requests are - made on a new TCP connection. - - - HTTP/2 addresses these issues by defining an optimized mapping of HTTP's semantics to an - underlying connection. Specifically, it allows interleaving of request and response - messages on the same connection and uses an efficient coding for HTTP header fields. It - also allows prioritization of requests, letting more important requests complete more - quickly, further improving performance. - - - The resulting protocol is more friendly to the network, because fewer TCP connections can - be used in comparison to HTTP/1.x. This means less competition with other flows, and - longer-lived connections, which in turn leads to better utilization of available network - capacity. - - - Finally, HTTP/2 also enables more efficient processing of messages through use of binary - message framing. - -
- -
- - HTTP/2 provides an optimized transport for HTTP semantics. HTTP/2 supports all of the core - features of HTTP/1.1, but aims to be more efficient in several ways. - - - The basic protocol unit in HTTP/2 is a frame. Each frame - type serves a different purpose. For example, HEADERS and - DATA frames form the basis of HTTP requests and - responses; other frame types like SETTINGS, - WINDOW_UPDATE, and PUSH_PROMISE are used in support of other - HTTP/2 features. - - - Multiplexing of requests is achieved by having each HTTP request-response exchange - associated with its own stream. Streams are largely - independent of each other, so a blocked or stalled request or response does not prevent - progress on other streams. - - - Flow control and prioritization ensure that it is possible to efficiently use multiplexed - streams. Flow control helps to ensure that only data that - can be used by a receiver is transmitted. Prioritization ensures that limited resources can be directed - to the most important streams first. - - - HTTP/2 adds a new interaction mode, whereby a server can push - responses to a client. Server push allows a server to speculatively send a client - data that the server anticipates the client will need, trading off some network usage - against a potential latency gain. The server does this by synthesizing a request, which it - sends as a PUSH_PROMISE frame. The server is then able to send a response to - the synthetic request on a separate stream. - - - Frames that contain HTTP header fields are compressed. - HTTP requests can be highly redundant, so compression can reduce the size of requests and - responses significantly. - - -
- - The HTTP/2 specification is split into four parts: - - - Starting HTTP/2 covers how an HTTP/2 connection is - initiated. - - - The framing and streams layers describe the way HTTP/2 frames are - structured and formed into multiplexed streams. - - - Frame and error - definitions include details of the frame and error types used in HTTP/2. - - - HTTP mappings and additional - requirements describe how HTTP semantics are expressed using frames and - streams. - - - - - While some of the frame and stream layer concepts are isolated from HTTP, this - specification does not define a completely generic framing layer. The framing and streams - layers are tailored to the needs of the HTTP protocol and server push. - -
- -
- - The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD - NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be interpreted as - described in RFC 2119. - - - All numeric values are in network byte order. Values are unsigned unless otherwise - indicated. Literal values are provided in decimal or hexadecimal as appropriate. - Hexadecimal literals are prefixed with 0x to distinguish them - from decimal literals. - - - The following terms are used: - - - The endpoint initiating the HTTP/2 connection. - - - A transport-layer connection between two endpoints. - - - An error that affects the entire HTTP/2 connection. - - - Either the client or server of the connection. - - - The smallest unit of communication within an HTTP/2 connection, consisting of a header - and a variable-length sequence of octets structured according to the frame type. - - - An endpoint. When discussing a particular endpoint, "peer" refers to the endpoint - that is remote to the primary subject of discussion. - - - An endpoint that is receiving frames. - - - An endpoint that is transmitting frames. - - - The endpoint which did not initiate the HTTP/2 connection. - - - A bi-directional flow of frames across a virtual channel within the HTTP/2 connection. - - - An error on the individual HTTP/2 stream. - - - - - Finally, the terms "gateway", "intermediary", "proxy", and "tunnel" are defined - in . - -
-
- -
- - An HTTP/2 connection is an application layer protocol running on top of a TCP connection - (). The client is the TCP connection initiator. - - - HTTP/2 uses the same "http" and "https" URI schemes used by HTTP/1.1. HTTP/2 shares the same - default port numbers: 80 for "http" URIs and 443 for "https" URIs. As a result, - implementations processing requests for target resource URIs like http://example.org/foo or https://example.com/bar are required to first discover whether the - upstream server (the immediate peer to which the client wishes to establish a connection) - supports HTTP/2. - - - - The means by which support for HTTP/2 is determined is different for "http" and "https" - URIs. Discovery for "http" URIs is described in . Discovery - for "https" URIs is described in . - - -
- - The protocol defined in this document has two identifiers. - - - - The string "h2" identifies the protocol where HTTP/2 uses TLS. This identifier is used in the TLS application layer protocol negotiation extension (ALPN) - field and any place that HTTP/2 over TLS is identified. - - - The "h2" string is serialized into an ALPN protocol identifier as the two octet - sequence: 0x68, 0x32. - - - - - The string "h2c" identifies the protocol where HTTP/2 is run over cleartext TCP. - This identifier is used in the HTTP/1.1 Upgrade header field and any place that - HTTP/2 over TCP is identified. - - - - - - Negotiating "h2" or "h2c" implies the use of the transport, security, framing and message - semantics described in this document. - - - RFC Editor's Note: please remove the remainder of this section prior to the - publication of a final version of this document. - - - Only implementations of the final, published RFC can identify themselves as "h2" or "h2c". - Until such an RFC exists, implementations MUST NOT identify themselves using these - strings. - - - Examples and text throughout the rest of this document use "h2" as a matter of - editorial convenience only. Implementations of draft versions MUST NOT identify using - this string. - - - Implementations of draft versions of the protocol MUST add the string "-" and the - corresponding draft number to the identifier. For example, draft-ietf-httpbis-http2-11 - over TLS is identified using the string "h2-11". - - - Non-compatible experiments that are based on these draft versions MUST append the string - "-" and an experiment name to the identifier. For example, an experimental implementation - of packet mood-based encoding based on draft-ietf-httpbis-http2-09 might identify itself - as "h2-09-emo". Note that any label MUST conform to the "token" syntax defined in - . Experimenters are - encouraged to coordinate their experiments on the ietf-http-wg@w3.org mailing list. - -
- -
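As an illustrative sketch only (not part of the deleted vendor code), this is how a Go server built on the standard library would advertise the "h2" identifier described above through ALPN; the NextProtos field carries the protocol list offered during the TLS handshake, and the helper name is made up for this example.

package main

import (
	"crypto/tls"
	"fmt"
)

// advertiseH2 returns a TLS config that offers HTTP/2 ("h2") via ALPN,
// falling back to HTTP/1.1 if the peer does not select it.
func advertiseH2(base *tls.Config) *tls.Config {
	cfg := base.Clone()
	if cfg == nil {
		cfg = &tls.Config{}
	}
	cfg.NextProtos = []string{"h2", "http/1.1"}
	return cfg
}

func main() {
	cfg := advertiseH2(&tls.Config{})
	fmt.Println(cfg.NextProtos) // [h2 http/1.1]
}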
- - A client that makes a request for an "http" URI without prior knowledge about support for - HTTP/2 uses the HTTP Upgrade mechanism (). The client makes an HTTP/1.1 request that includes an Upgrade - header field identifying HTTP/2 with the "h2c" token. The HTTP/1.1 request MUST include - exactly one HTTP2-Settings header field. - -
 - For example: - -GET / HTTP/1.1 -Host: server.example.com -Connection: Upgrade, HTTP2-Settings -Upgrade: h2c -HTTP2-Settings: <base64url encoding of HTTP/2 SETTINGS payload> - -]]> -
- - Requests that contain an entity body MUST be sent in their entirety before the client can - send HTTP/2 frames. This means that a large request entity can block the use of the - connection until it is completely sent. - - - If concurrency of an initial request with subsequent requests is important, an OPTIONS - request can be used to perform the upgrade to HTTP/2, at the cost of an additional - round-trip. - - - A server that does not support HTTP/2 can respond to the request as though the Upgrade - header field were absent: - -
- -HTTP/1.1 200 OK -Content-Length: 243 -Content-Type: text/html - -... - -
- - A server MUST ignore a "h2" token in an Upgrade header field. Presence of a token with - "h2" implies HTTP/2 over TLS, which is instead negotiated as described in . - - - A server that supports HTTP/2 can accept the upgrade with a 101 (Switching Protocols) - response. After the empty line that terminates the 101 response, the server can begin - sending HTTP/2 frames. These frames MUST include a response to the request that initiated - the Upgrade. - - -
- - For example: - - -HTTP/1.1 101 Switching Protocols -Connection: Upgrade -Upgrade: h2c - -[ HTTP/2 connection ... - -
- - The first HTTP/2 frame sent by the server is a SETTINGS frame () as the server connection preface (). Upon receiving the 101 response, the client sends a connection preface, which includes a - SETTINGS frame. - - - The HTTP/1.1 request that is sent prior to upgrade is assigned stream identifier 1 and is - assigned default priority values. Stream 1 is - implicitly half closed from the client toward the server, since the request is completed - as an HTTP/1.1 request. After commencing the HTTP/2 connection, stream 1 is used for the - response. - - -
- - A request that upgrades from HTTP/1.1 to HTTP/2 MUST include exactly one HTTP2-Settings header field. The HTTP2-Settings header field is a connection-specific header field - that includes parameters that govern the HTTP/2 connection, provided in anticipation of - the server accepting the request to upgrade. - -
- -
- - A server MUST NOT upgrade the connection to HTTP/2 if this header field is not present, - or if more than one is present. A server MUST NOT send this header field. - - - - The content of the HTTP2-Settings header field is the - payload of a SETTINGS frame (), encoded as a - base64url string (that is, the URL- and filename-safe Base64 encoding described in , with any trailing '=' characters omitted). The - ABNF production for token68 is - defined in . - - - Since the upgrade is only intended to apply to the immediate connection, a client - sending HTTP2-Settings MUST also send HTTP2-Settings as a connection option in the Connection header field to prevent it from being forwarded - downstream. - - - A server decodes and interprets these values as it would any other - SETTINGS frame. Acknowledgement of the - SETTINGS parameters is not necessary, since a 101 response serves as implicit - acknowledgment. Providing these values in the Upgrade request gives a client an - opportunity to provide parameters prior to receiving any frames from the server. - -
-
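A minimal sketch of how the HTTP2-Settings header value described above can be built: each setting is a 16-bit identifier followed by a 32-bit value in network byte order, and the concatenated payload is base64url-encoded with padding omitted. The helper and type names are illustrative, not part of the deleted package.

package main

import (
	"encoding/base64"
	"encoding/binary"
	"fmt"
)

type setting struct {
	ID  uint16
	Val uint32
}

// http2SettingsValue encodes a SETTINGS payload (6 octets per setting)
// as a base64url string with trailing '=' characters omitted.
func http2SettingsValue(settings []setting) string {
	payload := make([]byte, 0, 6*len(settings))
	for _, s := range settings {
		var buf [6]byte
		binary.BigEndian.PutUint16(buf[0:2], s.ID)
		binary.BigEndian.PutUint32(buf[2:6], s.Val)
		payload = append(payload, buf[:]...)
	}
	return base64.RawURLEncoding.EncodeToString(payload)
}

func main() {
	// 0x3 = SETTINGS_MAX_CONCURRENT_STREAMS, 0x4 = SETTINGS_INITIAL_WINDOW_SIZE.
	fmt.Println(http2SettingsValue([]setting{{0x3, 100}, {0x4, 65535}}))
}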
- -
- - A client that makes a request to an "https" URI uses TLS - with the application layer protocol negotiation extension. - - - HTTP/2 over TLS uses the "h2" application token. The "h2c" token MUST NOT be sent by a - client or selected by a server. - - - Once TLS negotiation is complete, both the client and the server send a connection preface. - -
- -
- - A client can learn that a particular server supports HTTP/2 by other means. For example, - describes a mechanism for advertising this capability. - - - A client MAY immediately send HTTP/2 frames to a server that is known to support HTTP/2, - after the connection preface; a server can - identify such a connection by the presence of the connection preface. This only affects - the establishment of HTTP/2 connections over cleartext TCP; implementations that support - HTTP/2 over TLS MUST use protocol negotiation in TLS. - - - Without additional information, prior support for HTTP/2 is not a strong signal that a - given server will support HTTP/2 for future connections. For example, it is possible for - server configurations to change, for configurations to differ between instances in - clustered servers, or for network conditions to change. - -
- -
- - Upon establishment of a TCP connection and determination that HTTP/2 will be used by both - peers, each endpoint MUST send a connection preface as a final confirmation and to - establish the initial SETTINGS parameters for the HTTP/2 connection. The client and - server each send a different connection preface. - - - The client connection preface starts with a sequence of 24 octets, which in hex notation - are: - -
 - -0x505249202a20485454502f322e300d0a0d0a534d0d0a0d0a -
- - (the string PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n). This sequence - is followed by a SETTINGS frame (). The - SETTINGS frame MAY be empty. The client sends the client connection - preface immediately upon receipt of a 101 Switching Protocols response (indicating a - successful upgrade), or as the first application data octets of a TLS connection. If - starting an HTTP/2 connection with prior knowledge of server support for the protocol, the - client connection preface is sent upon connection establishment. - - - - - The client connection preface is selected so that a large proportion of HTTP/1.1 or - HTTP/1.0 servers and intermediaries do not attempt to process further frames. Note - that this does not address the concerns raised in . - - - - - The server connection preface consists of a potentially empty SETTINGS - frame () that MUST be the first frame the server sends in the - HTTP/2 connection. - - - The SETTINGS frames received from a peer as part of the connection preface - MUST be acknowledged (see ) after sending the connection - preface. - - - To avoid unnecessary latency, clients are permitted to send additional frames to the - server immediately after sending the client connection preface, without waiting to receive - the server connection preface. It is important to note, however, that the server - connection preface SETTINGS frame might include parameters that necessarily - alter how a client is expected to communicate with the server. Upon receiving the - SETTINGS frame, the client is expected to honor any parameters established. - In some configurations, it is possible for the server to transmit SETTINGS - before the client sends additional frames, providing an opportunity to avoid this issue. - - - Clients and servers MUST treat an invalid connection preface as a connection error of type - PROTOCOL_ERROR. A GOAWAY frame () - MAY be omitted in this case, since an invalid preface indicates that the peer is not using - HTTP/2. - -
-
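For reference, a small standalone Go sketch of the 24-octet client connection preface described above; the http2 package removed by this patch exports an equivalent string as ClientPreface, but this version depends only on the standard library.

package main

import "fmt"

// clientPreface is the 24-octet client connection preface
// ("PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"); a client sends it first, followed
// by a (possibly empty) SETTINGS frame, before any other frames.
const clientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"

func main() {
	fmt.Printf("% x\n", clientPreface) // the hex form of the 24 octets
	fmt.Println(len(clientPreface))    // 24
}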
- -
- - Once the HTTP/2 connection is established, endpoints can begin exchanging frames. - - -
- - All frames begin with a fixed 9-octet header followed by a variable-length payload. - -
- -
- - The fields of the frame header are defined as: - - - - The length of the frame payload expressed as an unsigned 24-bit integer. Values - greater than 214 (16,384) MUST NOT be sent unless the receiver has - set a larger value for SETTINGS_MAX_FRAME_SIZE. - - - The 9 octets of the frame header are not included in this value. - - - - - The 8-bit type of the frame. The frame type determines the format and semantics of - the frame. Implementations MUST ignore and discard any frame that has a type that - is unknown. - - - - - An 8-bit field reserved for frame-type specific boolean flags. - - - Flags are assigned semantics specific to the indicated frame type. Flags that have - no defined semantics for a particular frame type MUST be ignored, and MUST be left - unset (0) when sending. - - - - - A reserved 1-bit field. The semantics of this bit are undefined and the bit MUST - remain unset (0) when sending and MUST be ignored when receiving. - - - - - A 31-bit stream identifier (see ). The value 0 is - reserved for frames that are associated with the connection as a whole as opposed to - an individual stream. - - - - - - The structure and content of the frame payload is dependent entirely on the frame type. - -
- -
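To make the header layout above concrete, here is a self-contained Go sketch that decodes the fixed 9-octet frame header: a 24-bit payload length, an 8-bit type, an 8-bit flags field, and a reserved bit followed by a 31-bit stream identifier. The struct and function names are illustrative and are not the deleted package's API.

package main

import (
	"encoding/binary"
	"fmt"
)

type frameHeader struct {
	Length   uint32 // 24 bits on the wire
	Type     uint8
	Flags    uint8
	StreamID uint32 // 31 bits; the reserved high bit is masked off
}

func parseFrameHeader(b [9]byte) frameHeader {
	return frameHeader{
		Length:   uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2]),
		Type:     b[3],
		Flags:    b[4],
		StreamID: binary.BigEndian.Uint32(b[5:9]) & (1<<31 - 1),
	}
}

func main() {
	// A SETTINGS frame header: length 0, type 0x4, no flags, stream 0.
	fmt.Printf("%+v\n", parseFrameHeader([9]byte{0, 0, 0, 0x4, 0, 0, 0, 0, 0}))
}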
- - The size of a frame payload is limited by the maximum size that a receiver advertises in - the SETTINGS_MAX_FRAME_SIZE setting. This setting can have any value - between 214 (16,384) and 224-1 (16,777,215) octets, - inclusive. - - - All implementations MUST be capable of receiving and minimally processing frames up to - 214 octets in length, plus the 9 octet frame - header. The size of the frame header is not included when describing frame sizes. - - - Certain frame types, such as PING, impose additional limits - on the amount of payload data allowed. - - - - - If a frame size exceeds any defined limit, or is too small to contain mandatory frame - data, the endpoint MUST send a FRAME_SIZE_ERROR error. A frame size error - in a frame that could alter the state of the entire connection MUST be treated as a connection error; this includes any frame carrying - a header block (that is, HEADERS, - PUSH_PROMISE, and CONTINUATION), SETTINGS, - and any WINDOW_UPDATE frame with a stream identifier of 0. - - - Endpoints are not obligated to use all available space in a frame. Responsiveness can be - improved by using frames that are smaller than the permitted maximum size. Sending large - frames can result in delays in sending time-sensitive frames (such - RST_STREAM, WINDOW_UPDATE, or PRIORITY) - which if blocked by the transmission of a large frame, could affect performance. - -
- -
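A one-line check capturing the SETTINGS_MAX_FRAME_SIZE bounds stated above (an illustrative helper, not part of the deleted code):

package main

import "fmt"

// validMaxFrameSize reports whether v is a legal SETTINGS_MAX_FRAME_SIZE
// value: between 2^14 (16,384) and 2^24-1 (16,777,215) octets, inclusive.
func validMaxFrameSize(v uint32) bool {
	return v >= 1<<14 && v <= 1<<24-1
}

func main() {
	fmt.Println(validMaxFrameSize(16384), validMaxFrameSize(1<<24)) // true false
}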
- - Just as in HTTP/1, a header field in HTTP/2 is a name with one or more associated values. - They are used within HTTP request and response messages as well as server push operations - (see ). - - - Header lists are collections of zero or more header fields. When transmitted over a - connection, a header list is serialized into a header block using HTTP Header Compression. The serialized header block is then - divided into one or more octet sequences, called header block fragments, and transmitted - within the payload of HEADERS, PUSH_PROMISE or CONTINUATION frames. - - - The Cookie header field is treated specially by the HTTP - mapping (see ). - - - A receiving endpoint reassembles the header block by concatenating its fragments, then - decompresses the block to reconstruct the header list. - - - A complete header block consists of either: - - - a single HEADERS or PUSH_PROMISE frame, - with the END_HEADERS flag set, or - - - a HEADERS or PUSH_PROMISE frame with the END_HEADERS - flag cleared and one or more CONTINUATION frames, - where the last CONTINUATION frame has the END_HEADERS flag set. - - - - - Header compression is stateful. One compression context and one decompression context is - used for the entire connection. Each header block is processed as a discrete unit. - Header blocks MUST be transmitted as a contiguous sequence of frames, with no interleaved - frames of any other type or from any other stream. The last frame in a sequence of - HEADERS or CONTINUATION frames MUST have the END_HEADERS - flag set. The last frame in a sequence of PUSH_PROMISE or - CONTINUATION frames MUST have the END_HEADERS flag set. This allows a - header block to be logically equivalent to a single frame. - - - Header block fragments can only be sent as the payload of HEADERS, - PUSH_PROMISE or CONTINUATION frames, because these frames - carry data that can modify the compression context maintained by a receiver. An endpoint - receiving HEADERS, PUSH_PROMISE or - CONTINUATION frames MUST reassemble header blocks and perform decompression - even if the frames are to be discarded. A receiver MUST terminate the connection with a - connection error of type - COMPRESSION_ERROR if it does not decompress a header block. - -
-
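The header compression referred to above is HPACK. As a sketch under the assumption that the golang.org/x/net/http2/hpack package (whose vendored copy is removed by this patch) is available, a header list can be encoded into a header block and decoded back; on the wire the block would be carried in HEADERS/CONTINUATION frame payloads.

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	// Encode a small header list into a contiguous header block.
	var block bytes.Buffer
	enc := hpack.NewEncoder(&block)
	enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"})
	enc.WriteField(hpack.HeaderField{Name: ":path", Value: "/"})
	enc.WriteField(hpack.HeaderField{Name: "user-agent", Value: "example"})

	// The receiver concatenates the fragments and decompresses them back
	// into the original header list.
	dec := hpack.NewDecoder(4096, nil)
	fields, err := dec.DecodeFull(block.Bytes())
	if err != nil {
		panic(err)
	}
	fmt.Println(fields)
}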
- -
- - A "stream" is an independent, bi-directional sequence of frames exchanged between the client - and server within an HTTP/2 connection. Streams have several important characteristics: - - - A single HTTP/2 connection can contain multiple concurrently open streams, with either - endpoint interleaving frames from multiple streams. - - - Streams can be established and used unilaterally or shared by either the client or - server. - - - Streams can be closed by either endpoint. - - - The order in which frames are sent on a stream is significant. Recipients process frames - in the order they are received. In particular, the order of HEADERS, - and DATA frames is semantically significant. - - - Streams are identified by an integer. Stream identifiers are assigned to streams by the - endpoint initiating the stream. - - - - -
- - The lifecycle of a stream is shown in . - - -
 - [stream state diagram: transitions among "idle", "reserved (local)", "reserved (remote)", "open", "half closed (local)", "half closed (remote)", and "closed"] - - H: HEADERS frame (with implied CONTINUATIONs) - PP: PUSH_PROMISE frame (with implied CONTINUATIONs) - ES: END_STREAM flag - R: RST_STREAM frame -]]> -
- - - Note that this diagram shows stream state transitions and the frames and flags that affect - those transitions only. In this regard, CONTINUATION frames do not result - in state transitions; they are effectively part of the HEADERS or - PUSH_PROMISE that they follow. For this purpose, the END_STREAM flag is - processed as a separate event to the frame that bears it; a HEADERS frame - with the END_STREAM flag set can cause two state transitions. - - - Both endpoints have a subjective view of the state of a stream that could be different - when frames are in transit. Endpoints do not coordinate the creation of streams; they are - created unilaterally by either endpoint. The negative consequences of a mismatch in - states are limited to the "closed" state after sending RST_STREAM, where - frames might be received for some time after closing. - - - Streams have the following states: - - - - - - All streams start in the "idle" state. In this state, no frames have been - exchanged. - - - The following transitions are valid from this state: - - - Sending or receiving a HEADERS frame causes the stream to become - "open". The stream identifier is selected as described in . The same HEADERS frame can also - cause a stream to immediately become "half closed". - - - Sending a PUSH_PROMISE frame marks the associated stream for - later use. The stream state for the reserved stream transitions to "reserved - (local)". - - - Receiving a PUSH_PROMISE frame marks the associated stream as - reserved by the remote peer. The state of the stream becomes "reserved - (remote)". - - - - - Receiving any frames other than HEADERS or - PUSH_PROMISE on a stream in this state MUST be treated as a connection error of type - PROTOCOL_ERROR. - - - - - - - A stream in the "reserved (local)" state is one that has been promised by sending a - PUSH_PROMISE frame. A PUSH_PROMISE frame reserves an - idle stream by associating the stream with an open stream that was initiated by the - remote peer (see ). - - - In this state, only the following transitions are possible: - - - The endpoint can send a HEADERS frame. This causes the stream to - open in a "half closed (remote)" state. - - - Either endpoint can send a RST_STREAM frame to cause the stream - to become "closed". This releases the stream reservation. - - - - - An endpoint MUST NOT send any type of frame other than HEADERS or - RST_STREAM in this state. - - - A PRIORITY frame MAY be received in this state. Receiving any type - of frame other than RST_STREAM or PRIORITY on a stream - in this state MUST be treated as a connection - error of type PROTOCOL_ERROR. - - - - - - - A stream in the "reserved (remote)" state has been reserved by a remote peer. - - - In this state, only the following transitions are possible: - - - Receiving a HEADERS frame causes the stream to transition to - "half closed (local)". - - - Either endpoint can send a RST_STREAM frame to cause the stream - to become "closed". This releases the stream reservation. - - - - - An endpoint MAY send a PRIORITY frame in this state to reprioritize - the reserved stream. An endpoint MUST NOT send any type of frame other than - RST_STREAM, WINDOW_UPDATE, or PRIORITY - in this state. - - - Receiving any type of frame other than HEADERS or - RST_STREAM on a stream in this state MUST be treated as a connection error of type - PROTOCOL_ERROR. - - - - - - - A stream in the "open" state may be used by both peers to send frames of any type. 
- In this state, sending peers observe advertised stream
- level flow control limits.
- 
- 
- From this state either endpoint can send a frame with an END_STREAM flag set, which
- causes the stream to transition into one of the "half closed" states: an endpoint
- sending an END_STREAM flag causes the stream state to become "half closed (local)";
- an endpoint receiving an END_STREAM flag causes the stream state to become "half
- closed (remote)".
- 
- 
- Either endpoint can send a RST_STREAM frame from this state, causing
- it to transition immediately to "closed".
- 
- 
- 
- 
- 
- A stream that is in the "half closed (local)" state cannot be used for sending
- frames. Only WINDOW_UPDATE, PRIORITY and
- RST_STREAM frames can be sent in this state.
- 
- 
- A stream transitions from this state to "closed" when a frame that contains an
- END_STREAM flag is received, or when either peer sends a RST_STREAM
- frame.
- 
- 
- A receiver can ignore WINDOW_UPDATE frames in this state, which might
- arrive for a short period after a frame bearing the END_STREAM flag is sent.
- 
- 
- PRIORITY frames received in this state are used to reprioritize
- streams that depend on the current stream.
- 
- 
- 
- 
- 
- A stream that is "half closed (remote)" is no longer being used by the peer to send
- frames. In this state, an endpoint is no longer obligated to maintain a receiver
- flow control window if it performs flow control.
- 
- 
- If an endpoint receives additional frames for a stream that is in this state, other
- than WINDOW_UPDATE, PRIORITY or
- RST_STREAM, it MUST respond with a stream error of type
- STREAM_CLOSED.
- 
- 
- A stream that is "half closed (remote)" can be used by the endpoint to send frames
- of any type. In this state, the endpoint continues to observe advertised stream level flow control limits.
- 
- 
- A stream can transition from this state to "closed" by sending a frame that contains
- an END_STREAM flag, or when either peer sends a RST_STREAM frame.
- 
- 
- 
- 
- 
- The "closed" state is the terminal state.
- 
- 
- An endpoint MUST NOT send frames other than PRIORITY on a closed
- stream. An endpoint that receives any frame other than PRIORITY
- after receiving a RST_STREAM MUST treat that as a stream error of type
- STREAM_CLOSED. Similarly, an endpoint that receives any frames after
- receiving a frame with the END_STREAM flag set MUST treat that as a connection error of type
- STREAM_CLOSED, unless the frame is permitted as described below.
- 
- 
- WINDOW_UPDATE or RST_STREAM frames can be received in
- this state for a short period after a DATA or HEADERS
- frame containing an END_STREAM flag is sent. Until the remote peer receives and
- processes RST_STREAM or the frame bearing the END_STREAM flag, it
- might send frames of these types. Endpoints MUST ignore
- WINDOW_UPDATE or RST_STREAM frames received in this
- state, though endpoints MAY choose to treat frames that arrive a significant time
- after sending END_STREAM as a connection
- error of type PROTOCOL_ERROR.
- 
- 
- PRIORITY frames can be sent on closed streams to prioritize streams
- that are dependent on the closed stream. Endpoints SHOULD process
- PRIORITY frames, though they can be ignored if the stream has been
- removed from the dependency tree (see ).
- 
- 
- If this state is reached as a result of sending a RST_STREAM frame,
- the peer that receives the RST_STREAM might have already sent
- or
- enqueued for sending
- frames on the stream that cannot be withdrawn.
An endpoint
- MUST ignore frames that it receives on closed streams after it has sent a
- RST_STREAM frame. An endpoint MAY choose to limit the period over
- which it ignores frames and treat frames that arrive after this time as being in
- error.
- 
- 
- Flow controlled frames (i.e., DATA) received after sending
- RST_STREAM are counted toward the connection flow control window.
- Even though these frames might be ignored, because they are sent before the sender
- receives the RST_STREAM, the sender will consider the frames to count
- against the flow control window.
- 
- 
- An endpoint might receive a PUSH_PROMISE frame after it sends
- RST_STREAM. PUSH_PROMISE causes a stream to become
- "reserved" even if the associated stream has been reset. Therefore, a
- RST_STREAM is needed to close an unwanted promised stream.
- 
- 
- 
- 
- 
- In the absence of more specific guidance elsewhere in this document, implementations
- SHOULD treat the receipt of a frame that is not expressly permitted in the description of
- a state as a connection error of type
- PROTOCOL_ERROR. Frames of unknown types are ignored.
- 
- 
- An example of the state transitions for an HTTP request/response exchange can be found in
- . An example of the state transitions for server push can be
- found in and .
- 
- 
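As a very partial illustration of the state machine described above, a minimal Go sketch (the streamState type, its constants, and sendHeaders are invented names; only the "idle" transitions for the sending side are shown, not the full machine):

    package main

    import "fmt"

    // streamState mirrors the states enumerated above.
    type streamState int

    const (
        stateIdle streamState = iota
        stateOpen
        stateHalfClosedLocal
        stateHalfClosedRemote
        stateClosed
    )

    // sendHeaders applies the "idle" transitions for the sending side:
    // HEADERS opens the stream, and an END_STREAM flag on the same frame
    // immediately half closes the local side.
    func sendHeaders(s streamState, endStream bool) streamState {
        if s == stateIdle {
            if endStream {
                return stateHalfClosedLocal
            }
            return stateOpen
        }
        return s
    }

    func main() {
        fmt.Println(sendHeaders(stateIdle, false) == stateOpen)           // true
        fmt.Println(sendHeaders(stateIdle, true) == stateHalfClosedLocal) // true
    }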
- - Streams are identified with an unsigned 31-bit integer. Streams initiated by a client - MUST use odd-numbered stream identifiers; those initiated by the server MUST use - even-numbered stream identifiers. A stream identifier of zero (0x0) is used for - connection control messages; the stream identifier zero cannot be used to establish a - new stream. - - - HTTP/1.1 requests that are upgraded to HTTP/2 (see ) are - responded to with a stream identifier of one (0x1). After the upgrade - completes, stream 0x1 is "half closed (local)" to the client. Therefore, stream 0x1 - cannot be selected as a new stream identifier by a client that upgrades from HTTP/1.1. - - - The identifier of a newly established stream MUST be numerically greater than all - streams that the initiating endpoint has opened or reserved. This governs streams that - are opened using a HEADERS frame and streams that are reserved using - PUSH_PROMISE. An endpoint that receives an unexpected stream identifier - MUST respond with a connection error of - type PROTOCOL_ERROR. - - - The first use of a new stream identifier implicitly closes all streams in the "idle" - state that might have been initiated by that peer with a lower-valued stream identifier. - For example, if a client sends a HEADERS frame on stream 7 without ever - sending a frame on stream 5, then stream 5 transitions to the "closed" state when the - first frame for stream 7 is sent or received. - - - Stream identifiers cannot be reused. Long-lived connections can result in an endpoint - exhausting the available range of stream identifiers. A client that is unable to - establish a new stream identifier can establish a new connection for new streams. A - server that is unable to establish a new stream identifier can send a - GOAWAY frame so that the client is forced to open a new connection for - new streams. - -
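The identifier rules above can be summarized in a small Go sketch (validNewStreamID is an invented name; this is an illustration, not the vendored package's logic):

    package main

    import "fmt"

    // validNewStreamID checks the rules above: identifiers are 31-bit and
    // non-zero, clients use odd values, servers use even values, and a new
    // identifier must be numerically greater than any the initiator has
    // already opened or reserved.
    func validNewStreamID(id, lastOpened uint32, fromClient bool) bool {
        if id == 0 || id > 1<<31-1 {
            return false
        }
        if fromClient && id%2 == 0 {
            return false
        }
        if !fromClient && id%2 == 1 {
            return false
        }
        return id > lastOpened
    }

    func main() {
        fmt.Println(validNewStreamID(7, 5, true)) // true
        fmt.Println(validNewStreamID(5, 7, true)) // false: not greater than the last identifier
        fmt.Println(validNewStreamID(4, 1, true)) // false: even identifier from a client
    }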
- -
- 
- A peer can limit the number of concurrently active streams using the
- SETTINGS_MAX_CONCURRENT_STREAMS parameter (see ) within a SETTINGS frame. The maximum concurrent
- streams setting is specific to each endpoint and applies only to the peer that receives
- the setting. That is, clients specify the maximum number of concurrent streams the
- server can initiate, and servers specify the maximum number of concurrent streams the
- client can initiate.
- 
- 
- Streams that are in the "open" state, or either of the "half closed" states count toward
- the maximum number of streams that an endpoint is permitted to open. Streams in any of
- these three states count toward the limit advertised in the
- SETTINGS_MAX_CONCURRENT_STREAMS setting. Streams in either of the
- "reserved" states do not count toward the stream limit.
- 
- 
- Endpoints MUST NOT exceed the limit set by their peer. An endpoint that receives a
- HEADERS frame that causes its advertised concurrent stream limit to be
- exceeded MUST treat this as a stream error. An
- endpoint that wishes to reduce the value of
- SETTINGS_MAX_CONCURRENT_STREAMS to a value that is below the current
- number of open streams can either close streams that exceed the new value or allow
- streams to complete.
- 
-
- -
- - Using streams for multiplexing introduces contention over use of the TCP connection, - resulting in blocked streams. A flow control scheme ensures that streams on the same - connection do not destructively interfere with each other. Flow control is used for both - individual streams and for the connection as a whole. - - - HTTP/2 provides for flow control through use of the WINDOW_UPDATE frame. - - -
- - HTTP/2 stream flow control aims to allow a variety of flow control algorithms to be - used without requiring protocol changes. Flow control in HTTP/2 has the following - characteristics: - - - Flow control is specific to a connection; i.e., it is "hop-by-hop", not - "end-to-end". - - - Flow control is based on window update frames. Receivers advertise how many octets - they are prepared to receive on a stream and for the entire connection. This is a - credit-based scheme. - - - Flow control is directional with overall control provided by the receiver. A - receiver MAY choose to set any window size that it desires for each stream and for - the entire connection. A sender MUST respect flow control limits imposed by a - receiver. Clients, servers and intermediaries all independently advertise their - flow control window as a receiver and abide by the flow control limits set by - their peer when sending. - - - The initial value for the flow control window is 65,535 octets for both new streams - and the overall connection. - - - The frame type determines whether flow control applies to a frame. Of the frames - specified in this document, only DATA frames are subject to flow - control; all other frame types do not consume space in the advertised flow control - window. This ensures that important control frames are not blocked by flow control. - - - Flow control cannot be disabled. - - - HTTP/2 defines only the format and semantics of the WINDOW_UPDATE - frame (). This document does not stipulate how a - receiver decides when to send this frame or the value that it sends, nor does it - specify how a sender chooses to send packets. Implementations are able to select - any algorithm that suits their needs. - - - - - Implementations are also responsible for managing how requests and responses are sent - based on priority; choosing how to avoid head of line blocking for requests; and - managing the creation of new streams. Algorithm choices for these could interact with - any flow control algorithm. - -
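To make the credit-based scheme concrete, a minimal Go sketch (canSendData and initialWindow are illustrative names, not spec terminology) of the check a sender might make before transmitting a DATA frame:

    package main

    import "fmt"

    // canSendData reports whether a DATA payload of n octets may be sent,
    // given the space left in the stream and connection flow control
    // windows advertised by the receiver. Only DATA frames consume window
    // space; other frame types are never blocked by flow control.
    func canSendData(n, streamWindow, connWindow int64) bool {
        return n <= streamWindow && n <= connWindow
    }

    func main() {
        const initialWindow = 65535 // default for new streams and for the connection
        fmt.Println(canSendData(16384, initialWindow, initialWindow)) // true
        fmt.Println(canSendData(16384, 1000, initialWindow))          // false: stream window too small
    }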
- -
- - Flow control is defined to protect endpoints that are operating under resource - constraints. For example, a proxy needs to share memory between many connections, and - also might have a slow upstream connection and a fast downstream one. Flow control - addresses cases where the receiver is unable process data on one stream, yet wants to - continue to process other streams in the same connection. - - - Deployments that do not require this capability can advertise a flow control window of - the maximum size, incrementing the available space when new data is received. This - effectively disables flow control for that receiver. Conversely, a sender is always - subject to the flow control window advertised by the receiver. - - - Deployments with constrained resources (for example, memory) can employ flow control to - limit the amount of memory a peer can consume. Note, however, that this can lead to - suboptimal use of available network resources if flow control is enabled without - knowledge of the bandwidth-delay product (see ). - - - Even with full awareness of the current bandwidth-delay product, implementation of flow - control can be difficult. When using flow control, the receiver MUST read from the TCP - receive buffer in a timely fashion. Failure to do so could lead to a deadlock when - critical frames, such as WINDOW_UPDATE, are not read and acted upon. - -
-
- -
- - A client can assign a priority for a new stream by including prioritization information in - the HEADERS frame that opens the stream. For an existing - stream, the PRIORITY frame can be used to change the - priority. - - - The purpose of prioritization is to allow an endpoint to express how it would prefer its - peer allocate resources when managing concurrent streams. Most importantly, priority can - be used to select streams for transmitting frames when there is limited capacity for - sending. - - - Streams can be prioritized by marking them as dependent on the completion of other streams - (). Each dependency is assigned a relative weight, a number - that is used to determine the relative proportion of available resources that are assigned - to streams dependent on the same stream. - - - - Explicitly setting the priority for a stream is input to a prioritization process. It - does not guarantee any particular processing or transmission order for the stream relative - to any other stream. An endpoint cannot force a peer to process concurrent streams in a - particular order using priority. Expressing priority is therefore only ever a suggestion. - - - Providing prioritization information is optional, so default values are used if no - explicit indicator is provided (). - - -
- - Each stream can be given an explicit dependency on another stream. Including a - dependency expresses a preference to allocate resources to the identified stream rather - than to the dependent stream. - - - A stream that is not dependent on any other stream is given a stream dependency of 0x0. - In other words, the non-existent stream 0 forms the root of the tree. - - - A stream that depends on another stream is a dependent stream. The stream upon which a - stream is dependent is a parent stream. A dependency on a stream that is not currently - in the tree - such as a stream in the "idle" state - results in that stream being given - a default priority. - - - When assigning a dependency on another stream, the stream is added as a new dependency - of the parent stream. Dependent streams that share the same parent are not ordered with - respect to each other. For example, if streams B and C are dependent on stream A, and - if stream D is created with a dependency on stream A, this results in a dependency order - of A followed by B, C, and D in any order. - -
-     A                 A
-    / \      ==>      /|\
-   B   C             B D C
-]]>
- - An exclusive flag allows for the insertion of a new level of dependencies. The - exclusive flag causes the stream to become the sole dependency of its parent stream, - causing other dependencies to become dependent on the exclusive stream. In the - previous example, if stream D is created with an exclusive dependency on stream A, this - results in D becoming the dependency parent of B and C. - -
-       A                   A
-      / \                  |
-     B   C       ==>       D
-                          / \
-                         B   C
-]]>
- - Inside the dependency tree, a dependent stream SHOULD only be allocated resources if all - of the streams that it depends on (the chain of parent streams up to 0x0) are either - closed, or it is not possible to make progress on them. - - - A stream cannot depend on itself. An endpoint MUST treat this as a stream error of type PROTOCOL_ERROR. - -
- -
- - All dependent streams are allocated an integer weight between 1 and 256 (inclusive). - - - Streams with the same parent SHOULD be allocated resources proportionally based on their - weight. Thus, if stream B depends on stream A with weight 4, and C depends on stream A - with weight 12, and if no progress can be made on A, stream B ideally receives one third - of the resources allocated to stream C. - -
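A minimal Go sketch of the proportional-allocation idea (shareByWeight is an invented name, and the naive integer division here is an assumption; a real scheduler would track fractional remainders):

    package main

    import "fmt"

    // shareByWeight splits an available capacity among sibling streams in
    // proportion to their weights (1..256), as suggested above.
    func shareByWeight(capacity int, weights map[uint32]int) map[uint32]int {
        total := 0
        for _, w := range weights {
            total += w
        }
        out := make(map[uint32]int, len(weights))
        for id, w := range weights {
            out[id] = capacity * w / total
        }
        return out
    }

    func main() {
        // Streams B (id 3, weight 4) and C (id 5, weight 12) share a parent:
        // B ideally receives one third of what C receives.
        fmt.Println(shareByWeight(16000, map[uint32]int{3: 4, 5: 12})) // map[3:4000 5:12000]
    }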
- -
- - Stream priorities are changed using the PRIORITY frame. Setting a - dependency causes a stream to become dependent on the identified parent stream. - - - Dependent streams move with their parent stream if the parent is reprioritized. Setting - a dependency with the exclusive flag for a reprioritized stream moves all the - dependencies of the new parent stream to become dependent on the reprioritized stream. - - - If a stream is made dependent on one of its own dependencies, the formerly dependent - stream is first moved to be dependent on the reprioritized stream's previous parent. - The moved dependency retains its weight. - -
- 
- For example, consider an original dependency tree where B and C depend on A, D and E
- depend on C, and F depends on D. If A is made dependent on D, then D takes the place
- of A. All other dependency relationships stay the same, except for F, which becomes
- dependent on A if the reprioritization is exclusive.
- 
- [Figure: the original dependency tree, the intermediate tree after D takes the place of A, and the resulting trees for the two outcomes, labelled (intermediate), (non-exclusive) and (exclusive)]
-]]>
-
- -
- - When a stream is removed from the dependency tree, its dependencies can be moved to - become dependent on the parent of the closed stream. The weights of new dependencies - are recalculated by distributing the weight of the dependency of the closed stream - proportionally based on the weights of its dependencies. - - - Streams that are removed from the dependency tree cause some prioritization information - to be lost. Resources are shared between streams with the same parent stream, which - means that if a stream in that set closes or becomes blocked, any spare capacity - allocated to a stream is distributed to the immediate neighbors of the stream. However, - if the common dependency is removed from the tree, those streams share resources with - streams at the next highest level. - - - For example, assume streams A and B share a parent, and streams C and D both depend on - stream A. Prior to the removal of stream A, if streams A and D are unable to proceed, - then stream C receives all the resources dedicated to stream A. If stream A is removed - from the tree, the weight of stream A is divided between streams C and D. If stream D - is still unable to proceed, this results in stream C receiving a reduced proportion of - resources. For equal starting weights, C receives one third, rather than one half, of - available resources. - - - It is possible for a stream to become closed while prioritization information that - creates a dependency on that stream is in transit. If a stream identified in a - dependency has no associated priority information, then the dependent stream is instead - assigned a default priority. This potentially creates - suboptimal prioritization, since the stream could be given a priority that is different - to what is intended. - - - To avoid these problems, an endpoint SHOULD retain stream prioritization state for a - period after streams become closed. The longer state is retained, the lower the chance - that streams are assigned incorrect or default priority values. - - - This could create a large state burden for an endpoint, so this state MAY be limited. - An endpoint MAY apply a fixed upper limit on the number of closed streams for which - prioritization state is tracked to limit state exposure. The amount of additional state - an endpoint maintains could be dependent on load; under high load, prioritization state - can be discarded to limit resource commitments. In extreme cases, an endpoint could - even discard prioritization state for active or reserved streams. If a fixed limit is - applied, endpoints SHOULD maintain state for at least as many streams as allowed by - their setting for SETTINGS_MAX_CONCURRENT_STREAMS. - - - An endpoint receiving a PRIORITY frame that changes the priority of a - closed stream SHOULD alter the dependencies of the streams that depend on it, if it has - retained enough state to do so. - -
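One reading of the weight-redistribution rule above, expressed as a minimal Go sketch (redistributeWeight is an invented name; the proportional split and rounding shown here are assumptions about one reasonable implementation):

    package main

    import "fmt"

    // redistributeWeight computes new weights for the dependencies of a
    // closed stream: the closed stream's weight is divided among its
    // former children in proportion to their own weights.
    func redistributeWeight(closedWeight int, childWeights map[uint32]int) map[uint32]int {
        total := 0
        for _, w := range childWeights {
            total += w
        }
        out := make(map[uint32]int, len(childWeights))
        for id, w := range childWeights {
            nw := closedWeight * w / total
            if nw < 1 {
                nw = 1 // weights stay in the 1..256 range
            }
            out[id] = nw
        }
        return out
    }

    func main() {
        // Stream A (weight 16) closes; C and D each had weight 16, so each
        // inherits half of A's weight.
        fmt.Println(redistributeWeight(16, map[uint32]int{5: 16, 7: 16})) // map[5:8 7:8]
    }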
- -
- - Providing priority information is optional. Streams are assigned a non-exclusive - dependency on stream 0x0 by default. Pushed streams - initially depend on their associated stream. In both cases, streams are assigned a - default weight of 16. - -
-
- -
- - HTTP/2 framing permits two classes of error: - - - An error condition that renders the entire connection unusable is a connection error. - - - An error in an individual stream is a stream error. - - - - - A list of error codes is included in . - - -
- - A connection error is any error which prevents further processing of the framing layer, - or which corrupts any connection state. - - - An endpoint that encounters a connection error SHOULD first send a GOAWAY - frame () with the stream identifier of the last stream that it - successfully received from its peer. The GOAWAY frame includes an error - code that indicates why the connection is terminating. After sending the - GOAWAY frame, the endpoint MUST close the TCP connection. - - - It is possible that the GOAWAY will not be reliably received by the - receiving endpoint (see ). In the event of a connection error, - GOAWAY only provides a best effort attempt to communicate with the peer - about why the connection is being terminated. - - - An endpoint can end a connection at any time. In particular, an endpoint MAY choose to - treat a stream error as a connection error. Endpoints SHOULD send a - GOAWAY frame when ending a connection, providing that circumstances - permit it. - -
- -
- - A stream error is an error related to a specific stream that does not affect processing - of other streams. - - - An endpoint that detects a stream error sends a RST_STREAM frame () that contains the stream identifier of the stream where the error - occurred. The RST_STREAM frame includes an error code that indicates the - type of error. - - - A RST_STREAM is the last frame that an endpoint can send on a stream. - The peer that sends the RST_STREAM frame MUST be prepared to receive any - frames that were sent or enqueued for sending by the remote peer. These frames can be - ignored, except where they modify connection state (such as the state maintained for - header compression, or flow control). - - - Normally, an endpoint SHOULD NOT send more than one RST_STREAM frame for - any stream. However, an endpoint MAY send additional RST_STREAM frames if - it receives frames on a closed stream after more than a round-trip time. This behavior - is permitted to deal with misbehaving implementations. - - - An endpoint MUST NOT send a RST_STREAM in response to an - RST_STREAM frame, to avoid looping. - -
- -
- - If the TCP connection is closed or reset while streams remain in open or half closed - states, then the endpoint MUST assume that those streams were abnormally interrupted and - could be incomplete. - -
-
- -
- - HTTP/2 permits extension of the protocol. Protocol extensions can be used to provide - additional services or alter any aspect of the protocol, within the limitations described - in this section. Extensions are effective only within the scope of a single HTTP/2 - connection. - - - Extensions are permitted to use new frame types, new - settings, or new error - codes. Registries are established for managing these extension points: frame types, settings and - error codes. - - - Implementations MUST ignore unknown or unsupported values in all extensible protocol - elements. Implementations MUST discard frames that have unknown or unsupported types. - This means that any of these extension points can be safely used by extensions without - prior arrangement or negotiation. However, extension frames that appear in the middle of - a header block are not permitted; these MUST be treated - as a connection error of type - PROTOCOL_ERROR. - - - However, extensions that could change the semantics of existing protocol components MUST - be negotiated before being used. For example, an extension that changes the layout of the - HEADERS frame cannot be used until the peer has given a positive signal - that this is acceptable. In this case, it could also be necessary to coordinate when the - revised layout comes into effect. Note that treating any frame other than - DATA frames as flow controlled is such a change in semantics, and can only - be done through negotiation. - - - This document doesn't mandate a specific method for negotiating the use of an extension, - but notes that a setting could be used for that - purpose. If both peers set a value that indicates willingness to use the extension, then - the extension can be used. If a setting is used for extension negotiation, the initial - value MUST be defined so that the extension is initially disabled. - -
-
- -
- 
- This specification defines a number of frame types, each identified by a unique 8-bit type
- code. Each frame type serves a distinct purpose either in the establishment and management
- of the connection as a whole, or of individual streams.
- 
- 
- The transmission of specific frame types can alter the state of a connection. If endpoints
- fail to maintain a synchronized view of the connection state, successful communication
- within the connection will no longer be possible. Therefore, it is important that endpoints
- have a shared comprehension of how the state is affected by the use of any given frame.
- 
- 
- - DATA frames (type=0x0) convey arbitrary, variable-length sequences of octets associated - with a stream. One or more DATA frames are used, for instance, to carry HTTP request or - response payloads. - - - DATA frames MAY also contain arbitrary padding. Padding can be added to DATA frames to - obscure the size of messages. - -
- -
- - The DATA frame contains the following fields: - - - An 8-bit field containing the length of the frame padding in units of octets. This - field is optional and is only present if the PADDED flag is set. - - - Application data. The amount of data is the remainder of the frame payload after - subtracting the length of the other fields that are present. - - - Padding octets that contain no application semantic value. Padding octets MUST be set - to zero when sending and ignored when receiving. - - - - - - The DATA frame defines the following flags: - - - Bit 1 being set indicates that this frame is the last that the endpoint will send for - the identified stream. Setting this flag causes the stream to enter one of the "half closed" states or the "closed" state. - - - Bit 4 being set indicates that the Pad Length field and any padding that it describes - is present. - - - - - DATA frames MUST be associated with a stream. If a DATA frame is received whose stream - identifier field is 0x0, the recipient MUST respond with a connection error of type - PROTOCOL_ERROR. - - - DATA frames are subject to flow control and can only be sent when a stream is in the - "open" or "half closed (remote)" states. The entire DATA frame payload is included in flow - control, including Pad Length and Padding fields if present. If a DATA frame is received - whose stream is not in "open" or "half closed (local)" state, the recipient MUST respond - with a stream error of type - STREAM_CLOSED. - - - The total number of padding octets is determined by the value of the Pad Length field. If - the length of the padding is greater than the length of the frame payload, the recipient - MUST treat this as a connection error of - type PROTOCOL_ERROR. - - - A frame can be increased in size by one octet by including a Pad Length field with a - value of zero. - - - - - Padding is a security feature; see . - -
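A minimal Go sketch of stripping the optional padding from a DATA payload (dataPayload and flagPadded are illustrative names; error strings name the relevant error codes only for clarity):

    package main

    import (
        "errors"
        "fmt"
    )

    const flagPadded = 0x8 // PADDED flag bit for DATA frames

    // dataPayload strips the Pad Length field and trailing padding from a
    // DATA frame payload, returning the application data. Padding that is
    // longer than the rest of the payload is rejected, mirroring the
    // PROTOCOL_ERROR rule above.
    func dataPayload(flags byte, payload []byte) ([]byte, error) {
        if flags&flagPadded == 0 {
            return payload, nil
        }
        if len(payload) < 1 {
            return nil, errors.New("FRAME_SIZE_ERROR: missing Pad Length")
        }
        padLen := int(payload[0])
        rest := payload[1:]
        if padLen > len(rest) {
            return nil, errors.New("PROTOCOL_ERROR: padding exceeds payload")
        }
        return rest[:len(rest)-padLen], nil
    }

    func main() {
        // 1-octet Pad Length (2), 5 octets of data, 2 octets of zero padding.
        p := []byte{2, 'h', 'e', 'l', 'l', 'o', 0, 0}
        data, err := dataPayload(flagPadded, p)
        fmt.Printf("%q %v\n", data, err) // "hello" <nil>
    }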
- -
- - The HEADERS frame (type=0x1) is used to open a stream, - and additionally carries a header block fragment. HEADERS frames can be sent on a stream - in the "open" or "half closed (remote)" states. - -
- -
- 
- The HEADERS frame payload has the following fields:
- 
- 
- An 8-bit field containing the length of the frame padding in units of octets. This
- field is only present if the PADDED flag is set.
- 
- 
- A single bit flag indicates that the stream dependency is exclusive, see . This field is only present if the PRIORITY flag is set.
- 
- 
- A 31-bit stream identifier for the stream that this stream depends on, see . This field is only present if the PRIORITY flag is set.
- 
- 
- An 8-bit weight for the stream, see . Add one to the
- value to obtain a weight between 1 and 256. This field is only present if the
- PRIORITY flag is set.
- 
- 
- A header block fragment.
- 
- 
- Padding octets that contain no application semantic value. Padding octets MUST be set
- to zero when sending and ignored when receiving.
- 
- 
- 
- 
- 
- The HEADERS frame defines the following flags:
- 
- 
- 
- Bit 1 being set indicates that the header block is
- the last that the endpoint will send for the identified stream. Setting this flag
- causes the stream to enter one of the "half closed"
- states.
- 
- 
- A HEADERS frame carries the END_STREAM flag that signals the end of a stream.
- However, a HEADERS frame with the END_STREAM flag set can be followed by
- CONTINUATION frames on the same stream. Logically, the
- CONTINUATION frames are part of the HEADERS frame.
- 
- 
- 
- 
- Bit 3 being set indicates that this frame contains an entire header block and is not followed by any
- CONTINUATION frames.
- 
- 
- A HEADERS frame without the END_HEADERS flag set MUST be followed by a
- CONTINUATION frame for the same stream. A receiver MUST treat the
- receipt of any other type of frame or a frame on a different stream as a connection error of type
- PROTOCOL_ERROR.
- 
- 
- 
- 
- Bit 4 being set indicates that the Pad Length field and any padding that it
- describes is present.
- 
- 
- 
- 
- Bit 6 being set indicates that the Exclusive Flag (E), Stream Dependency, and Weight
- fields are present; see .
- 
- 
- 
- 
- 
- 
- The payload of a HEADERS frame contains a header block
- fragment. A header block that does not fit within a HEADERS frame is continued in
- a CONTINUATION frame.
- 
- 
- 
- HEADERS frames MUST be associated with a stream. If a HEADERS frame is received whose
- stream identifier field is 0x0, the recipient MUST respond with a connection error of type
- PROTOCOL_ERROR.
- 
- 
- 
- The HEADERS frame changes the connection state as described in .
- 
- 
- 
- The HEADERS frame includes optional padding. Padding fields and flags are identical to
- those defined for DATA frames.
- 
- 
- Prioritization information in a HEADERS frame is logically equivalent to a separate
- PRIORITY frame, but inclusion in HEADERS avoids the potential for churn in
- stream prioritization when new streams are created. Prioritization fields in HEADERS frames
- subsequent to the first on a stream reprioritize the
- stream.
- 
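For illustration, a minimal Go sketch of decoding the optional priority block carried when the PRIORITY flag is set (parsePriority and the priority struct are invented names; the flag constants follow the bit positions listed above):

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // HEADERS flag bits as described above (bits 1, 3, 4 and 6).
    const (
        flagEndStream  = 0x1
        flagEndHeaders = 0x4
        flagPadded     = 0x8
        flagPriority   = 0x20
    )

    // priority holds the optional fields present when FLAG_PRIORITY is set.
    type priority struct {
        Exclusive bool
        StreamDep uint32 // 31-bit parent stream identifier
        Weight    uint8  // add 1 to obtain the effective weight 1..256
    }

    // parsePriority reads the 5-octet priority block at the front of a
    // HEADERS payload (after any Pad Length octet) and returns the rest,
    // which is the header block fragment.
    func parsePriority(b []byte) (priority, []byte, error) {
        if len(b) < 5 {
            return priority{}, nil, fmt.Errorf("FRAME_SIZE_ERROR: need 5 octets, have %d", len(b))
        }
        v := binary.BigEndian.Uint32(b[:4])
        return priority{
            Exclusive: v&0x80000000 != 0,
            StreamDep: v & 0x7fffffff,
            Weight:    b[4],
        }, b[5:], nil
    }

    func main() {
        // Exclusive dependency on stream 3 with weight field 15 (effective 16).
        raw := []byte{0x80, 0x00, 0x00, 0x03, 15 /* header block fragment follows */, 0x82}
        p, rest, err := parsePriority(raw)
        fmt.Println(p.Exclusive, p.StreamDep, int(p.Weight)+1, len(rest), err) // true 3 16 1 <nil>
    }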
- -
- - The PRIORITY frame (type=0x2) specifies the sender-advised - priority of a stream. It can be sent at any time for an existing stream, including - closed streams. This enables reprioritization of existing streams. - -
- -
- - The payload of a PRIORITY frame contains the following fields: - - - A single bit flag indicates that the stream dependency is exclusive, see . - - - A 31-bit stream identifier for the stream that this stream depends on, see . - - - An 8-bit weight for the identified stream dependency, see . Add one to the value to obtain a weight between 1 and 256. - - - - - - The PRIORITY frame does not define any flags. - - - - The PRIORITY frame is associated with an existing stream. If a PRIORITY frame is received - with a stream identifier of 0x0, the recipient MUST respond with a connection error of type - PROTOCOL_ERROR. - - - The PRIORITY frame can be sent on a stream in any of the "reserved (remote)", "open", - "half closed (local)", "half closed (remote)", or "closed" states, though it cannot be - sent between consecutive frames that comprise a single header - block. Note that this frame could arrive after processing or frame sending has - completed, which would cause it to have no effect on the current stream. For a stream - that is in the "half closed (remote)" or "closed" - state, this frame can only affect - processing of the current stream and not frame transmission. - - - The PRIORITY frame is the only frame that can be sent for a stream in the "closed" state. - This allows for the reprioritization of a group of dependent streams by altering the - priority of a parent stream, which might be closed. However, a PRIORITY frame sent on a - closed stream risks being ignored due to the peer having discarded priority state - information for that stream. - -
- -
- - The RST_STREAM frame (type=0x3) allows for abnormal termination of a stream. When sent by - the initiator of a stream, it indicates that they wish to cancel the stream or that an - error condition has occurred. When sent by the receiver of a stream, it indicates that - either the receiver is rejecting the stream, requesting that the stream be cancelled, or - that an error condition has occurred. - -
- -
- - - The RST_STREAM frame contains a single unsigned, 32-bit integer identifying the error code. The error code indicates why the stream is being - terminated. - - - - The RST_STREAM frame does not define any flags. - - - - The RST_STREAM frame fully terminates the referenced stream and causes it to enter the - closed state. After receiving a RST_STREAM on a stream, the receiver MUST NOT send - additional frames for that stream, with the exception of PRIORITY. However, - after sending the RST_STREAM, the sending endpoint MUST be prepared to receive and process - additional frames sent on the stream that might have been sent by the peer prior to the - arrival of the RST_STREAM. - - - - RST_STREAM frames MUST be associated with a stream. If a RST_STREAM frame is received - with a stream identifier of 0x0, the recipient MUST treat this as a connection error of type - PROTOCOL_ERROR. - - - - RST_STREAM frames MUST NOT be sent for a stream in the "idle" state. If a RST_STREAM - frame identifying an idle stream is received, the recipient MUST treat this as a connection error of type - PROTOCOL_ERROR. - - -
- -
- - The SETTINGS frame (type=0x4) conveys configuration parameters that affect how endpoints - communicate, such as preferences and constraints on peer behavior. The SETTINGS frame is - also used to acknowledge the receipt of those parameters. Individually, a SETTINGS - parameter can also be referred to as a "setting". - - - SETTINGS parameters are not negotiated; they describe characteristics of the sending peer, - which are used by the receiving peer. Different values for the same parameter can be - advertised by each peer. For example, a client might set a high initial flow control - window, whereas a server might set a lower value to conserve resources. - - - - A SETTINGS frame MUST be sent by both endpoints at the start of a connection, and MAY be - sent at any other time by either endpoint over the lifetime of the connection. - Implementations MUST support all of the parameters defined by this specification. - - - - Each parameter in a SETTINGS frame replaces any existing value for that parameter. - Parameters are processed in the order in which they appear, and a receiver of a SETTINGS - frame does not need to maintain any state other than the current value of its - parameters. Therefore, the value of a SETTINGS parameter is the last value that is seen by - a receiver. - - - SETTINGS parameters are acknowledged by the receiving peer. To enable this, the SETTINGS - frame defines the following flag: - - - Bit 1 being set indicates that this frame acknowledges receipt and application of the - peer's SETTINGS frame. When this bit is set, the payload of the SETTINGS frame MUST - be empty. Receipt of a SETTINGS frame with the ACK flag set and a length field value - other than 0 MUST be treated as a connection - error of type FRAME_SIZE_ERROR. For more info, see Settings Synchronization. - - - - - SETTINGS frames always apply to a connection, never a single stream. The stream - identifier for a SETTINGS frame MUST be zero (0x0). If an endpoint receives a SETTINGS - frame whose stream identifier field is anything other than 0x0, the endpoint MUST respond - with a connection error of type - PROTOCOL_ERROR. - - - The SETTINGS frame affects connection state. A badly formed or incomplete SETTINGS frame - MUST be treated as a connection error of type - PROTOCOL_ERROR. - - -
- - The payload of a SETTINGS frame consists of zero or more parameters, each consisting of - an unsigned 16-bit setting identifier and an unsigned 32-bit value. - - -
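A minimal Go sketch of walking a SETTINGS payload in its 6-octet identifier/value units (parseSettings and setting are invented names; the exact error classification of a malformed length is left to the surrounding text):

    package main

    import (
        "encoding/binary"
        "errors"
        "fmt"
    )

    // setting is one identifier/value pair from a SETTINGS payload.
    type setting struct {
        ID  uint16
        Val uint32
    }

    // parseSettings splits a SETTINGS payload into its parameters. Each
    // parameter occupies 6 octets: a 16-bit identifier and a 32-bit value.
    func parseSettings(payload []byte) ([]setting, error) {
        if len(payload)%6 != 0 {
            return nil, errors.New("malformed SETTINGS payload: length not a multiple of 6 octets")
        }
        var out []setting
        for i := 0; i < len(payload); i += 6 {
            out = append(out, setting{
                ID:  binary.BigEndian.Uint16(payload[i : i+2]),
                Val: binary.BigEndian.Uint32(payload[i+2 : i+6]),
            })
        }
        return out, nil
    }

    func main() {
        // SETTINGS_MAX_CONCURRENT_STREAMS (0x3) = 100.
        payload := []byte{0x00, 0x03, 0x00, 0x00, 0x00, 0x64}
        fmt.Println(parseSettings(payload)) // [{3 100}] <nil>
    }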
- -
-
- -
- 
- The following parameters are defined:
- 
- 
- 
- Allows the sender to inform the remote endpoint of the maximum size of the header
- compression table used to decode header blocks, in octets. The encoder can select
- any size equal to or less than this value by using signaling specific to the
- header compression format inside a header block. The initial value is 4,096
- octets.
- 
- 
- 
- 
- This setting can be used to disable server
- push. An endpoint MUST NOT send a PUSH_PROMISE frame if it
- receives this parameter set to a value of 0. An endpoint that has both set this
- parameter to 0 and had it acknowledged MUST treat the receipt of a
- PUSH_PROMISE frame as a connection error of type
- PROTOCOL_ERROR.
- 
- 
- The initial value is 1, which indicates that server push is permitted. Any value
- other than 0 or 1 MUST be treated as a connection error of type
- PROTOCOL_ERROR.
- 
- 
- 
- 
- Indicates the maximum number of concurrent streams that the sender will allow.
- This limit is directional: it applies to the number of streams that the sender
- permits the receiver to create. Initially there is no limit to this value. It is
- recommended that this value be no smaller than 100, so as to not unnecessarily
- limit parallelism.
- 
- 
- A value of 0 for SETTINGS_MAX_CONCURRENT_STREAMS SHOULD NOT be treated as special
- by endpoints. A zero value does prevent the creation of new streams; however, this
- can also happen for any limit that is exhausted with active streams. Servers
- SHOULD only set a zero value for short durations; if a server does not wish to
- accept requests, closing the connection could be preferable.
- 
- 
- 
- 
- Indicates the sender's initial window size (in octets) for stream level flow
- control. The initial value is 2^16-1 (65,535) octets.
- 
- 
- This setting affects the window size of all streams, including existing streams,
- see .
- 
- 
- Values above the maximum flow control window size of 2^31-1 MUST
- be treated as a connection error of
- type FLOW_CONTROL_ERROR.
- 
- 
- 
- 
- Indicates the size of the largest frame payload that the sender is willing to
- receive, in octets.
- 
- 
- The initial value is 2^14 (16,384) octets. The value advertised by
- an endpoint MUST be between this initial value and the maximum allowed frame size
- (2^24-1 or 16,777,215 octets), inclusive. Values outside this range
- MUST be treated as a connection error
- of type PROTOCOL_ERROR.
- 
- 
- 
- 
- This advisory setting informs a peer of the maximum size of header list that the
- sender is prepared to accept, in octets. The value is based on the uncompressed
- size of header fields, including the length of the name and value in octets plus
- an overhead of 32 octets for each header field.
- 
- 
- For any given request, a lower limit than what is advertised MAY be enforced. The
- initial value of this setting is unlimited.
- 
- 
- 
- 
- 
- An endpoint that receives a SETTINGS frame with any unknown or unsupported identifier
- MUST ignore that setting.
- 
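A minimal Go sketch of the value rules listed above (validateSetting is an invented name; the numeric identifiers 0x1 through 0x6 follow the registry values assigned by this specification):

    package main

    import "fmt"

    // Setting identifiers as assigned by this specification.
    const (
        settingHeaderTableSize      = 0x1
        settingEnablePush           = 0x2
        settingMaxConcurrentStreams = 0x3
        settingInitialWindowSize    = 0x4
        settingMaxFrameSize         = 0x5
        settingMaxHeaderListSize    = 0x6
    )

    // validateSetting applies the per-parameter value rules above; unknown
    // identifiers are accepted, since a receiver simply ignores them.
    func validateSetting(id uint16, val uint32) error {
        switch id {
        case settingEnablePush:
            if val != 0 && val != 1 {
                return fmt.Errorf("PROTOCOL_ERROR: ENABLE_PUSH must be 0 or 1, got %d", val)
            }
        case settingInitialWindowSize:
            if val > 1<<31-1 {
                return fmt.Errorf("FLOW_CONTROL_ERROR: window size %d exceeds 2^31-1", val)
            }
        case settingMaxFrameSize:
            if val < 1<<14 || val > 1<<24-1 {
                return fmt.Errorf("PROTOCOL_ERROR: max frame size %d outside [2^14, 2^24-1]", val)
            }
        }
        return nil
    }

    func main() {
        fmt.Println(validateSetting(settingEnablePush, 2))        // PROTOCOL_ERROR
        fmt.Println(validateSetting(settingMaxFrameSize, 1<<16))  // <nil>
        fmt.Println(validateSetting(0x99, 42))                    // <nil>: unknown settings are ignored
    }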
- -
- - Most values in SETTINGS benefit from or require an understanding of when the peer has - received and applied the changed parameter values. In order to provide - such synchronization timepoints, the recipient of a SETTINGS frame in which the ACK flag - is not set MUST apply the updated parameters as soon as possible upon receipt. - - - The values in the SETTINGS frame MUST be processed in the order they appear, with no - other frame processing between values. Unsupported parameters MUST be ignored. Once - all values have been processed, the recipient MUST immediately emit a SETTINGS frame - with the ACK flag set. Upon receiving a SETTINGS frame with the ACK flag set, the sender - of the altered parameters can rely on the setting having been applied. - - - If the sender of a SETTINGS frame does not receive an acknowledgement within a - reasonable amount of time, it MAY issue a connection error of type - SETTINGS_TIMEOUT. - -
-
- -
- - The PUSH_PROMISE frame (type=0x5) is used to notify the peer endpoint in advance of - streams the sender intends to initiate. The PUSH_PROMISE frame includes the unsigned - 31-bit identifier of the stream the endpoint plans to create along with a set of headers - that provide additional context for the stream. contains a - thorough description of the use of PUSH_PROMISE frames. - - -
- -
- - The PUSH_PROMISE frame payload has the following fields: - - - An 8-bit field containing the length of the frame padding in units of octets. This - field is only present if the PADDED flag is set. - - - A single reserved bit. - - - An unsigned 31-bit integer that identifies the stream that is reserved by the - PUSH_PROMISE. The promised stream identifier MUST be a valid choice for the next - stream sent by the sender (see new stream - identifier). - - - A header block fragment containing request header - fields. - - - Padding octets. - - - - - - The PUSH_PROMISE frame defines the following flags: - - - - Bit 3 being set indicates that this frame contains an entire header block and is not followed by any - CONTINUATION frames. - - - A PUSH_PROMISE frame without the END_HEADERS flag set MUST be followed by a - CONTINUATION frame for the same stream. A receiver MUST treat the receipt of any - other type of frame or a frame on a different stream as a connection error of type - PROTOCOL_ERROR. - - - - - Bit 4 being set indicates that the Pad Length field and any padding that it - describes is present. - - - - - - - PUSH_PROMISE frames MUST be associated with an existing, peer-initiated stream. The stream - identifier of a PUSH_PROMISE frame indicates the stream it is associated with. If the - stream identifier field specifies the value 0x0, a recipient MUST respond with a connection error of type - PROTOCOL_ERROR. - - - - Promised streams are not required to be used in the order they are promised. The - PUSH_PROMISE only reserves stream identifiers for later use. - - - - PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH setting of the - peer endpoint is set to 0. An endpoint that has set this setting and has received - acknowledgement MUST treat the receipt of a PUSH_PROMISE frame as a connection error of type - PROTOCOL_ERROR. - - - Recipients of PUSH_PROMISE frames can choose to reject promised streams by returning a - RST_STREAM referencing the promised stream identifier back to the sender of - the PUSH_PROMISE. - - - - A PUSH_PROMISE frame modifies the connection state in two ways. The inclusion of a header block potentially modifies the state maintained for - header compression. PUSH_PROMISE also reserves a stream for later use, causing the - promised stream to enter the "reserved" state. A sender MUST NOT send a PUSH_PROMISE on a - stream unless that stream is either "open" or "half closed (remote)"; the sender MUST - ensure that the promised stream is a valid choice for a new stream identifier (that is, the promised stream MUST - be in the "idle" state). - - - Since PUSH_PROMISE reserves a stream, ignoring a PUSH_PROMISE frame causes the stream - state to become indeterminate. A receiver MUST treat the receipt of a PUSH_PROMISE on a - stream that is neither "open" nor "half closed (local)" as a connection error of type - PROTOCOL_ERROR. However, an endpoint that has sent - RST_STREAM on the associated stream MUST handle PUSH_PROMISE frames that - might have been created before the RST_STREAM frame is received and - processed. - - - A receiver MUST treat the receipt of a PUSH_PROMISE that promises an illegal stream identifier (that is, an identifier for a - stream that is not currently in the "idle" state) as a connection error of type - PROTOCOL_ERROR. - - - - The PUSH_PROMISE frame includes optional padding. Padding fields and flags are identical - to those defined for DATA frames. - -
- -
- - The PING frame (type=0x6) is a mechanism for measuring a minimal round trip time from the - sender, as well as determining whether an idle connection is still functional. PING - frames can be sent from any endpoint. - -
- -
- - - In addition to the frame header, PING frames MUST contain 8 octets of data in the payload. - A sender can include any value it chooses and use those bytes in any fashion. - - - Receivers of a PING frame that does not include an ACK flag MUST send a PING frame with - the ACK flag set in response, with an identical payload. PING responses SHOULD be given - higher priority than any other frame. - - - - The PING frame defines the following flags: - - - Bit 1 being set indicates that this PING frame is a PING response. An endpoint MUST - set this flag in PING responses. An endpoint MUST NOT respond to PING frames - containing this flag. - - - - - PING frames are not associated with any individual stream. If a PING frame is received - with a stream identifier field value other than 0x0, the recipient MUST respond with a - connection error of type - PROTOCOL_ERROR. - - - Receipt of a PING frame with a length field value other than 8 MUST be treated as a connection error of type - FRAME_SIZE_ERROR. - - -
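A minimal Go sketch of the receive-side checks and the echo behaviour described above (handlePing and flagAck are invented names):

    package main

    import (
        "bytes"
        "errors"
        "fmt"
    )

    const flagAck = 0x1 // ACK flag for PING frames

    // handlePing validates a received PING and, for a non-ACK PING, returns
    // the 8-octet payload that should be echoed back in a PING with the ACK
    // flag set. An ACK PING is not answered.
    func handlePing(streamID uint32, flags byte, payload []byte) ([]byte, error) {
        if streamID != 0 {
            return nil, errors.New("PROTOCOL_ERROR: PING must use stream 0")
        }
        if len(payload) != 8 {
            return nil, errors.New("FRAME_SIZE_ERROR: PING payload must be 8 octets")
        }
        if flags&flagAck != 0 {
            return nil, nil // a PING response; do not reply
        }
        return payload, nil
    }

    func main() {
        opaque := []byte{1, 2, 3, 4, 5, 6, 7, 8}
        echo, err := handlePing(0, 0, opaque)
        fmt.Println(bytes.Equal(echo, opaque), err) // true <nil>
    }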
- -
- 
- The GOAWAY frame (type=0x7) informs the remote peer to stop creating streams on this
- connection. GOAWAY can be sent by either the client or the server. Once sent, the sender
- will ignore frames sent on any new streams with identifiers higher than the included last
- stream identifier. Receivers of a GOAWAY frame MUST NOT open additional streams on the
- connection, although a new connection can be established for new streams.
- 
- 
- The purpose of this frame is to allow an endpoint to gracefully stop accepting new
- streams, while still finishing processing of previously established streams. This enables
- administrative actions, like server maintenance.
- 
- 
- There is an inherent race condition between an endpoint starting new streams and the
- remote sending a GOAWAY frame. To deal with this case, the GOAWAY contains the stream
- identifier of the last peer-initiated stream which was or might be processed on the
- sending endpoint in this connection. For instance, if the server sends a GOAWAY frame,
- the identified stream is the highest numbered stream initiated by the client.
- 
- 
- If the receiver of the GOAWAY has sent data on streams with a higher stream identifier
- than what is indicated in the GOAWAY frame, those streams are not or will not be
- processed. The receiver of the GOAWAY frame can treat the streams as though they had
- never been created at all, thereby allowing those streams to be retried later on a new
- connection.
- 
- 
- Endpoints SHOULD always send a GOAWAY frame before closing a connection so that the remote
- can know whether a stream has been partially processed or not. For example, if an HTTP
- client sends a POST at the same time that a server closes a connection, the client cannot
- know if the server started to process that POST request if the server does not send a
- GOAWAY frame to indicate what streams it might have acted on.
- 
- 
- An endpoint might choose to close a connection without sending GOAWAY for misbehaving
- peers.
- 
- 
- -
- 
- The GOAWAY frame does not define any flags.
- 
- 
- The GOAWAY frame applies to the connection, not a specific stream. An endpoint MUST treat
- a GOAWAY frame with a stream identifier other than 0x0 as a connection error of type
- PROTOCOL_ERROR.
- 
- 
- The last stream identifier in the GOAWAY frame contains the highest numbered stream
- identifier for which the sender of the GOAWAY frame might have taken some action on, or
- might yet take action on. All streams up to and including the identified stream might
- have been processed in some way. The last stream identifier can be set to 0 if no streams
- were processed.
- 
- 
- In this context, "processed" means that some data from the stream was passed to some
- higher layer of software that might have taken some action as a result.
- 
- 
- If a connection terminates without a GOAWAY frame, the last stream identifier is
- effectively the highest possible stream identifier.
- 
- 
- On streams with lower or equal numbered identifiers that were not closed completely prior
- to the connection being closed, re-attempting requests, transactions, or any protocol
- activity is not possible, with the exception of idempotent actions like HTTP GET, PUT, or
- DELETE. Any protocol activity that uses higher numbered streams can be safely retried
- using a new connection.
- 
- 
- Activity on streams numbered lower or equal to the last stream identifier might still
- complete successfully. The sender of a GOAWAY frame might gracefully shut down a
- connection by sending a GOAWAY frame, maintaining the connection in an open state until
- all in-progress streams complete.
- 
- 
- An endpoint MAY send multiple GOAWAY frames if circumstances change. For instance, an
- endpoint that sends GOAWAY with NO_ERROR during graceful shutdown could
- subsequently encounter a condition that requires immediate termination of the connection.
- The last stream identifier from the last GOAWAY frame received indicates which streams
- could have been acted upon. Endpoints MUST NOT increase the value they send in the last
- stream identifier, since the peers might already have retried unprocessed requests on
- another connection.
- 
- 
- A client that is unable to retry requests loses all requests that are in flight when the
- server closes the connection. This is especially true for intermediaries that might
- not be serving clients using HTTP/2. A server that is attempting to gracefully shut down
- a connection SHOULD send an initial GOAWAY frame with the last stream identifier set to
- 2^31-1 and a NO_ERROR code. This signals to the client that
- a shutdown is imminent and that no further requests can be initiated. After waiting at
- least one round trip time, the server can send another GOAWAY frame with an updated last
- stream identifier. This ensures that a connection can be cleanly shut down without losing
- requests.
- 
- 
- 
- After sending a GOAWAY frame, the sender can discard frames for streams with identifiers
- higher than the identified last stream. However, any frames that alter connection state
- cannot be completely ignored. For instance, HEADERS,
- PUSH_PROMISE and CONTINUATION frames MUST be minimally
- processed to ensure the state maintained for header compression is consistent (see ); similarly DATA frames MUST be counted toward the connection flow
- control window. Failure to process these frames can cause flow control or header
- compression state to become unsynchronized.
- - - - The GOAWAY frame also contains a 32-bit error code that - contains the reason for closing the connection. - - - Endpoints MAY append opaque data to the payload of any GOAWAY frame. Additional debug - data is intended for diagnostic purposes only and carries no semantic value. Debug - information could contain security- or privacy-sensitive data. Logged or otherwise - persistently stored debug data MUST have adequate safeguards to prevent unauthorized - access. - -
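As an illustration of the retry rule implied by the last stream identifier, a minimal Go sketch (canRetryOnNewConnection is an invented name):

    package main

    import "fmt"

    // canRetryOnNewConnection reports whether a request sent on streamID can
    // safely be replayed on a new connection after receiving a GOAWAY that
    // carried lastStreamID: streams above that identifier were not and will
    // not be processed by the sender of the GOAWAY.
    func canRetryOnNewConnection(streamID, lastStreamID uint32) bool {
        return streamID > lastStreamID
    }

    func main() {
        // The server sent GOAWAY with a last stream identifier of 5.
        fmt.Println(canRetryOnNewConnection(7, 5)) // true: never processed
        fmt.Println(canRetryOnNewConnection(3, 5)) // false: may have been processed
    }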
- -
- - The WINDOW_UPDATE frame (type=0x8) is used to implement flow control; see for an overview. - - - Flow control operates at two levels: on each individual stream and on the entire - connection. - - - Both types of flow control are hop-by-hop; that is, only between the two endpoints. - Intermediaries do not forward WINDOW_UPDATE frames between dependent connections. - However, throttling of data transfer by any receiver can indirectly cause the propagation - of flow control information toward the original sender. - - - Flow control only applies to frames that are identified as being subject to flow control. - Of the frame types defined in this document, this includes only DATA frames. - Frames that are exempt from flow control MUST be accepted and processed, unless the - receiver is unable to assign resources to handling the frame. A receiver MAY respond with - a stream error or connection error of type - FLOW_CONTROL_ERROR if it is unable to accept a frame. - -
- -
- 
- The payload of a WINDOW_UPDATE frame is one reserved bit, plus an unsigned 31-bit integer
- indicating the number of octets that the sender can transmit in addition to the existing
- flow control window. The legal range for the increment to the flow control window is 1 to
- 2^31-1 (0x7fffffff) octets.
- 
- 
- The WINDOW_UPDATE frame does not define any flags.
- 
- 
- The WINDOW_UPDATE frame can be specific to a stream or to the entire connection. In the
- former case, the frame's stream identifier indicates the affected stream; in the latter,
- the value "0" indicates that the entire connection is the subject of the frame.
- 
- 
- A receiver MUST treat the receipt of a WINDOW_UPDATE frame with a flow control window
- increment of 0 as a stream error of type
- PROTOCOL_ERROR; errors on the connection flow control window MUST be
- treated as a connection error.
- 
- 
- WINDOW_UPDATE can be sent by a peer that has sent a frame bearing the END_STREAM flag.
- This means that a receiver could receive a WINDOW_UPDATE frame on a "half closed (remote)"
- or "closed" stream. A receiver MUST NOT treat this as an error, see .
- 
- 
- A receiver that receives a flow controlled frame MUST always account for its contribution
- against the connection flow control window, unless the receiver treats this as a connection error. This is necessary even if the
- frame is in error. Since the sender counts the frame toward the flow control window, if
- the receiver does not, the flow control window at sender and receiver can become
- different.
- 
- 
- - Flow control in HTTP/2 is implemented using a window kept by each sender on every - stream. The flow control window is a simple integer value that indicates how many octets - of data the sender is permitted to transmit; as such, its size is a measure of the - buffering capacity of the receiver. - - - Two flow control windows are applicable: the stream flow control window and the - connection flow control window. The sender MUST NOT send a flow controlled frame with a - length that exceeds the space available in either of the flow control windows advertised - by the receiver. Frames with zero length with the END_STREAM flag set (that is, an - empty DATA frame) MAY be sent if there is no available space in either - flow control window. - - - For flow control calculations, the 9 octet frame header is not counted. - - - After sending a flow controlled frame, the sender reduces the space available in both - windows by the length of the transmitted frame. - - - The receiver of a frame sends a WINDOW_UPDATE frame as it consumes data and frees up - space in flow control windows. Separate WINDOW_UPDATE frames are sent for the stream - and connection level flow control windows. - - - A sender that receives a WINDOW_UPDATE frame updates the corresponding window by the - amount specified in the frame. - - - A sender MUST NOT allow a flow control window to exceed 231-1 octets. - If a sender receives a WINDOW_UPDATE that causes a flow control window to exceed this - maximum it MUST terminate either the stream or the connection, as appropriate. For - streams, the sender sends a RST_STREAM with the error code of - FLOW_CONTROL_ERROR code; for the connection, a GOAWAY - frame with a FLOW_CONTROL_ERROR code. - - - Flow controlled frames from the sender and WINDOW_UPDATE frames from the receiver are - completely asynchronous with respect to each other. This property allows a receiver to - aggressively update the window size kept by the sender to prevent streams from stalling. - -
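The accounting described above can be expressed compactly. The following non-normative Go sketch mirrors the kind of per-stream and per-connection window tracking the golang.org/x/net/http2 package keeps in its flow-control code; the names and types here are illustrative:

    package example

    // flowWindow tracks the space a sender believes is available. A real
    // implementation keeps one instance per stream plus one for the
    // connection and debits both before sending a flow-controlled frame.
    type flowWindow struct{ avail int32 }

    // take reserves n octets prior to sending; it refuses to overrun the
    // window advertised by the receiver.
    func (w *flowWindow) take(n int32) bool {
        if n > w.avail {
            return false
        }
        w.avail -= n
        return true
    }

    // add applies a WINDOW_UPDATE increment; a false return means the window
    // would exceed 2^31-1 and the update must be rejected with
    // FLOW_CONTROL_ERROR.
    func (w *flowWindow) add(n int32) bool {
        if w.avail > 0 && n > (1<<31-1)-w.avail {
            return false
        }
        w.avail += n
        return true
    }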
- -
- - When an HTTP/2 connection is first established, new streams are created with an initial - flow control window size of 65,535 octets. The connection flow control window is 65,535 - octets. Both endpoints can adjust the initial window size for new streams by including - a value for SETTINGS_INITIAL_WINDOW_SIZE in the SETTINGS - frame that forms part of the connection preface. The connection flow control window can - only be changed using WINDOW_UPDATE frames. - - - Prior to receiving a SETTINGS frame that sets a value for - SETTINGS_INITIAL_WINDOW_SIZE, an endpoint can only use the default - initial window size when sending flow controlled frames. Similarly, the connection flow - control window is set to the default initial window size until a WINDOW_UPDATE frame is - received. - - - A SETTINGS frame can alter the initial flow control window size for all - current streams. When the value of SETTINGS_INITIAL_WINDOW_SIZE changes, - a receiver MUST adjust the size of all stream flow control windows that it maintains by - the difference between the new value and the old value. - - - A change to SETTINGS_INITIAL_WINDOW_SIZE can cause the available space in - a flow control window to become negative. A sender MUST track the negative flow control - window, and MUST NOT send new flow controlled frames until it receives WINDOW_UPDATE - frames that cause the flow control window to become positive. - - - For example, if the client sends 60KB immediately on connection establishment, and the - server sets the initial window size to be 16KB, the client will recalculate the - available flow control window to be -44KB on receipt of the SETTINGS - frame. The client retains a negative flow control window until WINDOW_UPDATE frames - restore the window to being positive, after which the client can resume sending. - - - A SETTINGS frame cannot alter the connection flow control window. - - - An endpoint MUST treat a change to SETTINGS_INITIAL_WINDOW_SIZE that - causes any flow control window to exceed the maximum size as a connection error of type - FLOW_CONTROL_ERROR. - -
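The arithmetic in the example above, written out as a non-normative Go snippet; the constants mirror the 60KB and 16KB figures used in the text:

    package example

    import "fmt"

    func Example() {
        const oldInitial = 65535     // default initial window size
        const newInitial = 16 * 1024 // value from the server's SETTINGS frame
        const sent = 60 * 1024       // octets the client has already sent

        avail := oldInitial - sent       // 4095 octets left under the old window
        avail += newInitial - oldInitial // adjust by the delta: -45056, i.e. -44KB
        fmt.Println(avail)               // negative: no more DATA until WINDOW_UPDATEs arrive
    }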
- -
- - A receiver that wishes to use a smaller flow control window than the current size can - send a new SETTINGS frame. However, the receiver MUST be prepared to - receive data that exceeds this window size, since the sender might send data that - exceeds the lower limit prior to processing the SETTINGS frame. - - - After sending a SETTINGS frame that reduces the initial flow control window size, a - receiver has two options for handling streams that exceed flow control limits: - - - The receiver can immediately send RST_STREAM with - FLOW_CONTROL_ERROR error code for the affected streams. - - - The receiver can accept the streams and tolerate the resulting head of line - blocking, sending WINDOW_UPDATE frames as it consumes data. - - - -
-
- -
- - The CONTINUATION frame (type=0x9) is used to continue a sequence of header block fragments. Any number of CONTINUATION frames can - be sent on an existing stream, as long as the preceding frame is on the same stream and is - a HEADERS, PUSH_PROMISE or CONTINUATION frame without the - END_HEADERS flag set. - - -
- -
- - The CONTINUATION frame payload contains a header block - fragment. - - - - The CONTINUATION frame defines the following flag: - - - - Bit 3 being set indicates that this frame ends a header - block. - - - If the END_HEADERS bit is not set, this frame MUST be followed by another - CONTINUATION frame. A receiver MUST treat the receipt of any other type of frame or - a frame on a different stream as a connection - error of type PROTOCOL_ERROR. - - - - - - - The CONTINUATION frame changes the connection state as defined in . - - - - CONTINUATION frames MUST be associated with a stream. If a CONTINUATION frame is received - whose stream identifier field is 0x0, the recipient MUST respond with a connection error of type PROTOCOL_ERROR. - - - - A CONTINUATION frame MUST be preceded by a HEADERS, - PUSH_PROMISE or CONTINUATION frame without the END_HEADERS flag set. A - recipient that observes violation of this rule MUST respond with a connection error of type - PROTOCOL_ERROR. - -
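A non-normative sketch of the sequencing rule above; the frame-type constant and the bookkeeping variable are illustrative rather than taken from any particular implementation:

    package example

    import "errors"

    const frameTypeContinuation = 0x9

    // checkContinuation enforces the rule that, while a header block is open
    // (expectOn != 0), the only acceptable frame is a CONTINUATION on that
    // same stream; anything else is a connection error of type PROTOCOL_ERROR.
    func checkContinuation(expectOn uint32, frameType uint8, streamID uint32) error {
        if expectOn == 0 {
            return nil // no header block in progress
        }
        if frameType != frameTypeContinuation || streamID != expectOn {
            return errors.New("PROTOCOL_ERROR: expected CONTINUATION on the stream in progress")
        }
        return nil
    }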
-
- -
- - Error codes are 32-bit fields that are used in RST_STREAM and - GOAWAY frames to convey the reasons for the stream or connection error. - - - - Error codes share a common code space. Some error codes apply only to either streams or the - entire connection and have no defined semantics in the other context. - - - - The following error codes are defined: - - - The associated condition is not as a result of an error. For example, a - GOAWAY might include this code to indicate graceful shutdown of a - connection. - - - The endpoint detected an unspecific protocol error. This error is for use when a more - specific error code is not available. - - - The endpoint encountered an unexpected internal error. - - - The endpoint detected that its peer violated the flow control protocol. - - - The endpoint sent a SETTINGS frame, but did not receive a response in a - timely manner. See Settings Synchronization. - - - The endpoint received a frame after a stream was half closed. - - - The endpoint received a frame with an invalid size. - - - The endpoint refuses the stream prior to performing any application processing, see - for details. - - - Used by the endpoint to indicate that the stream is no longer needed. - - - The endpoint is unable to maintain the header compression context for the connection. - - - The connection established in response to a CONNECT - request was reset or abnormally closed. - - - The endpoint detected that its peer is exhibiting a behavior that might be generating - excessive load. - - - The underlying transport has properties that do not meet minimum security - requirements (see ). - - - - - Unknown or unsupported error codes MUST NOT trigger any special behavior. These MAY be - treated by an implementation as being equivalent to INTERNAL_ERROR. - -
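For reference, the codes listed above expressed as Go constants; the names follow the ErrCode naming convention used by golang.org/x/net/http2, while the values themselves are fixed by the specification:

    package example

    const (
        ErrCodeNo                 uint32 = 0x0 // NO_ERROR
        ErrCodeProtocol           uint32 = 0x1 // PROTOCOL_ERROR
        ErrCodeInternal           uint32 = 0x2 // INTERNAL_ERROR
        ErrCodeFlowControl        uint32 = 0x3 // FLOW_CONTROL_ERROR
        ErrCodeSettingsTimeout    uint32 = 0x4 // SETTINGS_TIMEOUT
        ErrCodeStreamClosed       uint32 = 0x5 // STREAM_CLOSED
        ErrCodeFrameSize          uint32 = 0x6 // FRAME_SIZE_ERROR
        ErrCodeRefusedStream      uint32 = 0x7 // REFUSED_STREAM
        ErrCodeCancel             uint32 = 0x8 // CANCEL
        ErrCodeCompression        uint32 = 0x9 // COMPRESSION_ERROR
        ErrCodeConnect            uint32 = 0xa // CONNECT_ERROR
        ErrCodeEnhanceYourCalm    uint32 = 0xb // ENHANCE_YOUR_CALM
        ErrCodeInadequateSecurity uint32 = 0xc // INADEQUATE_SECURITY
    )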
- -
- - HTTP/2 is intended to be as compatible as possible with current uses of HTTP. This means - that, from the application perspective, the features of the protocol are largely - unchanged. To achieve this, all request and response semantics are preserved, although the - syntax of conveying those semantics has changed. - - - Thus, the specification and requirements of HTTP/1.1 Semantics and Content , Conditional Requests , Range Requests , Caching and Authentication are applicable to HTTP/2. Selected portions of HTTP/1.1 Message Syntax - and Routing , such as the HTTP and HTTPS URI schemes, are also - applicable in HTTP/2, but the expression of those semantics for this protocol are defined - in the sections below. - - -
- - A client sends an HTTP request on a new stream, using a previously unused stream identifier. A server sends an HTTP response on - the same stream as the request. - - - An HTTP message (request or response) consists of: - - - for a response only, zero or more HEADERS frames (each followed by zero - or more CONTINUATION frames) containing the message headers of - informational (1xx) HTTP responses (see and ), - and - - - one HEADERS frame (followed by zero or more CONTINUATION - frames) containing the message headers (see ), and - - - zero or more DATA frames containing the message payload (see ), and - - - optionally, one HEADERS frame, followed by zero or more - CONTINUATION frames containing the trailer-part, if present (see ). - - - The last frame in the sequence bears an END_STREAM flag, noting that a - HEADERS frame bearing the END_STREAM flag can be followed by - CONTINUATION frames that carry any remaining portions of the header block. - - - Other frames (from any stream) MUST NOT occur between either HEADERS frame - and any CONTINUATION frames that might follow. - - - - Trailing header fields are carried in a header block that also terminates the stream. - That is, a sequence starting with a HEADERS frame, followed by zero or more - CONTINUATION frames, where the HEADERS frame bears an - END_STREAM flag. Header blocks after the first that do not terminate the stream are not - part of an HTTP request or response. - - - A HEADERS frame (and associated CONTINUATION frames) can - only appear at the start or end of a stream. An endpoint that receives a - HEADERS frame without the END_STREAM flag set after receiving a final - (non-informational) status code MUST treat the corresponding request or response as malformed. - - - - An HTTP request/response exchange fully consumes a single stream. A request starts with - the HEADERS frame that puts the stream into an "open" state. The request - ends with a frame bearing END_STREAM, which causes the stream to become "half closed - (local)" for the client and "half closed (remote)" for the server. A response starts with - a HEADERS frame and ends with a frame bearing END_STREAM, which places the - stream in the "closed" state. - - - -
- - HTTP/2 removes support for the 101 (Switching Protocols) informational status code - (). - - - The semantics of 101 (Switching Protocols) aren't applicable to a multiplexed protocol. - Alternative protocols are able to use the same mechanisms that HTTP/2 uses to negotiate - their use (see ). - -
- -
- - HTTP header fields carry information as a series of key-value pairs. For a listing of - registered HTTP headers, see the Message Header Field Registry maintained at . - - -
- - While HTTP/1.x used the message start-line (see ) to convey the target URI and method of the request, and the - status code for the response, HTTP/2 uses special pseudo-header fields beginning with - ':' character (ASCII 0x3a) for this purpose. - - - Pseudo-header fields are not HTTP header fields. Endpoints MUST NOT generate - pseudo-header fields other than those defined in this document. - - - Pseudo-header fields are only valid in the context in which they are defined. - Pseudo-header fields defined for requests MUST NOT appear in responses; pseudo-header - fields defined for responses MUST NOT appear in requests. Pseudo-header fields MUST - NOT appear in trailers. Endpoints MUST treat a request or response that contains - undefined or invalid pseudo-header fields as malformed. - - - Just as in HTTP/1.x, header field names are strings of ASCII characters that are - compared in a case-insensitive fashion. However, header field names MUST be converted - to lowercase prior to their encoding in HTTP/2. A request or response containing - uppercase header field names MUST be treated as malformed. - - - All pseudo-header fields MUST appear in the header block before regular header fields. - Any request or response that contains a pseudo-header field that appears in a header - block after a regular header field MUST be treated as malformed. - -
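A non-normative sketch of the two checks above (pseudo-header ordering and lowercase field names); the headerField type is illustrative and not an API of any package:

    package example

    import "strings"

    type headerField struct{ Name, Value string }

    // validHeaderBlock reports whether all pseudo-header fields precede the
    // regular fields and every field name is already lowercase; a violation
    // makes the request or response malformed.
    func validHeaderBlock(fields []headerField) bool {
        sawRegular := false
        for _, f := range fields {
            if f.Name != strings.ToLower(f.Name) {
                return false // uppercase names are not allowed in HTTP/2
            }
            if strings.HasPrefix(f.Name, ":") {
                if sawRegular {
                    return false // pseudo-header after a regular header field
                }
                continue
            }
            sawRegular = true
        }
        return true
    }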
- -
- - HTTP/2 does not use the Connection header field to - indicate connection-specific header fields; in this protocol, connection-specific - metadata is conveyed by other means. An endpoint MUST NOT generate a HTTP/2 message - containing connection-specific header fields; any message containing - connection-specific header fields MUST be treated as malformed. - - - This means that an intermediary transforming an HTTP/1.x message to HTTP/2 will need - to remove any header fields nominated by the Connection header field, along with the - Connection header field itself. Such intermediaries SHOULD also remove other - connection-specific header fields, such as Keep-Alive, Proxy-Connection, - Transfer-Encoding and Upgrade, even if they are not nominated by Connection. - - - One exception to this is the TE header field, which MAY be present in an HTTP/2 - request, but when it is MUST NOT contain any value other than "trailers". - - - - - HTTP/2 purposefully does not support upgrade to another protocol. The handshake - methods described in are believed sufficient to - negotiate the use of alternative protocols. - - - -
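A non-normative sketch of the translation rule above, for an intermediary converting HTTP/1.x messages: drop everything nominated by Connection, the Connection field itself, the other connection-specific fields named in the text, and any TE value other than "trailers":

    package example

    import (
        "net/http"
        "strings"
    )

    func stripConnectionHeaders(h http.Header) {
        // Remove every field nominated by Connection, then Connection itself
        // and the other connection-specific fields.
        for _, v := range h.Values("Connection") {
            for _, nominated := range strings.Split(v, ",") {
                h.Del(strings.TrimSpace(nominated))
            }
        }
        for _, name := range []string{"Connection", "Keep-Alive", "Proxy-Connection", "Transfer-Encoding", "Upgrade"} {
            h.Del(name)
        }
        // TE may survive only with the value "trailers".
        if te := h.Get("TE"); te != "" && !strings.EqualFold(te, "trailers") {
            h.Del("TE")
        }
    }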
- -
- - The following pseudo-header fields are defined for HTTP/2 requests: - - - - The :method pseudo-header field includes the HTTP - method (). - - - - - The :scheme pseudo-header field includes the scheme - portion of the target URI (). - - - :scheme is not restricted to http and https schemed URIs. A - proxy or gateway can translate requests for non-HTTP schemes, enabling the use - of HTTP to interact with non-HTTP services. - - - - - The :authority pseudo-header field includes the - authority portion of the target URI (). The authority MUST NOT include the deprecated userinfo subcomponent for http - or https schemed URIs. - - - To ensure that the HTTP/1.1 request line can be reproduced accurately, this - pseudo-header field MUST be omitted when translating from an HTTP/1.1 request - that has a request target in origin or asterisk form (see ). Clients that generate - HTTP/2 requests directly SHOULD use the :authority pseudo-header - field instead of the Host header field. An - intermediary that converts an HTTP/2 request to HTTP/1.1 MUST create a Host header field if one is not present in a request by - copying the value of the :authority pseudo-header - field. - - - - - The :path pseudo-header field includes the path and - query parts of the target URI (the path-absolute - production from and optionally a '?' character - followed by the query production, see and ). A request in asterisk form includes the value '*' for the - :path pseudo-header field. - - - This pseudo-header field MUST NOT be empty for http - or https URIs; http or - https URIs that do not contain a path component - MUST include a value of '/'. The exception to this rule is an OPTIONS request - for an http or https - URI that does not include a path component; these MUST include a :path pseudo-header field with a value of '*' (see ). - - - - - - All HTTP/2 requests MUST include exactly one valid value for the :method, :scheme, and :path pseudo-header fields, unless it is a CONNECT request. An HTTP request that omits mandatory - pseudo-header fields is malformed. - - - HTTP/2 does not define a way to carry the version identifier that is included in the - HTTP/1.1 request line. - -
- -
- - For HTTP/2 responses, a single :status pseudo-header - field is defined that carries the HTTP status code field (see ). This pseudo-header field MUST be included in all - responses, otherwise the response is malformed. - - - HTTP/2 does not define a way to carry the version or reason phrase that is included in - an HTTP/1.1 status line. - -
- -
- - The Cookie header field can carry a significant amount of - redundant data. - - - The Cookie header field uses a semi-colon (";") to delimit cookie-pairs (or "crumbs"). - This header field doesn't follow the list construction rules in HTTP (see ), which prevents cookie-pairs from - being separated into different name-value pairs. This can significantly reduce - compression efficiency as individual cookie-pairs are updated. - - - To allow for better compression efficiency, the Cookie header field MAY be split into - separate header fields, each with one or more cookie-pairs. If there are multiple - Cookie header fields after decompression, these MUST be concatenated into a single - octet string using the two octet delimiter of 0x3B, 0x20 (the ASCII string "; ") - before being passed into a non-HTTP/2 context, such as an HTTP/1.1 connection, or a - generic HTTP server application. - -
- - Therefore, the following two lists of Cookie header fields are semantically - equivalent. - - -
-
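A non-normative sketch of the concatenation step above, for code handing a request off to an HTTP/1.1 or generic server context:

    package example

    import "strings"

    // joinCookieCrumbs rebuilds a single Cookie header field from the split
    // fields, using the two-octet delimiter 0x3B 0x20 ("; ").
    func joinCookieCrumbs(crumbs []string) string {
        return strings.Join(crumbs, "; ")
    }

    // joinCookieCrumbs([]string{"a=b", "c=d", "e=f"}) == "a=b; c=d; e=f"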
- -
- - A malformed request or response is one that is an otherwise valid sequence of HTTP/2 - frames, but is otherwise invalid due to the presence of extraneous frames, prohibited - header fields, the absence of mandatory header fields, or the inclusion of uppercase - header field names. - - - A request or response that includes an entity body can include a content-length header field. A request or response is also - malformed if the value of a content-length header field - does not equal the sum of the DATA frame payload lengths that form the - body. A response that is defined to have no payload, as described in , can have a non-zero - content-length header field, even though no content is - included in DATA frames. - - - Intermediaries that process HTTP requests or responses (i.e., any intermediary not - acting as a tunnel) MUST NOT forward a malformed request or response. Malformed - requests or responses that are detected MUST be treated as a stream error of type PROTOCOL_ERROR. - - - For malformed requests, a server MAY send an HTTP response prior to closing or - resetting the stream. Clients MUST NOT accept a malformed response. Note that these - requirements are intended to protect against several types of common attacks against - HTTP; they are deliberately strict, because being permissive can expose - implementations to these vulnerabilities. - -
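A non-normative sketch of the content-length check above; declared is the parsed content-length value and payloadLens are the lengths of the data carried by the DATA frames that form the body:

    package example

    // contentLengthMatches reports whether the declared content-length equals
    // the total amount of data carried by the message's DATA frames; a
    // mismatch makes the message malformed.
    func contentLengthMatches(declared int64, payloadLens []int) bool {
        var sum int64
        for _, n := range payloadLens {
            sum += int64(n)
        }
        return sum == declared
    }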
-
- -
- - This section shows HTTP/1.1 requests and responses, with illustrations of equivalent - HTTP/2 requests and responses. - - - An HTTP GET request includes request header fields and no body and is therefore - transmitted as a single HEADERS frame, followed by zero or more - CONTINUATION frames containing the serialized block of request header - fields. The HEADERS frame in the following has both the END_HEADERS and - END_STREAM flags set; no CONTINUATION frames are sent: - - -
- + END_STREAM - Accept: image/jpeg + END_HEADERS - :method = GET - :scheme = https - :path = /resource - host = example.org - accept = image/jpeg -]]> -
- - - Similarly, a response that includes only response header fields is transmitted as a - HEADERS frame (again, followed by zero or more - CONTINUATION frames) containing the serialized block of response header - fields. - - -
- + END_STREAM - Expires: Thu, 23 Jan ... + END_HEADERS - :status = 304 - etag = "xyzzy" - expires = Thu, 23 Jan ... -]]> -
- - - An HTTP POST request that includes request header fields and payload data is transmitted - as one HEADERS frame, followed by zero or more - CONTINUATION frames containing the request header fields, followed by one - or more DATA frames, with the last CONTINUATION (or - HEADERS) frame having the END_HEADERS flag set and the final - DATA frame having the END_STREAM flag set: - - -
- - END_STREAM - Content-Type: image/jpeg - END_HEADERS - Content-Length: 123 :method = POST - :path = /resource - {binary data} :scheme = https - - CONTINUATION - + END_HEADERS - content-type = image/jpeg - host = example.org - content-length = 123 - - DATA - + END_STREAM - {binary data} -]]> - - Note that data contributing to any given header field could be spread between header - block fragments. The allocation of header fields to frames in this example is - illustrative only. - -
- - - A response that includes header fields and payload data is transmitted as a - HEADERS frame, followed by zero or more CONTINUATION - frames, followed by one or more DATA frames, with the last - DATA frame in the sequence having the END_STREAM flag set: - - -
- - END_STREAM - Content-Length: 123 + END_HEADERS - :status = 200 - {binary data} content-type = image/jpeg - content-length = 123 - - DATA - + END_STREAM - {binary data} -]]> -
- - - Trailing header fields are sent as a header block after both the request or response - header block and all the DATA frames have been sent. The - HEADERS frame starting the trailers header block has the END_STREAM flag - set. - - -
- - END_STREAM - Transfer-Encoding: chunked + END_HEADERS - Trailer: Foo :status = 200 - content-length = 123 - 123 content-type = image/jpeg - {binary data} trailer = Foo - 0 - Foo: bar DATA - - END_STREAM - {binary data} - - HEADERS - + END_STREAM - + END_HEADERS - foo = bar -]]> -
- - -
- - An informational response using a 1xx status code other than 101 is transmitted as a - HEADERS frame, followed by zero or more CONTINUATION - frames: - - - END_STREAM - + END_HEADERS - :status = 103 - extension-field = bar -]]> -
-
- -
- - In HTTP/1.1, an HTTP client is unable to retry a non-idempotent request when an error - occurs, because there is no means to determine the nature of the error. It is possible - that some server processing occurred prior to the error, which could result in - undesirable effects if the request were reattempted. - - - HTTP/2 provides two mechanisms for providing a guarantee to a client that a request has - not been processed: - - - The GOAWAY frame indicates the highest stream number that might have - been processed. Requests on streams with higher numbers are therefore guaranteed to - be safe to retry. - - - The REFUSED_STREAM error code can be included in a - RST_STREAM frame to indicate that the stream is being closed prior to - any processing having occurred. Any request that was sent on the reset stream can - be safely retried. - - - - - Requests that have not been processed have not failed; clients MAY automatically retry - them, even those with non-idempotent methods. - - - A server MUST NOT indicate that a stream has not been processed unless it can guarantee - that fact. If frames that are on a stream are passed to the application layer for any - stream, then REFUSED_STREAM MUST NOT be used for that stream, and a - GOAWAY frame MUST include a stream identifier that is greater than or - equal to the given stream identifier. - - - In addition to these mechanisms, the PING frame provides a way for a - client to easily test a connection. Connections that remain idle can become broken as - some middleboxes (for instance, network address translators, or load balancers) silently - discard connection bindings. The PING frame allows a client to safely - test whether a connection is still active without sending a request. - -
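A non-normative sketch of the retry decision that the two mechanisms above allow a client to make; the inputs are assumed to be tracked as part of the client's connection state:

    package example

    const errCodeRefusedStream = 0x7 // REFUSED_STREAM

    // safeToRetry reports whether a request is guaranteed not to have been
    // processed: either its stream is above the GOAWAY last-stream-id, or the
    // server reset it with REFUSED_STREAM before doing any work.
    func safeToRetry(streamID, goAwayLastStreamID uint32, wasReset bool, resetCode uint32) bool {
        if streamID > goAwayLastStreamID {
            return true
        }
        return wasReset && resetCode == errCodeRefusedStream
    }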
-
- -
- - HTTP/2 allows a server to pre-emptively send (or "push") responses (along with - corresponding "promised" requests) to a client in association with a previous - client-initiated request. This can be useful when the server knows the client will need - to have those responses available in order to fully process the response to the original - request. - - - - Pushing additional message exchanges in this fashion is optional, and is negotiated - between individual endpoints. The SETTINGS_ENABLE_PUSH setting can be set - to 0 to indicate that server push is disabled. - - - Promised requests MUST be cacheable (see ), MUST be safe (see ) and MUST NOT include a request body. Clients that receive a - promised request that is not cacheable, unsafe or that includes a request body MUST - reset the stream with a stream error of type - PROTOCOL_ERROR. - - - Pushed responses that are cacheable (see ) can be stored by the client, if it implements a HTTP - cache. Pushed responses are considered successfully validated on the origin server (e.g., - if the "no-cache" cache response directive is present) while the stream identified by the - promised stream ID is still open. - - - Pushed responses that are not cacheable MUST NOT be stored by any HTTP cache. They MAY - be made available to the application separately. - - - An intermediary can receive pushes from the server and choose not to forward them on to - the client. In other words, how to make use of the pushed information is up to that - intermediary. Equally, the intermediary might choose to make additional pushes to the - client, without any action taken by the server. - - - A client cannot push. Thus, servers MUST treat the receipt of a - PUSH_PROMISE frame as a connection - error of type PROTOCOL_ERROR. Clients MUST reject any attempt to - change the SETTINGS_ENABLE_PUSH setting to a value other than 0 by treating - the message as a connection error of type - PROTOCOL_ERROR. - - -
- - Server push is semantically equivalent to a server responding to a request; however, in - this case that request is also sent by the server, as a PUSH_PROMISE - frame. - - - The PUSH_PROMISE frame includes a header block that contains a complete - set of request header fields that the server attributes to the request. It is not - possible to push a response to a request that includes a request body. - - - - Pushed responses are always associated with an explicit request from the client. The - PUSH_PROMISE frames sent by the server are sent on that explicit - request's stream. The PUSH_PROMISE frame also includes a promised stream - identifier, chosen from the stream identifiers available to the server (see ). - - - - The header fields in PUSH_PROMISE and any subsequent - CONTINUATION frames MUST be a valid and complete set of request header fields. The server MUST include a method in - the :method header field that is safe and cacheable. If a - client receives a PUSH_PROMISE that does not include a complete and valid - set of header fields, or the :method header field identifies - a method that is not safe, it MUST respond with a stream error of type PROTOCOL_ERROR. - - - - The server SHOULD send PUSH_PROMISE () - frames prior to sending any frames that reference the promised responses. This avoids a - race where clients issue requests prior to receiving any PUSH_PROMISE - frames. - - - For example, if the server receives a request for a document containing embedded links - to multiple image files, and the server chooses to push those additional images to the - client, sending push promises before the DATA frames that contain the - image links ensures that the client is able to see the promises before discovering - embedded links. Similarly, if the server pushes responses referenced by the header block - (for instance, in Link header fields), sending the push promises before sending the - header block ensures that clients do not request them. - - - - PUSH_PROMISE frames MUST NOT be sent by the client. - - - PUSH_PROMISE frames can be sent by the server in response to any - client-initiated stream, but the stream MUST be in either the "open" or "half closed - (remote)" state with respect to the server. PUSH_PROMISE frames are - interspersed with the frames that comprise a response, though they cannot be - interspersed with HEADERS and CONTINUATION frames that - comprise a single header block. - - - Sending a PUSH_PROMISE frame creates a new stream and puts the stream - into the “reserved (local)” state for the server and the “reserved (remote)” state for - the client. - -
- -
- - After sending the PUSH_PROMISE frame, the server can begin delivering the - pushed response as a response on a server-initiated - stream that uses the promised stream identifier. The server uses this stream to - transmit an HTTP response, using the same sequence of frames as defined in . This stream becomes "half closed" - to the client after the initial HEADERS frame is sent. - - - - Once a client receives a PUSH_PROMISE frame and chooses to accept the - pushed response, the client SHOULD NOT issue any requests for the promised response - until after the promised stream has closed. - - - - If the client determines, for any reason, that it does not wish to receive the pushed - response from the server, or if the server takes too long to begin sending the promised - response, the client can send an RST_STREAM frame, using either the - CANCEL or REFUSED_STREAM codes, and referencing the pushed - stream's identifier. - - - A client can use the SETTINGS_MAX_CONCURRENT_STREAMS setting to limit the - number of responses that can be concurrently pushed by a server. Advertising a - SETTINGS_MAX_CONCURRENT_STREAMS value of zero disables server push by - preventing the server from creating the necessary streams. This does not prohibit a - server from sending PUSH_PROMISE frames; clients need to reset any - promised streams that are not wanted. - - - - Clients receiving a pushed response MUST validate that either the server is - authoritative (see ), or the proxy that provided the pushed - response is configured for the corresponding request. For example, a server that offers - a certificate for only the example.com DNS-ID or Common Name - is not permitted to push a response for https://www.example.org/doc. - - - The response for a PUSH_PROMISE stream begins with a - HEADERS frame, which immediately puts the stream into the “half closed - (remote)” state for the server and “half closed (local)” state for the client, and ends - with a frame bearing END_STREAM, which places the stream in the "closed" state. - - - The client never sends a frame with the END_STREAM flag for a server push. - - - -
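In Go's standard library this surfaces as the http.Pusher interface. A non-normative handler sketch follows; the pushed path is only an example, and Push returns an error when the peer has disabled push or the connection is not HTTP/2:

    package example

    import (
        "io"
        "log"
        "net/http"
    )

    func handler(w http.ResponseWriter, r *http.Request) {
        // Promise the stylesheet before writing the response that references it.
        if pusher, ok := w.(http.Pusher); ok {
            if err := pusher.Push("/style.css", nil); err != nil {
                log.Printf("push not available: %v", err)
            }
        }
        io.WriteString(w, `<html><link rel="stylesheet" href="/style.css">...</html>`)
    }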
- -
- -
- - In HTTP/1.x, the pseudo-method CONNECT () is used to convert an HTTP connection into a tunnel to a remote host. - CONNECT is primarily used with HTTP proxies to establish a TLS session with an origin - server for the purposes of interacting with https resources. - - - In HTTP/2, the CONNECT method is used to establish a tunnel over a single HTTP/2 stream to - a remote host, for similar purposes. The HTTP header field mapping works as defined in - Request Header Fields, with a few - differences. Specifically: - - - The :method header field is set to CONNECT. - - - The :scheme and :path header - fields MUST be omitted. - - - The :authority header field contains the host and port to - connect to (equivalent to the authority-form of the request-target of CONNECT - requests, see ). - - - - - A proxy that supports CONNECT establishes a TCP connection to - the server identified in the :authority header field. Once - this connection is successfully established, the proxy sends a HEADERS - frame containing a 2xx series status code to the client, as defined in . - - - After the initial HEADERS frame sent by each peer, all subsequent - DATA frames correspond to data sent on the TCP connection. The payload of - any DATA frames sent by the client is transmitted by the proxy to the TCP - server; data received from the TCP server is assembled into DATA frames by - the proxy. Frame types other than DATA or stream management frames - (RST_STREAM, WINDOW_UPDATE, and PRIORITY) - MUST NOT be sent on a connected stream, and MUST be treated as a stream error if received. - - - The TCP connection can be closed by either peer. The END_STREAM flag on a - DATA frame is treated as being equivalent to the TCP FIN bit. A client is - expected to send a DATA frame with the END_STREAM flag set after receiving - a frame bearing the END_STREAM flag. A proxy that receives a DATA frame - with the END_STREAM flag set sends the attached data with the FIN bit set on the last TCP - segment. A proxy that receives a TCP segment with the FIN bit set sends a - DATA frame with the END_STREAM flag set. Note that the final TCP segment - or DATA frame could be empty. - - - A TCP connection error is signaled with RST_STREAM. A proxy treats any - error in the TCP connection, which includes receiving a TCP segment with the RST bit set, - as a stream error of type - CONNECT_ERROR. Correspondingly, a proxy MUST send a TCP segment with the - RST bit set if it detects an error with the stream or the HTTP/2 connection. - -
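A rough, non-normative sketch of a CONNECT handler on a Go HTTP/2-capable server: the request body carries the client's tunnel bytes and the response body carries the server's, mirroring the DATA-to-TCP mapping above. A production proxy needs flushing, timeouts and error propagation beyond this:

    package example

    import (
        "io"
        "net"
        "net/http"
    )

    func connectHandler(w http.ResponseWriter, r *http.Request) {
        if r.Method != http.MethodConnect {
            http.Error(w, "expected CONNECT", http.StatusMethodNotAllowed)
            return
        }
        upstream, err := net.Dial("tcp", r.Host) // :authority carries host and port
        if err != nil {
            http.Error(w, err.Error(), http.StatusBadGateway)
            return
        }
        defer upstream.Close()
        w.WriteHeader(http.StatusOK) // the 2xx HEADERS frame establishes the tunnel
        if f, ok := w.(http.Flusher); ok {
            f.Flush()
        }
        go io.Copy(upstream, r.Body) // client DATA frames -> TCP
        io.Copy(w, upstream)         // TCP -> DATA frames toward the client
    }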
-
- -
- - This section outlines attributes of the HTTP protocol that improve interoperability, reduce - exposure to known security vulnerabilities, or reduce the potential for implementation - variation. - - -
- - HTTP/2 connections are persistent. For best performance, it is expected clients will not - close connections until it is determined that no further communication with a server is - necessary (for example, when a user navigates away from a particular web page), or until - the server closes the connection. - - - Clients SHOULD NOT open more than one HTTP/2 connection to a given host and port pair, - where host is derived from a URI, a selected alternative - service, or a configured proxy. - - - A client can create additional connections as replacements, either to replace connections - that are near to exhausting the available stream - identifier space, to refresh the keying material for a TLS connection, or to - replace connections that have encountered errors. - - - A client MAY open multiple connections to the same IP address and TCP port using different - Server Name Indication values or to provide different TLS - client certificates, but SHOULD avoid creating multiple connections with the same - configuration. - - - Servers are encouraged to maintain open connections for as long as possible, but are - permitted to terminate idle connections if necessary. When either endpoint chooses to - close the transport-layer TCP connection, the terminating endpoint SHOULD first send a - GOAWAY () frame so that both endpoints can reliably - determine whether previously sent frames have been processed and gracefully complete or - terminate any necessary remaining tasks. - - -
- - Connections that are made to an origin servers, either directly or through a tunnel - created using the CONNECT method MAY be reused for - requests with multiple different URI authority components. A connection can be reused - as long as the origin server is authoritative. For - http resources, this depends on the host having resolved to - the same IP address. - - - For https resources, connection reuse additionally depends - on having a certificate that is valid for the host in the URI. An origin server might - offer a certificate with multiple subjectAltName attributes, - or names with wildcards, one of which is valid for the authority in the URI. For - example, a certificate with a subjectAltName of *.example.com might permit the use of the same connection for - requests to URIs starting with https://a.example.com/ and - https://b.example.com/. - - - In some deployments, reusing a connection for multiple origins can result in requests - being directed to the wrong origin server. For example, TLS termination might be - performed by a middlebox that uses the TLS Server Name Indication - (SNI) extension to select an origin server. This means that it is possible - for clients to send confidential information to servers that might not be the intended - target for the request, even though the server is otherwise authoritative. - - - A server that does not wish clients to reuse connections can indicate that it is not - authoritative for a request by sending a 421 (Misdirected Request) status code in response - to the request (see ). - - - A client that is configured to use a proxy over HTTP/2 directs requests to that proxy - through a single connection. That is, all requests sent via a proxy reuse the - connection to the proxy. - -
- -
- - The 421 (Misdirected Request) status code indicates that the request was directed at a - server that is not able to produce a response. This can be sent by a server that is not - configured to produce responses for the combination of scheme and authority that are - included in the request URI. - - - Clients receiving a 421 (Misdirected Request) response from a server MAY retry the - request - whether the request method is idempotent or not - over a different connection. - This is possible if a connection is reused () or if an alternative - service is selected (). - - - This status code MUST NOT be generated by proxies. - - - A 421 response is cacheable by default; i.e., unless otherwise indicated by the method - definition or explicit cache controls (see ). - -
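A non-normative Go sketch of that behaviour; isAuthoritative stands in for whatever check the server uses to decide whether it can answer for the request's authority:

    package example

    import "net/http"

    func serve(isAuthoritative func(host string) bool) http.HandlerFunc {
        return func(w http.ResponseWriter, r *http.Request) {
            if !isAuthoritative(r.Host) {
                // 421 tells the client to retry the request on another connection.
                http.Error(w, "misdirected request", http.StatusMisdirectedRequest)
                return
            }
            // ... normal handling ...
        }
    }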
-
- -
- - Implementations of HTTP/2 MUST support TLS 1.2 for HTTP/2 over - TLS. The general TLS usage guidance in SHOULD be followed, with - some additional restrictions that are specific to HTTP/2. - - - - An implementation of HTTP/2 over TLS MUST use TLS 1.2 or higher with the restrictions on - feature set and cipher suite described in this section. Due to implementation - limitations, it might not be possible to fail TLS negotiation. An endpoint MUST - immediately terminate an HTTP/2 connection that does not meet these minimum requirements - with a connection error of type - INADEQUATE_SECURITY. - - -
- - The TLS implementation MUST support the Server Name Indication - (SNI) extension to TLS. HTTP/2 clients MUST indicate the target domain name when - negotiating TLS. - - - The TLS implementation MUST disable compression. TLS compression can lead to the - exposure of information that would not otherwise be revealed . - Generic compression is unnecessary since HTTP/2 provides compression features that are - more aware of context and therefore likely to be more appropriate for use for - performance, security or other reasons. - - - The TLS implementation MUST disable renegotiation. An endpoint MUST treat a TLS - renegotiation as a connection error of type - PROTOCOL_ERROR. Note that disabling renegotiation can result in - long-lived connections becoming unusable due to limits on the number of messages the - underlying cipher suite can encipher. - - - A client MAY use renegotiation to provide confidentiality protection for client - credentials offered in the handshake, but any renegotiation MUST occur prior to sending - the connection preface. A server SHOULD request a client certificate if it sees a - renegotiation request immediately after establishing a connection. - - - This effectively prevents the use of renegotiation in response to a request for a - specific protected resource. A future specification might provide a way to support this - use case. - -
- -
- - The set of TLS cipher suites that are permitted in HTTP/2 is restricted. HTTP/2 MUST - only be used with cipher suites that have ephemeral key exchange, such as the ephemeral Diffie-Hellman (DHE) or the elliptic curve variant (ECDHE). Ephemeral key exchange MUST - have a minimum size of 2048 bits for DHE or a security level of 128 bits for ECDHE. - Clients MUST accept DHE sizes of up to 4096 bits. HTTP/2 MUST NOT be used with cipher - suites that use stream or block ciphers. Authenticated Encryption with Additional Data - (AEAD) modes, such as the Galois Counter Mode (GCM) for - AES, are acceptable. - - - The effect of these restrictions is that TLS 1.2 implementations could have - non-intersecting sets of available cipher suites, since these prevent the use of the - cipher suite that TLS 1.2 makes mandatory. To avoid this problem, implementations of - HTTP/2 that use TLS 1.2 MUST support TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 with P256 . - - - Clients MAY advertise support of cipher suites that are prohibited by the above - restrictions in order to allow for connection to servers that do not support HTTP/2. - This enables a fallback to protocols without these constraints without the additional - latency imposed by using a separate connection for fallback. -
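A non-normative crypto/tls configuration satisfying these requirements for a Go server: TLS 1.2 as the floor, ECDHE plus AEAD suites including the mandatory TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 with P-256, and "h2" offered through ALPN. Go's crypto/tls does not implement TLS-level compression, so no extra step is needed for that restriction:

    package example

    import "crypto/tls"

    // cfg restricts the handshake to the profile described above.
    var cfg = &tls.Config{
        MinVersion: tls.VersionTLS12,
        NextProtos: []string{"h2"}, // ALPN identifier for HTTP/2 over TLS
        CipherSuites: []uint16{
            tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,   // mandatory suite
            tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, // ECDSA equivalent
        },
        CurvePreferences: []tls.CurveID{tls.CurveP256},
    }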
-
-
- -
-
- - HTTP/2 relies on the HTTP/1.1 definition of authority for determining whether a server is - authoritative in providing a given response, see . This relies on local name resolution for the "http" - URI scheme, and the authenticated server identity for the "https" scheme (see ). - -
- -
- - In a cross-protocol attack, an attacker causes a client to initiate a transaction in one - protocol toward a server that understands a different protocol. An attacker might be able - to cause the transaction to appear as valid transaction in the second protocol. In - combination with the capabilities of the web context, this can be used to interact with - poorly protected servers in private networks. - - - Completing a TLS handshake with an ALPN identifier for HTTP/2 can be considered sufficient - protection against cross protocol attacks. ALPN provides a positive indication that a - server is willing to proceed with HTTP/2, which prevents attacks on other TLS-based - protocols. - - - The encryption in TLS makes it difficult for attackers to control the data which could be - used in a cross-protocol attack on a cleartext protocol. - - - The cleartext version of HTTP/2 has minimal protection against cross-protocol attacks. - The connection preface contains a string that is - designed to confuse HTTP/1.1 servers, but no special protection is offered for other - protocols. A server that is willing to ignore parts of an HTTP/1.1 request containing an - Upgrade header field in addition to the client connection preface could be exposed to a - cross-protocol attack. - -
- -
- - HTTP/2 header field names and values are encoded as sequences of octets with a length - prefix. This enables HTTP/2 to carry any string of octets as the name or value of a - header field. An intermediary that translates HTTP/2 requests or responses into HTTP/1.1 - directly could permit the creation of corrupted HTTP/1.1 messages. An attacker might - exploit this behavior to cause the intermediary to create HTTP/1.1 messages with illegal - header fields, extra header fields, or even new messages that are entirely falsified. - - - Header field names or values that contain characters not permitted by HTTP/1.1, including - carriage return (ASCII 0xd) or line feed (ASCII 0xa) MUST NOT be translated verbatim by an - intermediary, as stipulated in . - - - Translation from HTTP/1.x to HTTP/2 does not produce the same opportunity to an attacker. - Intermediaries that perform translation to HTTP/2 MUST remove any instances of the obs-fold production from header field values. - -
- -
- - Pushed responses do not have an explicit request from the client; the request - is provided by the server in the PUSH_PROMISE frame. - - - Caching responses that are pushed is possible based on the guidance provided by the origin - server in the Cache-Control header field. However, this can cause issues if a single - server hosts more than one tenant. For example, a server might offer multiple users each - a small portion of its URI space. - - - Where multiple tenants share space on the same server, that server MUST ensure that - tenants are not able to push representations of resources that they do not have authority - over. Failure to enforce this would allow a tenant to provide a representation that would - be served out of cache, overriding the actual representation that the authoritative tenant - provides. - - - Pushed responses for which an origin server is not authoritative (see - ) are never cached or used. - -
- -
- - An HTTP/2 connection can demand a greater commitment of resources to operate than a - HTTP/1.1 connection. The use of header compression and flow control depend on a - commitment of resources for storing a greater amount of state. Settings for these - features ensure that memory commitments for these features are strictly bounded. - - - The number of PUSH_PROMISE frames is not constrained in the same fashion. - A client that accepts server push SHOULD limit the number of streams it allows to be in - the "reserved (remote)" state. Excessive number of server push streams can be treated as - a stream error of type - ENHANCE_YOUR_CALM. - - - Processing capacity cannot be guarded as effectively as state capacity. - - - The SETTINGS frame can be abused to cause a peer to expend additional - processing time. This might be done by pointlessly changing SETTINGS parameters, setting - multiple undefined parameters, or changing the same setting multiple times in the same - frame. WINDOW_UPDATE or PRIORITY frames can be abused to - cause an unnecessary waste of resources. - - - Large numbers of small or empty frames can be abused to cause a peer to expend time - processing frame headers. Note however that some uses are entirely legitimate, such as - the sending of an empty DATA frame to end a stream. - - - Header compression also offers some opportunities to waste processing resources; see for more details on potential abuses. - - - Limits in SETTINGS parameters cannot be reduced instantaneously, which - leaves an endpoint exposed to behavior from a peer that could exceed the new limits. In - particular, immediately after establishing a connection, limits set by a server are not - known to clients and could be exceeded without being an obvious protocol violation. - - - All these features - i.e., SETTINGS changes, small frames, header - compression - have legitimate uses. These features become a burden only when they are - used unnecessarily or to excess. - - - An endpoint that doesn't monitor this behavior exposes itself to a risk of denial of - service attack. Implementations SHOULD track the use of these features and set limits on - their use. An endpoint MAY treat activity that is suspicious as a connection error of type - ENHANCE_YOUR_CALM. - - -
- - A large header block can cause an implementation to - commit a large amount of state. Header fields that are critical for routing can appear - toward the end of a header block, which prevents streaming of header fields to their - ultimate destination. For this and other reasons, such as ensuring cache correctness, - an endpoint might need to buffer the entire header block. Since there is no - hard limit to the size of a header block, some endpoints could be forced to commit a large - amount of available memory for header fields. - - - An endpoint can use the SETTINGS_MAX_HEADER_LIST_SIZE to advise peers of - limits that might apply on the size of header blocks. This setting is only advisory, so - endpoints MAY choose to send header blocks that exceed this limit and risk having the - request or response being treated as malformed. This setting is specific to a connection, - so any request or response could encounter a hop with a lower, unknown limit. An - intermediary can attempt to avoid this problem by passing on values presented by - different peers, but they are not obligated to do so. - - - A server that receives a larger header block than it is willing to handle can send an - HTTP 431 (Request Header Fields Too Large) status code . A - client can discard responses that it cannot process. The header block MUST be processed - to ensure a consistent connection state, unless the connection is closed. -
-
- -
- - HTTP/2 enables greater use of compression for both header fields () and entity bodies. Compression can allow an attacker to recover - secret data when it is compressed in the same context as data under attacker control. - - - There are demonstrable attacks on compression that exploit the characteristics of the web - (e.g., ). The attacker induces multiple requests containing - varying plaintext, observing the length of the resulting ciphertext in each, which - reveals a shorter length when a guess about the secret is correct. - - - Implementations communicating on a secure channel MUST NOT compress content that includes - both confidential and attacker-controlled data unless separate compression dictionaries - are used for each source of data. Compression MUST NOT be used if the source of data - cannot be reliably determined. Generic stream compression, such as that provided by TLS - MUST NOT be used with HTTP/2 (). - - - Further considerations regarding the compression of header fields are described in . - -
- -
- - Padding within HTTP/2 is not intended as a replacement for general purpose padding, such - as might be provided by TLS. Redundant padding could even be - counterproductive. Correct application can depend on having specific knowledge of the - data that is being padded. - - - To mitigate attacks that rely on compression, disabling or limiting compression might be - preferable to padding as a countermeasure. - - - Padding can be used to obscure the exact size of frame content, and is provided to - mitigate specific attacks within HTTP. For example, attacks where compressed content - includes both attacker-controlled plaintext and secret data (see for example, ). - - - Use of padding can result in less protection than might seem immediately obvious. At - best, padding only makes it more difficult for an attacker to infer length information by - increasing the number of frames an attacker has to observe. Incorrectly implemented - padding schemes can be easily defeated. In particular, randomized padding with a - predictable distribution provides very little protection; similarly, padding payloads to a - fixed size exposes information as payload sizes cross the fixed size boundary, which could - be possible if an attacker can control plaintext. - - - Intermediaries SHOULD retain padding for DATA frames, but MAY drop padding - for HEADERS and PUSH_PROMISE frames. A valid reason for an - intermediary to change the amount of padding of frames is to improve the protections that - padding provides. - -
- -
- - Several characteristics of HTTP/2 provide an observer an opportunity to correlate actions - of a single client or server over time. This includes the value of settings, the manner - in which flow control windows are managed, the way priorities are allocated to streams, - timing of reactions to stimulus, and handling of any optional features. - - - As far as this creates observable differences in behavior, they could be used as a basis - for fingerprinting a specific client, as defined in . - -
-
- -
- - A string for identifying HTTP/2 is entered into the "Application Layer Protocol Negotiation - (ALPN) Protocol IDs" registry established in . - - - This document establishes a registry for frame types, settings, and error codes. These new - registries are entered into a new "Hypertext Transfer Protocol (HTTP) 2 Parameters" section. - - - This document registers the HTTP2-Settings header field for - use in HTTP; and the 421 (Misdirected Request) status code. - - - This document registers the PRI method for use in HTTP, to avoid - collisions with the connection preface. - - -
- - This document creates two registrations for the identification of HTTP/2 in the - "Application Layer Protocol Negotiation (ALPN) Protocol IDs" registry established in . - - - The "h2" string identifies HTTP/2 when used over TLS: - - HTTP/2 over TLS - 0x68 0x32 ("h2") - This document - - - - The "h2c" string identifies HTTP/2 when used over cleartext TCP: - - HTTP/2 over TCP - 0x68 0x32 0x63 ("h2c") - This document - - -
- -
- - This document establishes a registry for HTTP/2 frame type codes. The "HTTP/2 Frame - Type" registry manages an 8-bit space. The "HTTP/2 Frame Type" registry operates under - either of the "IETF Review" or "IESG Approval" policies for - values between 0x00 and 0xef, with values between 0xf0 and 0xff being reserved for - experimental use. - - - New entries in this registry require the following information: - - - A name or label for the frame type. - - - The 8-bit code assigned to the frame type. - - - A reference to a specification that includes a description of the frame layout, - its semantics and flags that the frame type uses, including any parts of the frame - that are conditionally present based on the value of flags. - - - - - The entries in the following table are registered by this document. - - - Frame Type - Code - Section - DATA - 0x0 - HEADERS - 0x1 - PRIORITY - 0x2 - RST_STREAM - 0x3 - SETTINGS - 0x4 - PUSH_PROMISE - 0x5 - PING - 0x6 - GOAWAY - 0x7 - WINDOW_UPDATE - 0x8 - CONTINUATION - 0x9 -
- -
- - This document establishes a registry for HTTP/2 settings. The "HTTP/2 Settings" registry - manages a 16-bit space. The "HTTP/2 Settings" registry operates under the "Expert Review" policy for values in the range from 0x0000 to - 0xefff, with values between 0xf000 and 0xffff being reserved for experimental use. - - - New registrations are advised to provide the following information: - - - A symbolic name for the setting. Specifying a setting name is optional. - - - The 16-bit code assigned to the setting. - - - An initial value for the setting. - - - An optional reference to a specification that describes the use of the setting. - - - - - An initial set of setting registrations can be found in . - - - Name - Code - Initial Value - Specification - HEADER_TABLE_SIZE - 0x1 - 4096 - ENABLE_PUSH - 0x2 - 1 - MAX_CONCURRENT_STREAMS - 0x3 - (infinite) - INITIAL_WINDOW_SIZE - 0x4 - 65535 - MAX_FRAME_SIZE - 0x5 - 16384 - MAX_HEADER_LIST_SIZE - 0x6 - (infinite) - - -
- -
- - This document establishes a registry for HTTP/2 error codes. The "HTTP/2 Error Code" - registry manages a 32-bit space. The "HTTP/2 Error Code" registry operates under the - "Expert Review" policy. - - - Registrations for error codes are required to include a description of the error code. An - expert reviewer is advised to examine new registrations for possible duplication with - existing error codes. Use of existing registrations is to be encouraged, but not - mandated. - - - New registrations are advised to provide the following information: - - - A name for the error code. Specifying an error code name is optional. - - - The 32-bit error code value. - - - A brief description of the error code semantics, longer if no detailed specification - is provided. - - - An optional reference for a specification that defines the error code. - - - - - The entries in the following table are registered by this document. - - - Name - Code - Description - Specification - NO_ERROR0x0 - Graceful shutdown - - PROTOCOL_ERROR0x1 - Protocol error detected - - INTERNAL_ERROR0x2 - Implementation fault - - FLOW_CONTROL_ERROR0x3 - Flow control limits exceeded - - SETTINGS_TIMEOUT0x4 - Settings not acknowledged - - STREAM_CLOSED0x5 - Frame received for closed stream - - FRAME_SIZE_ERROR0x6 - Frame size incorrect - - REFUSED_STREAM0x7 - Stream not processed - - CANCEL0x8 - Stream cancelled - - COMPRESSION_ERROR0x9 - Compression state not updated - - CONNECT_ERROR0xa - TCP connection error for CONNECT method - - ENHANCE_YOUR_CALM0xb - Processing capacity exceeded - - INADEQUATE_SECURITY0xc - Negotiated TLS parameters not acceptable - - - -
- -
- - This section registers the HTTP2-Settings header field in the - Permanent Message Header Field Registry. - - - HTTP2-Settings - - - http - - - standard - - - IETF - - - of this document - - - This header field is only used by an HTTP/2 client for Upgrade-based negotiation. - - - -
- -
- - This section registers the PRI method in the HTTP Method - Registry (). - - - PRI - - - No - - - No - - - of this document - - - This method is never used by an actual client. This method will appear to be used - when an HTTP/1.1 server or intermediary attempts to parse an HTTP/2 connection - preface. - - - -
- -
- - This document registers the 421 (Misdirected Request) HTTP Status code in the Hypertext - Transfer Protocol (HTTP) Status Code Registry (). - - - - - 421 - - - Misdirected Request - - - of this document - - - -
- -
- -
- - This document includes substantial input from the following individuals: - - - Adam Langley, Wan-Teh Chang, Jim Morrison, Mark Nottingham, Alyssa Wilk, Costin - Manolache, William Chan, Vitaliy Lvin, Joe Chan, Adam Barth, Ryan Hamilton, Gavin - Peters, Kent Alstad, Kevin Lindsay, Paul Amer, Fan Yang, Jonathan Leighton (SPDY - contributors). - - - Gabriel Montenegro and Willy Tarreau (Upgrade mechanism). - - - William Chan, Salvatore Loreto, Osama Mazahir, Gabriel Montenegro, Jitu Padhye, Roberto - Peon, Rob Trace (Flow control). - - - Mike Bishop (Extensibility). - - - Mark Nottingham, Julian Reschke, James Snell, Jeff Pinner, Mike Bishop, Herve Ruellan - (Substantial editorial contributions). - - - Kari Hurtta, Tatsuhiro Tsujikawa, Greg Wilkins, Poul-Henning Kamp. - - - Alexey Melnikov was an editor of this document during 2013. - - - A substantial proportion of Martin's contribution was supported by Microsoft during his - employment there. - - - -
-
- - - - - - HPACK - Header Compression for HTTP/2 - - - - - - - - - - - - Transmission Control Protocol - - - University of Southern California (USC)/Information Sciences - Institute - - - - - - - - - - - Key words for use in RFCs to Indicate Requirement Levels - - - Harvard University -
sob@harvard.edu
-
- -
- - -
- - - - - HTTP Over TLS - - - - - - - - - - Uniform Resource Identifier (URI): Generic - Syntax - - - - - - - - - - - - The Base16, Base32, and Base64 Data Encodings - - - - - - - - - Guidelines for Writing an IANA Considerations Section in RFCs - - - - - - - - - - - Augmented BNF for Syntax Specifications: ABNF - - - - - - - - - - - The Transport Layer Security (TLS) Protocol Version 1.2 - - - - - - - - - - - Transport Layer Security (TLS) Extensions: Extension Definitions - - - - - - - - - - Transport Layer Security (TLS) Application-Layer Protocol Negotiation Extension - - - - - - - - - - - - - TLS Elliptic Curve Cipher Suites with SHA-256/384 and AES Galois - Counter Mode (GCM) - - - - - - - - - - - Digital Signature Standard (DSS) - - NIST - - - - - - - - - Hypertext Transfer Protocol (HTTP/1.1): Message Syntax and Routing - - Adobe Systems Incorporated -
fielding@gbiv.com
-
- - greenbytes GmbH -
julian.reschke@greenbytes.de
-
- -
- - -
- - - - Hypertext Transfer Protocol (HTTP/1.1): Semantics and Content - - Adobe Systems Incorporated -
fielding@gbiv.com
-
- - greenbytes GmbH -
julian.reschke@greenbytes.de
-
- -
- - -
- - - Hypertext Transfer Protocol (HTTP/1.1): Conditional Requests - - Adobe Systems Incorporated -
fielding@gbiv.com
-
- - greenbytes GmbH -
julian.reschke@greenbytes.de
-
- -
- -
- - - Hypertext Transfer Protocol (HTTP/1.1): Range Requests - - Adobe Systems Incorporated -
fielding@gbiv.com
-
- - World Wide Web Consortium -
ylafon@w3.org
-
- - greenbytes GmbH -
julian.reschke@greenbytes.de
-
- -
- -
- - - Hypertext Transfer Protocol (HTTP/1.1): Caching - - Adobe Systems Incorporated -
fielding@gbiv.com
-
- - Akamai -
mnot@mnot.net
-
- - greenbytes GmbH -
julian.reschke@greenbytes.de
-
- -
- - -
- - - Hypertext Transfer Protocol (HTTP/1.1): Authentication - - Adobe Systems Incorporated -
fielding@gbiv.com
-
- - greenbytes GmbH -
julian.reschke@greenbytes.de
-
- -
- - -
- - - - HTTP State Management Mechanism - - - - - -
- - - - - - TCP Extensions for High Performance - - - - - - - - - - - - Transport Layer Security Protocol Compression Methods - - - - - - - - - Additional HTTP Status Codes - - - - - - - - - - - Elliptic Curve Cryptography (ECC) Cipher Suites for Transport Layer Security (TLS) - - - - - - - - - - - - - - - AES Galois Counter Mode (GCM) Cipher Suites for TLS - - - - - - - - - - - - HTML5 - - - - - - - - - - - Latest version available at - . - - - - - - - Talking to Yourself for Fun and Profit - - - - - - - - - - - - - - BREACH: Reviving the CRIME Attack - - - - - - - - - - - Registration Procedures for Message Header Fields - - Nine by Nine -
GK-IETF@ninebynine.org
-
- - BEA Systems -
mnot@pobox.com
-
- - HP Labs -
JeffMogul@acm.org
-
- -
- - -
- - - - Recommendations for Secure Use of TLS and DTLS - - - - - - - - - - - - - - - - - - HTTP Alternative Services - - - Akamai - - - Mozilla - - - greenbytes - - - - - - -
- -
- - This section is to be removed by RFC Editor before publication. - - -
- - Renamed Not Authoritative status code to Misdirected Request. - -
- -
- - Pseudo-header fields are now required to appear strictly before regular ones. - - - Restored 1xx series status codes, except 101. - - - Changed frame length field to 24 bits. Expanded frame header to 9 octets. Added a setting - to limit the damage. - - - Added a setting to advise peers of header set size limits. - - - Removed segments. - - - Made non-semantic-bearing HEADERS frames illegal in the HTTP mapping. -
- -
- - Restored extensibility options. - - - Restricting TLS cipher suites to AEAD only. - - - Removing Content-Encoding requirements. - - - Permitting the use of PRIORITY after stream close. - - - Removed ALTSVC frame. - - - Removed BLOCKED frame. - - - Reducing the maximum padding size to 256 octets; removing padding from - CONTINUATION frames. - - - Removed per-frame GZIP compression. - -
- -
- - Added BLOCKED frame (at risk). - - - Simplified priority scheme. - - - Added DATA per-frame GZIP compression. - -
- -
- - Changed "connection header" to "connection preface" to avoid confusion. - - - Added dependency-based stream prioritization. - - - Added "h2c" identifier to distinguish between cleartext and secured HTTP/2. - - - Adding missing padding to PUSH_PROMISE. - - - Integrate ALTSVC frame and supporting text. - - - Dropping requirement on "deflate" Content-Encoding. - - - Improving security considerations around use of compression. - -
- -
- - Adding padding for data frames. - - - Renumbering frame types, error codes, and settings. - - - Adding INADEQUATE_SECURITY error code. - - - Updating TLS usage requirements to 1.2; forbidding TLS compression. - - - Removing extensibility for frames and settings. - - - Changing setting identifier size. - - - Removing the ability to disable flow control. - - - Changing the protocol identification token to "h2". - - - Changing the use of :authority to make it optional and to allow userinfo in non-HTTP - cases. - - - Allowing split on 0x0 for Cookie. - - - Reserved PRI method in HTTP/1.1 to avoid possible future collisions. - -
- -
- - Added cookie crumbling for more efficient header compression. - - - Added header field ordering with the value-concatenation mechanism. - -
- -
- - Marked draft for implementation. - -
- -
- - Adding definition for CONNECT method. - - - Constraining the use of push to safe, cacheable methods with no request body. - - - Changing from :host to :authority to remove any potential confusion. - - - Adding setting for header compression table size. - - - Adding settings acknowledgement. - - - Removing unnecessary and potentially problematic flags from CONTINUATION. - - - Added denial of service considerations. - -
-
- - Marking the draft ready for implementation. - - - Renumbering END_PUSH_PROMISE flag. - - - Editorial clarifications and changes. - -
- -
- - Added CONTINUATION frame for HEADERS and PUSH_PROMISE. - - - PUSH_PROMISE is no longer implicitly prohibited if SETTINGS_MAX_CONCURRENT_STREAMS is - zero. - - - Push expanded to allow all safe methods without a request body. - - - Clarified the use of HTTP header fields in requests and responses. Prohibited HTTP/1.1 - hop-by-hop header fields. - - - Requiring that intermediaries not forward requests with missing or illegal routing - :-headers. - - - Clarified requirements around handling different frames after stream close, stream reset - and GOAWAY. - - - Added more specific prohibitions for sending of different frame types in various stream - states. - - - Making the last received setting value the effective value. - - - Clarified requirements on TLS version, extension and ciphers. - -
- -
- - Committed major restructuring atrocities. - - - Added reference to first header compression draft. - - - Added more formal description of frame lifecycle. - - - Moved END_STREAM (renamed from FINAL) back to HEADERS/DATA. - - - Removed HEADERS+PRIORITY, added optional priority to HEADERS frame. - - - Added PRIORITY frame. - -
- -
- - Added continuations to frames carrying header blocks. - - - Replaced use of "session" with "connection" to avoid confusion with other HTTP stateful - concepts, like cookies. - - - Removed "message". - - - Switched to TLS ALPN from NPN. - - - Editorial changes. - -
- -
- - Added IANA considerations section for frame types, error codes and settings. - - - Removed data frame compression. - - - Added PUSH_PROMISE. - - - Added globally applicable flags to framing. - - - Removed zlib-based header compression mechanism. - - - Updated references. - - - Clarified stream identifier reuse. - - - Removed CREDENTIALS frame and associated mechanisms. - - - Added advice against naive implementation of flow control. - - - Added session header section. - - - Restructured frame header. Removed distinction between data and control frames. - - - Altered flow control properties to include session-level limits. - - - Added note on cacheability of pushed resources and multiple tenant servers. - - - Changed protocol label form based on discussions. - -
- -
- - Changed title throughout. - - - Removed section on Incompatibilities with SPDY draft#2. - - - Changed INTERNAL_ERROR on GOAWAY to have a value of 2 . - - - Replaced abstract and introduction. - - - Added section on starting HTTP/2.0, including upgrade mechanism. - - - Removed unused references. - - - Added flow control principles based on . - -
- -
- - Adopted as base for draft-ietf-httpbis-http2. - - - Updated authors/editors list. - - - Added status note. - -
-
- -
-
- diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go deleted file mode 100644 index d23a226..0000000 --- a/vendor/golang.org/x/net/http2/transport.go +++ /dev/null @@ -1,2310 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Transport code. - -package http2 - -import ( - "bufio" - "bytes" - "compress/gzip" - "crypto/rand" - "crypto/tls" - "errors" - "fmt" - "io" - "io/ioutil" - "log" - "math" - mathrand "math/rand" - "net" - "net/http" - "sort" - "strconv" - "strings" - "sync" - "time" - - "golang.org/x/net/http/httpguts" - "golang.org/x/net/http2/hpack" - "golang.org/x/net/idna" -) - -const ( - // transportDefaultConnFlow is how many connection-level flow control - // tokens we give the server at start-up, past the default 64k. - transportDefaultConnFlow = 1 << 30 - - // transportDefaultStreamFlow is how many stream-level flow - // control tokens we announce to the peer, and how many bytes - // we buffer per stream. - transportDefaultStreamFlow = 4 << 20 - - // transportDefaultStreamMinRefresh is the minimum number of bytes we'll send - // a stream-level WINDOW_UPDATE for at a time. - transportDefaultStreamMinRefresh = 4 << 10 - - defaultUserAgent = "Go-http-client/2.0" -) - -// Transport is an HTTP/2 Transport. -// -// A Transport internally caches connections to servers. It is safe -// for concurrent use by multiple goroutines. -type Transport struct { - // DialTLS specifies an optional dial function for creating - // TLS connections for requests. - // - // If DialTLS is nil, tls.Dial is used. - // - // If the returned net.Conn has a ConnectionState method like tls.Conn, - // it will be used to set http.Response.TLS. - DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error) - - // TLSClientConfig specifies the TLS configuration to use with - // tls.Client. If nil, the default configuration is used. - TLSClientConfig *tls.Config - - // ConnPool optionally specifies an alternate connection pool to use. - // If nil, the default is used. - ConnPool ClientConnPool - - // DisableCompression, if true, prevents the Transport from - // requesting compression with an "Accept-Encoding: gzip" - // request header when the Request contains no existing - // Accept-Encoding value. If the Transport requests gzip on - // its own and gets a gzipped response, it's transparently - // decoded in the Response.Body. However, if the user - // explicitly requested gzip it is not automatically - // uncompressed. - DisableCompression bool - - // AllowHTTP, if true, permits HTTP/2 requests using the insecure, - // plain-text "http" scheme. Note that this does not enable h2c support. - AllowHTTP bool - - // MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to - // send in the initial settings frame. It is how many bytes - // of response headers are allowed. Unlike the http2 spec, zero here - // means to use a default limit (currently 10MB). If you actually - // want to advertise an ulimited value to the peer, Transport - // interprets the highest possible value here (0xffffffff or 1<<32-1) - // to mean no limit. - MaxHeaderListSize uint32 - - // t1, if non-nil, is the standard library Transport using - // this transport. Its settings are used (but not its - // RoundTrip method, etc). 
- t1 *http.Transport - - connPoolOnce sync.Once - connPoolOrDef ClientConnPool // non-nil version of ConnPool -} - -func (t *Transport) maxHeaderListSize() uint32 { - if t.MaxHeaderListSize == 0 { - return 10 << 20 - } - if t.MaxHeaderListSize == 0xffffffff { - return 0 - } - return t.MaxHeaderListSize -} - -func (t *Transport) disableCompression() bool { - return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) -} - -var errTransportVersion = errors.New("http2: ConfigureTransport is only supported starting at Go 1.6") - -// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. -// It requires Go 1.6 or later and returns an error if the net/http package is too old -// or if t1 has already been HTTP/2-enabled. -func ConfigureTransport(t1 *http.Transport) error { - _, err := configureTransport(t1) // in configure_transport.go (go1.6) or not_go16.go - return err -} - -func (t *Transport) connPool() ClientConnPool { - t.connPoolOnce.Do(t.initConnPool) - return t.connPoolOrDef -} - -func (t *Transport) initConnPool() { - if t.ConnPool != nil { - t.connPoolOrDef = t.ConnPool - } else { - t.connPoolOrDef = &clientConnPool{t: t} - } -} - -// ClientConn is the state of a single HTTP/2 client connection to an -// HTTP/2 server. -type ClientConn struct { - t *Transport - tconn net.Conn // usually *tls.Conn, except specialized impls - tlsState *tls.ConnectionState // nil only for specialized impls - singleUse bool // whether being used for a single http.Request - - // readLoop goroutine fields: - readerDone chan struct{} // closed on error - readerErr error // set before readerDone is closed - - idleTimeout time.Duration // or 0 for never - idleTimer *time.Timer - - mu sync.Mutex // guards following - cond *sync.Cond // hold mu; broadcast on flow/closed changes - flow flow // our conn-level flow control quota (cs.flow is per stream) - inflow flow // peer's conn-level flow control - closed bool - wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back - goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received - goAwayDebug string // goAway frame's debug data, retained as a string - streams map[uint32]*clientStream // client-initiated - nextStreamID uint32 - pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams - pings map[[8]byte]chan struct{} // in flight ping data to notification channel - bw *bufio.Writer - br *bufio.Reader - fr *Framer - lastActive time.Time - // Settings from peer: (also guarded by mu) - maxFrameSize uint32 - maxConcurrentStreams uint32 - peerMaxHeaderListSize uint64 - initialWindowSize uint32 - - hbuf bytes.Buffer // HPACK encoder writes into this - henc *hpack.Encoder - freeBuf [][]byte - - wmu sync.Mutex // held while writing; acquire AFTER mu if holding both - werr error // first write error that has occurred -} - -// clientStream is the state for a single HTTP/2 stream. One of these -// is created for each Transport.RoundTrip call. 
-type clientStream struct { - cc *ClientConn - req *http.Request - trace *clientTrace // or nil - ID uint32 - resc chan resAndError - bufPipe pipe // buffered pipe with the flow-controlled response payload - startedWrite bool // started request body write; guarded by cc.mu - requestedGzip bool - on100 func() // optional code to run if get a 100 continue response - - flow flow // guarded by cc.mu - inflow flow // guarded by cc.mu - bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read - readErr error // sticky read error; owned by transportResponseBody.Read - stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu - didReset bool // whether we sent a RST_STREAM to the server; guarded by cc.mu - - peerReset chan struct{} // closed on peer reset - resetErr error // populated before peerReset is closed - - done chan struct{} // closed when stream remove from cc.streams map; close calls guarded by cc.mu - - // owned by clientConnReadLoop: - firstByte bool // got the first response byte - pastHeaders bool // got first MetaHeadersFrame (actual headers) - pastTrailers bool // got optional second MetaHeadersFrame (trailers) - - trailer http.Header // accumulated trailers - resTrailer *http.Header // client's Response.Trailer -} - -// awaitRequestCancel waits for the user to cancel a request or for the done -// channel to be signaled. A non-nil error is returned only if the request was -// canceled. -func awaitRequestCancel(req *http.Request, done <-chan struct{}) error { - ctx := reqContext(req) - if req.Cancel == nil && ctx.Done() == nil { - return nil - } - select { - case <-req.Cancel: - return errRequestCanceled - case <-ctx.Done(): - return ctx.Err() - case <-done: - return nil - } -} - -// awaitRequestCancel waits for the user to cancel a request, its context to -// expire, or for the request to be done (any way it might be removed from the -// cc.streams map: peer reset, successful completion, TCP connection breakage, -// etc). If the request is canceled, then cs will be canceled and closed. -func (cs *clientStream) awaitRequestCancel(req *http.Request) { - if err := awaitRequestCancel(req, cs.done); err != nil { - cs.cancelStream() - cs.bufPipe.CloseWithError(err) - } -} - -func (cs *clientStream) cancelStream() { - cc := cs.cc - cc.mu.Lock() - didReset := cs.didReset - cs.didReset = true - cc.mu.Unlock() - - if !didReset { - cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - cc.forgetStreamID(cs.ID) - } -} - -// checkResetOrDone reports any error sent in a RST_STREAM frame by the -// server, or errStreamClosed if the stream is complete. 
-func (cs *clientStream) checkResetOrDone() error { - select { - case <-cs.peerReset: - return cs.resetErr - case <-cs.done: - return errStreamClosed - default: - return nil - } -} - -func (cs *clientStream) getStartedWrite() bool { - cc := cs.cc - cc.mu.Lock() - defer cc.mu.Unlock() - return cs.startedWrite -} - -func (cs *clientStream) abortRequestBodyWrite(err error) { - if err == nil { - panic("nil error") - } - cc := cs.cc - cc.mu.Lock() - cs.stopReqBody = err - cc.cond.Broadcast() - cc.mu.Unlock() -} - -type stickyErrWriter struct { - w io.Writer - err *error -} - -func (sew stickyErrWriter) Write(p []byte) (n int, err error) { - if *sew.err != nil { - return 0, *sew.err - } - n, err = sew.w.Write(p) - *sew.err = err - return -} - -// noCachedConnError is the concrete type of ErrNoCachedConn, which -// needs to be detected by net/http regardless of whether it's its -// bundled version (in h2_bundle.go with a rewritten type name) or -// from a user's x/net/http2. As such, as it has a unique method name -// (IsHTTP2NoCachedConnError) that net/http sniffs for via func -// isNoCachedConnError. -type noCachedConnError struct{} - -func (noCachedConnError) IsHTTP2NoCachedConnError() {} -func (noCachedConnError) Error() string { return "http2: no cached connection was available" } - -// isNoCachedConnError reports whether err is of type noCachedConnError -// or its equivalent renamed type in net/http2's h2_bundle.go. Both types -// may coexist in the same running program. -func isNoCachedConnError(err error) bool { - _, ok := err.(interface{ IsHTTP2NoCachedConnError() }) - return ok -} - -var ErrNoCachedConn error = noCachedConnError{} - -// RoundTripOpt are options for the Transport.RoundTripOpt method. -type RoundTripOpt struct { - // OnlyCachedConn controls whether RoundTripOpt may - // create a new TCP connection. If set true and - // no cached connection is available, RoundTripOpt - // will return ErrNoCachedConn. - OnlyCachedConn bool -} - -func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { - return t.RoundTripOpt(req, RoundTripOpt{}) -} - -// authorityAddr returns a given authority (a host/IP, or host:port / ip:port) -// and returns a host:port. The port 443 is added if needed. -func authorityAddr(scheme string, authority string) (addr string) { - host, port, err := net.SplitHostPort(authority) - if err != nil { // authority didn't have a port - port = "443" - if scheme == "http" { - port = "80" - } - host = authority - } - if a, err := idna.ToASCII(host); err == nil { - host = a - } - // IPv6 address literal, without a port: - if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") { - return host + ":" + port - } - return net.JoinHostPort(host, port) -} - -// RoundTripOpt is like RoundTrip, but takes options. -func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { - if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { - return nil, errors.New("http2: unsupported scheme") - } - - addr := authorityAddr(req.URL.Scheme, req.URL.Host) - for retry := 0; ; retry++ { - cc, err := t.connPool().GetClientConn(req, addr) - if err != nil { - t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err) - return nil, err - } - traceGotConn(req, cc) - res, gotErrAfterReqBodyWrite, err := cc.roundTrip(req) - if err != nil && retry <= 6 { - if req, err = shouldRetryRequest(req, err, gotErrAfterReqBodyWrite); err == nil { - // After the first retry, do exponential backoff with 10% jitter. 
- if retry == 0 { - continue - } - backoff := float64(uint(1) << (uint(retry) - 1)) - backoff += backoff * (0.1 * mathrand.Float64()) - select { - case <-time.After(time.Second * time.Duration(backoff)): - continue - case <-reqContext(req).Done(): - return nil, reqContext(req).Err() - } - } - } - if err != nil { - t.vlogf("RoundTrip failure: %v", err) - return nil, err - } - return res, nil - } -} - -// CloseIdleConnections closes any connections which were previously -// connected from previous requests but are now sitting idle. -// It does not interrupt any connections currently in use. -func (t *Transport) CloseIdleConnections() { - if cp, ok := t.connPool().(clientConnPoolIdleCloser); ok { - cp.closeIdleConnections() - } -} - -var ( - errClientConnClosed = errors.New("http2: client conn is closed") - errClientConnUnusable = errors.New("http2: client conn not usable") - errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") -) - -// shouldRetryRequest is called by RoundTrip when a request fails to get -// response headers. It is always called with a non-nil error. -// It returns either a request to retry (either the same request, or a -// modified clone), or an error if the request can't be replayed. -func shouldRetryRequest(req *http.Request, err error, afterBodyWrite bool) (*http.Request, error) { - if !canRetryError(err) { - return nil, err - } - if !afterBodyWrite { - return req, nil - } - // If the Body is nil (or http.NoBody), it's safe to reuse - // this request and its Body. - if req.Body == nil || reqBodyIsNoBody(req.Body) { - return req, nil - } - // Otherwise we depend on the Request having its GetBody - // func defined. - getBody := reqGetBody(req) // Go 1.8: getBody = req.GetBody - if getBody == nil { - return nil, fmt.Errorf("http2: Transport: cannot retry err [%v] after Request.Body was written; define Request.GetBody to avoid this error", err) - } - body, err := getBody() - if err != nil { - return nil, err - } - newReq := *req - newReq.Body = body - return &newReq, nil -} - -func canRetryError(err error) bool { - if err == errClientConnUnusable || err == errClientConnGotGoAway { - return true - } - if se, ok := err.(StreamError); ok { - return se.Code == ErrCodeRefusedStream - } - return false -} - -func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, error) { - host, _, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - tconn, err := t.dialTLS()("tcp", addr, t.newTLSConfig(host)) - if err != nil { - return nil, err - } - return t.newClientConn(tconn, singleUse) -} - -func (t *Transport) newTLSConfig(host string) *tls.Config { - cfg := new(tls.Config) - if t.TLSClientConfig != nil { - *cfg = *cloneTLSConfig(t.TLSClientConfig) - } - if !strSliceContains(cfg.NextProtos, NextProtoTLS) { - cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...) 
- } - if cfg.ServerName == "" { - cfg.ServerName = host - } - return cfg -} - -func (t *Transport) dialTLS() func(string, string, *tls.Config) (net.Conn, error) { - if t.DialTLS != nil { - return t.DialTLS - } - return t.dialTLSDefault -} - -func (t *Transport) dialTLSDefault(network, addr string, cfg *tls.Config) (net.Conn, error) { - cn, err := tls.Dial(network, addr, cfg) - if err != nil { - return nil, err - } - if err := cn.Handshake(); err != nil { - return nil, err - } - if !cfg.InsecureSkipVerify { - if err := cn.VerifyHostname(cfg.ServerName); err != nil { - return nil, err - } - } - state := cn.ConnectionState() - if p := state.NegotiatedProtocol; p != NextProtoTLS { - return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS) - } - if !state.NegotiatedProtocolIsMutual { - return nil, errors.New("http2: could not negotiate protocol mutually") - } - return cn, nil -} - -// disableKeepAlives reports whether connections should be closed as -// soon as possible after handling the first request. -func (t *Transport) disableKeepAlives() bool { - return t.t1 != nil && t.t1.DisableKeepAlives -} - -func (t *Transport) expectContinueTimeout() time.Duration { - if t.t1 == nil { - return 0 - } - return transportExpectContinueTimeout(t.t1) -} - -func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { - return t.newClientConn(c, false) -} - -func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { - cc := &ClientConn{ - t: t, - tconn: c, - readerDone: make(chan struct{}), - nextStreamID: 1, - maxFrameSize: 16 << 10, // spec default - initialWindowSize: 65535, // spec default - maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough. - peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. - streams: make(map[uint32]*clientStream), - singleUse: singleUse, - wantSettingsAck: true, - pings: make(map[[8]byte]chan struct{}), - } - if d := t.idleConnTimeout(); d != 0 { - cc.idleTimeout = d - cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout) - } - if VerboseLogs { - t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) - } - - cc.cond = sync.NewCond(&cc.mu) - cc.flow.add(int32(initialWindowSize)) - - // TODO: adjust this writer size to account for frame size + - // MTU + crypto/tls record padding. - cc.bw = bufio.NewWriter(stickyErrWriter{c, &cc.werr}) - cc.br = bufio.NewReader(c) - cc.fr = NewFramer(cc.bw, cc.br) - cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) - cc.fr.MaxHeaderListSize = t.maxHeaderListSize() - - // TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on - // henc in response to SETTINGS frames? - cc.henc = hpack.NewEncoder(&cc.hbuf) - - if t.AllowHTTP { - cc.nextStreamID = 3 - } - - if cs, ok := c.(connectionStater); ok { - state := cs.ConnectionState() - cc.tlsState = &state - } - - initialSettings := []Setting{ - {ID: SettingEnablePush, Val: 0}, - {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, - } - if max := t.maxHeaderListSize(); max != 0 { - initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) - } - - cc.bw.Write(clientPreface) - cc.fr.WriteSettings(initialSettings...) 
- cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow) - cc.inflow.add(transportDefaultConnFlow + initialWindowSize) - cc.bw.Flush() - if cc.werr != nil { - return nil, cc.werr - } - - go cc.readLoop() - return cc, nil -} - -func (cc *ClientConn) setGoAway(f *GoAwayFrame) { - cc.mu.Lock() - defer cc.mu.Unlock() - - old := cc.goAway - cc.goAway = f - - // Merge the previous and current GoAway error frames. - if cc.goAwayDebug == "" { - cc.goAwayDebug = string(f.DebugData()) - } - if old != nil && old.ErrCode != ErrCodeNo { - cc.goAway.ErrCode = old.ErrCode - } - last := f.LastStreamID - for streamID, cs := range cc.streams { - if streamID > last { - select { - case cs.resc <- resAndError{err: errClientConnGotGoAway}: - default: - } - } - } -} - -// CanTakeNewRequest reports whether the connection can take a new request, -// meaning it has not been closed or received or sent a GOAWAY. -func (cc *ClientConn) CanTakeNewRequest() bool { - cc.mu.Lock() - defer cc.mu.Unlock() - return cc.canTakeNewRequestLocked() -} - -func (cc *ClientConn) canTakeNewRequestLocked() bool { - if cc.singleUse && cc.nextStreamID > 1 { - return false - } - return cc.goAway == nil && !cc.closed && - int64(cc.nextStreamID)+int64(cc.pendingRequests) < math.MaxInt32 -} - -// onIdleTimeout is called from a time.AfterFunc goroutine. It will -// only be called when we're idle, but because we're coming from a new -// goroutine, there could be a new request coming in at the same time, -// so this simply calls the synchronized closeIfIdle to shut down this -// connection. The timer could just call closeIfIdle, but this is more -// clear. -func (cc *ClientConn) onIdleTimeout() { - cc.closeIfIdle() -} - -func (cc *ClientConn) closeIfIdle() { - cc.mu.Lock() - if len(cc.streams) > 0 { - cc.mu.Unlock() - return - } - cc.closed = true - nextID := cc.nextStreamID - // TODO: do clients send GOAWAY too? maybe? Just Close: - cc.mu.Unlock() - - if VerboseLogs { - cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2) - } - cc.tconn.Close() -} - -const maxAllocFrameSize = 512 << 10 - -// frameBuffer returns a scratch buffer suitable for writing DATA frames. -// They're capped at the min of the peer's max frame size or 512KB -// (kinda arbitrarily), but definitely capped so we don't allocate 4GB -// bufers. -func (cc *ClientConn) frameScratchBuffer() []byte { - cc.mu.Lock() - size := cc.maxFrameSize - if size > maxAllocFrameSize { - size = maxAllocFrameSize - } - for i, buf := range cc.freeBuf { - if len(buf) >= int(size) { - cc.freeBuf[i] = nil - cc.mu.Unlock() - return buf[:size] - } - } - cc.mu.Unlock() - return make([]byte, size) -} - -func (cc *ClientConn) putFrameScratchBuffer(buf []byte) { - cc.mu.Lock() - defer cc.mu.Unlock() - const maxBufs = 4 // arbitrary; 4 concurrent requests per conn? investigate. - if len(cc.freeBuf) < maxBufs { - cc.freeBuf = append(cc.freeBuf, buf) - return - } - for i, old := range cc.freeBuf { - if old == nil { - cc.freeBuf[i] = buf - return - } - } - // forget about it. -} - -// errRequestCanceled is a copy of net/http's errRequestCanceled because it's not -// exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests. 
-var errRequestCanceled = errors.New("net/http: request canceled") - -func commaSeparatedTrailers(req *http.Request) (string, error) { - keys := make([]string, 0, len(req.Trailer)) - for k := range req.Trailer { - k = http.CanonicalHeaderKey(k) - switch k { - case "Transfer-Encoding", "Trailer", "Content-Length": - return "", &badStringError{"invalid Trailer key", k} - } - keys = append(keys, k) - } - if len(keys) > 0 { - sort.Strings(keys) - return strings.Join(keys, ","), nil - } - return "", nil -} - -func (cc *ClientConn) responseHeaderTimeout() time.Duration { - if cc.t.t1 != nil { - return cc.t.t1.ResponseHeaderTimeout - } - // No way to do this (yet?) with just an http2.Transport. Probably - // no need. Request.Cancel this is the new way. We only need to support - // this for compatibility with the old http.Transport fields when - // we're doing transparent http2. - return 0 -} - -// checkConnHeaders checks whether req has any invalid connection-level headers. -// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields. -// Certain headers are special-cased as okay but not transmitted later. -func checkConnHeaders(req *http.Request) error { - if v := req.Header.Get("Upgrade"); v != "" { - return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"]) - } - if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { - return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv) - } - if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "close" && vv[0] != "keep-alive") { - return fmt.Errorf("http2: invalid Connection request header: %q", vv) - } - return nil -} - -// actualContentLength returns a sanitized version of -// req.ContentLength, where 0 actually means zero (not unknown) and -1 -// means unknown. -func actualContentLength(req *http.Request) int64 { - if req.Body == nil || reqBodyIsNoBody(req.Body) { - return 0 - } - if req.ContentLength != 0 { - return req.ContentLength - } - return -1 -} - -func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { - resp, _, err := cc.roundTrip(req) - return resp, err -} - -func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAfterReqBodyWrite bool, err error) { - if err := checkConnHeaders(req); err != nil { - return nil, false, err - } - if cc.idleTimer != nil { - cc.idleTimer.Stop() - } - - trailers, err := commaSeparatedTrailers(req) - if err != nil { - return nil, false, err - } - hasTrailers := trailers != "" - - cc.mu.Lock() - if err := cc.awaitOpenSlotForRequest(req); err != nil { - cc.mu.Unlock() - return nil, false, err - } - - body := req.Body - contentLen := actualContentLength(req) - hasBody := contentLen != 0 - - // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? - var requestedGzip bool - if !cc.t.disableCompression() && - req.Header.Get("Accept-Encoding") == "" && - req.Header.Get("Range") == "" && - req.Method != "HEAD" { - // Request gzip only, not deflate. Deflate is ambiguous and - // not as universally supported anyway. - // See: http://www.gzip.org/zlib/zlib_faq.html#faq38 - // - // Note that we don't request this for HEAD requests, - // due to a bug in nginx: - // http://trac.nginx.org/nginx/ticket/358 - // https://golang.org/issue/5522 - // - // We don't request gzip if the request is for a range, since - // auto-decoding a portion of a gzipped document will just fail - // anyway. 
See https://golang.org/issue/8923 - requestedGzip = true - } - - // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is - // sent by writeRequestBody below, along with any Trailers, - // again in form HEADERS{1}, CONTINUATION{0,}) - hdrs, err := cc.encodeHeaders(req, requestedGzip, trailers, contentLen) - if err != nil { - cc.mu.Unlock() - return nil, false, err - } - - cs := cc.newStream() - cs.req = req - cs.trace = requestTrace(req) - cs.requestedGzip = requestedGzip - bodyWriter := cc.t.getBodyWriterState(cs, body) - cs.on100 = bodyWriter.on100 - - cc.wmu.Lock() - endStream := !hasBody && !hasTrailers - werr := cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs) - cc.wmu.Unlock() - traceWroteHeaders(cs.trace) - cc.mu.Unlock() - - if werr != nil { - if hasBody { - req.Body.Close() // per RoundTripper contract - bodyWriter.cancel() - } - cc.forgetStreamID(cs.ID) - // Don't bother sending a RST_STREAM (our write already failed; - // no need to keep writing) - traceWroteRequest(cs.trace, werr) - return nil, false, werr - } - - var respHeaderTimer <-chan time.Time - if hasBody { - bodyWriter.scheduleBodyWrite() - } else { - traceWroteRequest(cs.trace, nil) - if d := cc.responseHeaderTimeout(); d != 0 { - timer := time.NewTimer(d) - defer timer.Stop() - respHeaderTimer = timer.C - } - } - - readLoopResCh := cs.resc - bodyWritten := false - ctx := reqContext(req) - - handleReadLoopResponse := func(re resAndError) (*http.Response, bool, error) { - res := re.res - if re.err != nil || res.StatusCode > 299 { - // On error or status code 3xx, 4xx, 5xx, etc abort any - // ongoing write, assuming that the server doesn't care - // about our request body. If the server replied with 1xx or - // 2xx, however, then assume the server DOES potentially - // want our body (e.g. full-duplex streaming: - // golang.org/issue/13444). If it turns out the server - // doesn't, they'll RST_STREAM us soon enough. This is a - // heuristic to avoid adding knobs to Transport. Hopefully - // we can keep it. - bodyWriter.cancel() - cs.abortRequestBodyWrite(errStopReqBodyWrite) - } - if re.err != nil { - cc.forgetStreamID(cs.ID) - return nil, cs.getStartedWrite(), re.err - } - res.Request = req - res.TLS = cc.tlsState - return res, false, nil - } - - for { - select { - case re := <-readLoopResCh: - return handleReadLoopResponse(re) - case <-respHeaderTimer: - if !hasBody || bodyWritten { - cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - } else { - bodyWriter.cancel() - cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) - } - cc.forgetStreamID(cs.ID) - return nil, cs.getStartedWrite(), errTimeout - case <-ctx.Done(): - if !hasBody || bodyWritten { - cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - } else { - bodyWriter.cancel() - cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) - } - cc.forgetStreamID(cs.ID) - return nil, cs.getStartedWrite(), ctx.Err() - case <-req.Cancel: - if !hasBody || bodyWritten { - cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - } else { - bodyWriter.cancel() - cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) - } - cc.forgetStreamID(cs.ID) - return nil, cs.getStartedWrite(), errRequestCanceled - case <-cs.peerReset: - // processResetStream already removed the - // stream from the streams map; no need for - // forgetStreamID. - return nil, cs.getStartedWrite(), cs.resetErr - case err := <-bodyWriter.resc: - // Prefer the read loop's response, if available. Issue 16102. 
- select { - case re := <-readLoopResCh: - return handleReadLoopResponse(re) - default: - } - if err != nil { - return nil, cs.getStartedWrite(), err - } - bodyWritten = true - if d := cc.responseHeaderTimeout(); d != 0 { - timer := time.NewTimer(d) - defer timer.Stop() - respHeaderTimer = timer.C - } - } - } -} - -// awaitOpenSlotForRequest waits until len(streams) < maxConcurrentStreams. -// Must hold cc.mu. -func (cc *ClientConn) awaitOpenSlotForRequest(req *http.Request) error { - var waitingForConn chan struct{} - var waitingForConnErr error // guarded by cc.mu - for { - cc.lastActive = time.Now() - if cc.closed || !cc.canTakeNewRequestLocked() { - if waitingForConn != nil { - close(waitingForConn) - } - return errClientConnUnusable - } - if int64(len(cc.streams))+1 <= int64(cc.maxConcurrentStreams) { - if waitingForConn != nil { - close(waitingForConn) - } - return nil - } - // Unfortunately, we cannot wait on a condition variable and channel at - // the same time, so instead, we spin up a goroutine to check if the - // request is canceled while we wait for a slot to open in the connection. - if waitingForConn == nil { - waitingForConn = make(chan struct{}) - go func() { - if err := awaitRequestCancel(req, waitingForConn); err != nil { - cc.mu.Lock() - waitingForConnErr = err - cc.cond.Broadcast() - cc.mu.Unlock() - } - }() - } - cc.pendingRequests++ - cc.cond.Wait() - cc.pendingRequests-- - if waitingForConnErr != nil { - return waitingForConnErr - } - } -} - -// requires cc.wmu be held -func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, maxFrameSize int, hdrs []byte) error { - first := true // first frame written (HEADERS is first, then CONTINUATION) - for len(hdrs) > 0 && cc.werr == nil { - chunk := hdrs - if len(chunk) > maxFrameSize { - chunk = chunk[:maxFrameSize] - } - hdrs = hdrs[len(chunk):] - endHeaders := len(hdrs) == 0 - if first { - cc.fr.WriteHeaders(HeadersFrameParam{ - StreamID: streamID, - BlockFragment: chunk, - EndStream: endStream, - EndHeaders: endHeaders, - }) - first = false - } else { - cc.fr.WriteContinuation(streamID, endHeaders, chunk) - } - } - // TODO(bradfitz): this Flush could potentially block (as - // could the WriteHeaders call(s) above), which means they - // wouldn't respond to Request.Cancel being readable. That's - // rare, but this should probably be in a goroutine. - cc.bw.Flush() - return cc.werr -} - -// internal error values; they don't escape to callers -var ( - // abort request body write; don't send cancel - errStopReqBodyWrite = errors.New("http2: aborting request body write") - - // abort request body write, but send stream reset of cancel. 
- errStopReqBodyWriteAndCancel = errors.New("http2: canceling request") -) - -func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) { - cc := cs.cc - sentEnd := false // whether we sent the final DATA frame w/ END_STREAM - buf := cc.frameScratchBuffer() - defer cc.putFrameScratchBuffer(buf) - - defer func() { - traceWroteRequest(cs.trace, err) - // TODO: write h12Compare test showing whether - // Request.Body is closed by the Transport, - // and in multiple cases: server replies <=299 and >299 - // while still writing request body - cerr := bodyCloser.Close() - if err == nil { - err = cerr - } - }() - - req := cs.req - hasTrailers := req.Trailer != nil - - var sawEOF bool - for !sawEOF { - n, err := body.Read(buf) - if err == io.EOF { - sawEOF = true - err = nil - } else if err != nil { - return err - } - - remain := buf[:n] - for len(remain) > 0 && err == nil { - var allowed int32 - allowed, err = cs.awaitFlowControl(len(remain)) - switch { - case err == errStopReqBodyWrite: - return err - case err == errStopReqBodyWriteAndCancel: - cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - return err - case err != nil: - return err - } - cc.wmu.Lock() - data := remain[:allowed] - remain = remain[allowed:] - sentEnd = sawEOF && len(remain) == 0 && !hasTrailers - err = cc.fr.WriteData(cs.ID, sentEnd, data) - if err == nil { - // TODO(bradfitz): this flush is for latency, not bandwidth. - // Most requests won't need this. Make this opt-in or - // opt-out? Use some heuristic on the body type? Nagel-like - // timers? Based on 'n'? Only last chunk of this for loop, - // unless flow control tokens are low? For now, always. - // If we change this, see comment below. - err = cc.bw.Flush() - } - cc.wmu.Unlock() - } - if err != nil { - return err - } - } - - if sentEnd { - // Already sent END_STREAM (which implies we have no - // trailers) and flushed, because currently all - // WriteData frames above get a flush. So we're done. - return nil - } - - var trls []byte - if hasTrailers { - cc.mu.Lock() - trls, err = cc.encodeTrailers(req) - cc.mu.Unlock() - if err != nil { - cc.writeStreamReset(cs.ID, ErrCodeInternal, err) - cc.forgetStreamID(cs.ID) - return err - } - } - - cc.mu.Lock() - maxFrameSize := int(cc.maxFrameSize) - cc.mu.Unlock() - - cc.wmu.Lock() - defer cc.wmu.Unlock() - - // Two ways to send END_STREAM: either with trailers, or - // with an empty DATA frame. - if len(trls) > 0 { - err = cc.writeHeaders(cs.ID, true, maxFrameSize, trls) - } else { - err = cc.fr.WriteData(cs.ID, true, nil) - } - if ferr := cc.bw.Flush(); ferr != nil && err == nil { - err = ferr - } - return err -} - -// awaitFlowControl waits for [1, min(maxBytes, cc.cs.maxFrameSize)] flow -// control tokens from the server. -// It returns either the non-zero number of tokens taken or an error -// if the stream is dead. 
-func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) { - cc := cs.cc - cc.mu.Lock() - defer cc.mu.Unlock() - for { - if cc.closed { - return 0, errClientConnClosed - } - if cs.stopReqBody != nil { - return 0, cs.stopReqBody - } - if err := cs.checkResetOrDone(); err != nil { - return 0, err - } - if a := cs.flow.available(); a > 0 { - take := a - if int(take) > maxBytes { - - take = int32(maxBytes) // can't truncate int; take is int32 - } - if take > int32(cc.maxFrameSize) { - take = int32(cc.maxFrameSize) - } - cs.flow.take(take) - return take, nil - } - cc.cond.Wait() - } -} - -type badStringError struct { - what string - str string -} - -func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) } - -// requires cc.mu be held. -func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { - cc.hbuf.Reset() - - host := req.Host - if host == "" { - host = req.URL.Host - } - host, err := httpguts.PunycodeHostPort(host) - if err != nil { - return nil, err - } - - var path string - if req.Method != "CONNECT" { - path = req.URL.RequestURI() - if !validPseudoPath(path) { - orig := path - path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host) - if !validPseudoPath(path) { - if req.URL.Opaque != "" { - return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque) - } else { - return nil, fmt.Errorf("invalid request :path %q", orig) - } - } - } - } - - // Check for any invalid headers and return an error before we - // potentially pollute our hpack state. (We want to be able to - // continue to reuse the hpack encoder for future requests) - for k, vv := range req.Header { - if !httpguts.ValidHeaderFieldName(k) { - return nil, fmt.Errorf("invalid HTTP header name %q", k) - } - for _, v := range vv { - if !httpguts.ValidHeaderFieldValue(v) { - return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k) - } - } - } - - enumerateHeaders := func(f func(name, value string)) { - // 8.1.2.3 Request Pseudo-Header Fields - // The :path pseudo-header field includes the path and query parts of the - // target URI (the path-absolute production and optionally a '?' character - // followed by the query production (see Sections 3.3 and 3.4 of - // [RFC3986]). - f(":authority", host) - f(":method", req.Method) - if req.Method != "CONNECT" { - f(":path", path) - f(":scheme", req.URL.Scheme) - } - if trailers != "" { - f("trailer", trailers) - } - - var didUA bool - for k, vv := range req.Header { - if strings.EqualFold(k, "host") || strings.EqualFold(k, "content-length") { - // Host is :authority, already sent. - // Content-Length is automatic, set below. - continue - } else if strings.EqualFold(k, "connection") || strings.EqualFold(k, "proxy-connection") || - strings.EqualFold(k, "transfer-encoding") || strings.EqualFold(k, "upgrade") || - strings.EqualFold(k, "keep-alive") { - // Per 8.1.2.2 Connection-Specific Header - // Fields, don't send connection-specific - // fields. We have already checked if any - // are error-worthy so just ignore the rest. - continue - } else if strings.EqualFold(k, "user-agent") { - // Match Go's http1 behavior: at most one - // User-Agent. If set to nil or empty string, - // then omit it. Otherwise if not mentioned, - // include the default (below). 
- didUA = true - if len(vv) < 1 { - continue - } - vv = vv[:1] - if vv[0] == "" { - continue - } - - } - - for _, v := range vv { - f(k, v) - } - } - if shouldSendReqContentLength(req.Method, contentLength) { - f("content-length", strconv.FormatInt(contentLength, 10)) - } - if addGzipHeader { - f("accept-encoding", "gzip") - } - if !didUA { - f("user-agent", defaultUserAgent) - } - } - - // Do a first pass over the headers counting bytes to ensure - // we don't exceed cc.peerMaxHeaderListSize. This is done as a - // separate pass before encoding the headers to prevent - // modifying the hpack state. - hlSize := uint64(0) - enumerateHeaders(func(name, value string) { - hf := hpack.HeaderField{Name: name, Value: value} - hlSize += uint64(hf.Size()) - }) - - if hlSize > cc.peerMaxHeaderListSize { - return nil, errRequestHeaderListSize - } - - // Header list size is ok. Write the headers. - enumerateHeaders(func(name, value string) { - cc.writeHeader(strings.ToLower(name), value) - }) - - return cc.hbuf.Bytes(), nil -} - -// shouldSendReqContentLength reports whether the http2.Transport should send -// a "content-length" request header. This logic is basically a copy of the net/http -// transferWriter.shouldSendContentLength. -// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown). -// -1 means unknown. -func shouldSendReqContentLength(method string, contentLength int64) bool { - if contentLength > 0 { - return true - } - if contentLength < 0 { - return false - } - // For zero bodies, whether we send a content-length depends on the method. - // It also kinda doesn't matter for http2 either way, with END_STREAM. - switch method { - case "POST", "PUT", "PATCH": - return true - default: - return false - } -} - -// requires cc.mu be held. -func (cc *ClientConn) encodeTrailers(req *http.Request) ([]byte, error) { - cc.hbuf.Reset() - - hlSize := uint64(0) - for k, vv := range req.Trailer { - for _, v := range vv { - hf := hpack.HeaderField{Name: k, Value: v} - hlSize += uint64(hf.Size()) - } - } - if hlSize > cc.peerMaxHeaderListSize { - return nil, errRequestHeaderListSize - } - - for k, vv := range req.Trailer { - // Transfer-Encoding, etc.. have already been filtered at the - // start of RoundTrip - lowKey := strings.ToLower(k) - for _, v := range vv { - cc.writeHeader(lowKey, v) - } - } - return cc.hbuf.Bytes(), nil -} - -func (cc *ClientConn) writeHeader(name, value string) { - if VerboseLogs { - log.Printf("http2: Transport encoding header %q = %q", name, value) - } - cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value}) -} - -type resAndError struct { - res *http.Response - err error -} - -// requires cc.mu be held. 
-func (cc *ClientConn) newStream() *clientStream { - cs := &clientStream{ - cc: cc, - ID: cc.nextStreamID, - resc: make(chan resAndError, 1), - peerReset: make(chan struct{}), - done: make(chan struct{}), - } - cs.flow.add(int32(cc.initialWindowSize)) - cs.flow.setConnFlow(&cc.flow) - cs.inflow.add(transportDefaultStreamFlow) - cs.inflow.setConnFlow(&cc.inflow) - cc.nextStreamID += 2 - cc.streams[cs.ID] = cs - return cs -} - -func (cc *ClientConn) forgetStreamID(id uint32) { - cc.streamByID(id, true) -} - -func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream { - cc.mu.Lock() - defer cc.mu.Unlock() - cs := cc.streams[id] - if andRemove && cs != nil && !cc.closed { - cc.lastActive = time.Now() - delete(cc.streams, id) - if len(cc.streams) == 0 && cc.idleTimer != nil { - cc.idleTimer.Reset(cc.idleTimeout) - } - close(cs.done) - // Wake up checkResetOrDone via clientStream.awaitFlowControl and - // wake up RoundTrip if there is a pending request. - cc.cond.Broadcast() - } - return cs -} - -// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop. -type clientConnReadLoop struct { - cc *ClientConn - closeWhenIdle bool -} - -// readLoop runs in its own goroutine and reads and dispatches frames. -func (cc *ClientConn) readLoop() { - rl := &clientConnReadLoop{cc: cc} - defer rl.cleanup() - cc.readerErr = rl.run() - if ce, ok := cc.readerErr.(ConnectionError); ok { - cc.wmu.Lock() - cc.fr.WriteGoAway(0, ErrCode(ce), nil) - cc.wmu.Unlock() - } -} - -// GoAwayError is returned by the Transport when the server closes the -// TCP connection after sending a GOAWAY frame. -type GoAwayError struct { - LastStreamID uint32 - ErrCode ErrCode - DebugData string -} - -func (e GoAwayError) Error() string { - return fmt.Sprintf("http2: server sent GOAWAY and closed the connection; LastStreamID=%v, ErrCode=%v, debug=%q", - e.LastStreamID, e.ErrCode, e.DebugData) -} - -func isEOFOrNetReadError(err error) bool { - if err == io.EOF { - return true - } - ne, ok := err.(*net.OpError) - return ok && ne.Op == "read" -} - -func (rl *clientConnReadLoop) cleanup() { - cc := rl.cc - defer cc.tconn.Close() - defer cc.t.connPool().MarkDead(cc) - defer close(cc.readerDone) - - if cc.idleTimer != nil { - cc.idleTimer.Stop() - } - - // Close any response bodies if the server closes prematurely. - // TODO: also do this if we've written the headers but not - // gotten a response yet. 
- err := cc.readerErr - cc.mu.Lock() - if cc.goAway != nil && isEOFOrNetReadError(err) { - err = GoAwayError{ - LastStreamID: cc.goAway.LastStreamID, - ErrCode: cc.goAway.ErrCode, - DebugData: cc.goAwayDebug, - } - } else if err == io.EOF { - err = io.ErrUnexpectedEOF - } - for _, cs := range cc.streams { - cs.bufPipe.CloseWithError(err) // no-op if already closed - select { - case cs.resc <- resAndError{err: err}: - default: - } - close(cs.done) - } - cc.closed = true - cc.cond.Broadcast() - cc.mu.Unlock() -} - -func (rl *clientConnReadLoop) run() error { - cc := rl.cc - rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse - gotReply := false // ever saw a HEADERS reply - gotSettings := false - for { - f, err := cc.fr.ReadFrame() - if err != nil { - cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) - } - if se, ok := err.(StreamError); ok { - if cs := cc.streamByID(se.StreamID, false); cs != nil { - cs.cc.writeStreamReset(cs.ID, se.Code, err) - cs.cc.forgetStreamID(cs.ID) - if se.Cause == nil { - se.Cause = cc.fr.errDetail - } - rl.endStreamError(cs, se) - } - continue - } else if err != nil { - return err - } - if VerboseLogs { - cc.vlogf("http2: Transport received %s", summarizeFrame(f)) - } - if !gotSettings { - if _, ok := f.(*SettingsFrame); !ok { - cc.logf("protocol error: received %T before a SETTINGS frame", f) - return ConnectionError(ErrCodeProtocol) - } - gotSettings = true - } - maybeIdle := false // whether frame might transition us to idle - - switch f := f.(type) { - case *MetaHeadersFrame: - err = rl.processHeaders(f) - maybeIdle = true - gotReply = true - case *DataFrame: - err = rl.processData(f) - maybeIdle = true - case *GoAwayFrame: - err = rl.processGoAway(f) - maybeIdle = true - case *RSTStreamFrame: - err = rl.processResetStream(f) - maybeIdle = true - case *SettingsFrame: - err = rl.processSettings(f) - case *PushPromiseFrame: - err = rl.processPushPromise(f) - case *WindowUpdateFrame: - err = rl.processWindowUpdate(f) - case *PingFrame: - err = rl.processPing(f) - default: - cc.logf("Transport: unhandled response frame type %T", f) - } - if err != nil { - if VerboseLogs { - cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err) - } - return err - } - if rl.closeWhenIdle && gotReply && maybeIdle { - cc.closeIfIdle() - } - } -} - -func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { - cc := rl.cc - cs := cc.streamByID(f.StreamID, false) - if cs == nil { - // We'd get here if we canceled a request while the - // server had its response still in flight. So if this - // was just something we canceled, ignore it. - return nil - } - if f.StreamEnded() { - // Issue 20521: If the stream has ended, streamByID() causes - // clientStream.done to be closed, which causes the request's bodyWriter - // to be closed with an errStreamClosed, which may be received by - // clientConn.RoundTrip before the result of processing these headers. - // Deferring stream closure allows the header processing to occur first. - // clientConn.RoundTrip may still receive the bodyWriter error first, but - // the fix for issue 16102 prioritises any response. - // - // Issue 22413: If there is no request body, we should close the - // stream before writing to cs.resc so that the stream is closed - // immediately once RoundTrip returns. 
- if cs.req.Body != nil { - defer cc.forgetStreamID(f.StreamID) - } else { - cc.forgetStreamID(f.StreamID) - } - } - if !cs.firstByte { - if cs.trace != nil { - // TODO(bradfitz): move first response byte earlier, - // when we first read the 9 byte header, not waiting - // until all the HEADERS+CONTINUATION frames have been - // merged. This works for now. - traceFirstResponseByte(cs.trace) - } - cs.firstByte = true - } - if !cs.pastHeaders { - cs.pastHeaders = true - } else { - return rl.processTrailers(cs, f) - } - - res, err := rl.handleResponse(cs, f) - if err != nil { - if _, ok := err.(ConnectionError); ok { - return err - } - // Any other error type is a stream error. - cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err) - cc.forgetStreamID(cs.ID) - cs.resc <- resAndError{err: err} - return nil // return nil from process* funcs to keep conn alive - } - if res == nil { - // (nil, nil) special case. See handleResponse docs. - return nil - } - cs.resTrailer = &res.Trailer - cs.resc <- resAndError{res: res} - return nil -} - -// may return error types nil, or ConnectionError. Any other error value -// is a StreamError of type ErrCodeProtocol. The returned error in that case -// is the detail. -// -// As a special case, handleResponse may return (nil, nil) to skip the -// frame (currently only used for 100 expect continue). This special -// case is going away after Issue 13851 is fixed. -func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) { - if f.Truncated { - return nil, errResponseHeaderListSize - } - - status := f.PseudoValue("status") - if status == "" { - return nil, errors.New("malformed response from server: missing status pseudo header") - } - statusCode, err := strconv.Atoi(status) - if err != nil { - return nil, errors.New("malformed response from server: malformed non-numeric status pseudo header") - } - - if statusCode == 100 { - traceGot100Continue(cs.trace) - if cs.on100 != nil { - cs.on100() // forces any write delay timer to fire - } - cs.pastHeaders = false // do it all again - return nil, nil - } - - header := make(http.Header) - res := &http.Response{ - Proto: "HTTP/2.0", - ProtoMajor: 2, - Header: header, - StatusCode: statusCode, - Status: status + " " + http.StatusText(statusCode), - } - for _, hf := range f.RegularFields() { - key := http.CanonicalHeaderKey(hf.Name) - if key == "Trailer" { - t := res.Trailer - if t == nil { - t = make(http.Header) - res.Trailer = t - } - foreachHeaderElement(hf.Value, func(v string) { - t[http.CanonicalHeaderKey(v)] = nil - }) - } else { - header[key] = append(header[key], hf.Value) - } - } - - streamEnded := f.StreamEnded() - isHead := cs.req.Method == "HEAD" - if !streamEnded || isHead { - res.ContentLength = -1 - if clens := res.Header["Content-Length"]; len(clens) == 1 { - if clen64, err := strconv.ParseInt(clens[0], 10, 64); err == nil { - res.ContentLength = clen64 - } else { - // TODO: care? unlike http/1, it won't mess up our framing, so it's - // more safe smuggling-wise to ignore. - } - } else if len(clens) > 1 { - // TODO: care? unlike http/1, it won't mess up our framing, so it's - // more safe smuggling-wise to ignore. 
- } - } - - if streamEnded || isHead { - res.Body = noBody - return res, nil - } - - cs.bufPipe = pipe{b: &dataBuffer{expected: res.ContentLength}} - cs.bytesRemain = res.ContentLength - res.Body = transportResponseBody{cs} - go cs.awaitRequestCancel(cs.req) - - if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" { - res.Header.Del("Content-Encoding") - res.Header.Del("Content-Length") - res.ContentLength = -1 - res.Body = &gzipReader{body: res.Body} - setResponseUncompressed(res) - } - return res, nil -} - -func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFrame) error { - if cs.pastTrailers { - // Too many HEADERS frames for this stream. - return ConnectionError(ErrCodeProtocol) - } - cs.pastTrailers = true - if !f.StreamEnded() { - // We expect that any headers for trailers also - // has END_STREAM. - return ConnectionError(ErrCodeProtocol) - } - if len(f.PseudoFields()) > 0 { - // No pseudo header fields are defined for trailers. - // TODO: ConnectionError might be overly harsh? Check. - return ConnectionError(ErrCodeProtocol) - } - - trailer := make(http.Header) - for _, hf := range f.RegularFields() { - key := http.CanonicalHeaderKey(hf.Name) - trailer[key] = append(trailer[key], hf.Value) - } - cs.trailer = trailer - - rl.endStream(cs) - return nil -} - -// transportResponseBody is the concrete type of Transport.RoundTrip's -// Response.Body. It is an io.ReadCloser. On Read, it reads from cs.body. -// On Close it sends RST_STREAM if EOF wasn't already seen. -type transportResponseBody struct { - cs *clientStream -} - -func (b transportResponseBody) Read(p []byte) (n int, err error) { - cs := b.cs - cc := cs.cc - - if cs.readErr != nil { - return 0, cs.readErr - } - n, err = b.cs.bufPipe.Read(p) - if cs.bytesRemain != -1 { - if int64(n) > cs.bytesRemain { - n = int(cs.bytesRemain) - if err == nil { - err = errors.New("net/http: server replied with more than declared Content-Length; truncated") - cc.writeStreamReset(cs.ID, ErrCodeProtocol, err) - } - cs.readErr = err - return int(cs.bytesRemain), err - } - cs.bytesRemain -= int64(n) - if err == io.EOF && cs.bytesRemain > 0 { - err = io.ErrUnexpectedEOF - cs.readErr = err - return n, err - } - } - if n == 0 { - // No flow control tokens to send back. - return - } - - cc.mu.Lock() - defer cc.mu.Unlock() - - var connAdd, streamAdd int32 - // Check the conn-level first, before the stream-level. - if v := cc.inflow.available(); v < transportDefaultConnFlow/2 { - connAdd = transportDefaultConnFlow - v - cc.inflow.add(connAdd) - } - if err == nil { // No need to refresh if the stream is over or failed. - // Consider any buffered body data (read from the conn but not - // consumed by the client) when computing flow control for this - // stream. 
- v := int(cs.inflow.available()) + cs.bufPipe.Len() - if v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh { - streamAdd = int32(transportDefaultStreamFlow - v) - cs.inflow.add(streamAdd) - } - } - if connAdd != 0 || streamAdd != 0 { - cc.wmu.Lock() - defer cc.wmu.Unlock() - if connAdd != 0 { - cc.fr.WriteWindowUpdate(0, mustUint31(connAdd)) - } - if streamAdd != 0 { - cc.fr.WriteWindowUpdate(cs.ID, mustUint31(streamAdd)) - } - cc.bw.Flush() - } - return -} - -var errClosedResponseBody = errors.New("http2: response body closed") - -func (b transportResponseBody) Close() error { - cs := b.cs - cc := cs.cc - - serverSentStreamEnd := cs.bufPipe.Err() == io.EOF - unread := cs.bufPipe.Len() - - if unread > 0 || !serverSentStreamEnd { - cc.mu.Lock() - cc.wmu.Lock() - if !serverSentStreamEnd { - cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel) - cs.didReset = true - } - // Return connection-level flow control. - if unread > 0 { - cc.inflow.add(int32(unread)) - cc.fr.WriteWindowUpdate(0, uint32(unread)) - } - cc.bw.Flush() - cc.wmu.Unlock() - cc.mu.Unlock() - } - - cs.bufPipe.BreakWithError(errClosedResponseBody) - cc.forgetStreamID(cs.ID) - return nil -} - -func (rl *clientConnReadLoop) processData(f *DataFrame) error { - cc := rl.cc - cs := cc.streamByID(f.StreamID, f.StreamEnded()) - data := f.Data() - if cs == nil { - cc.mu.Lock() - neverSent := cc.nextStreamID - cc.mu.Unlock() - if f.StreamID >= neverSent { - // We never asked for this. - cc.logf("http2: Transport received unsolicited DATA frame; closing connection") - return ConnectionError(ErrCodeProtocol) - } - // We probably did ask for this, but canceled. Just ignore it. - // TODO: be stricter here? only silently ignore things which - // we canceled, but not things which were closed normally - // by the peer? Tough without accumulating too much state. - - // But at least return their flow control: - if f.Length > 0 { - cc.mu.Lock() - cc.inflow.add(int32(f.Length)) - cc.mu.Unlock() - - cc.wmu.Lock() - cc.fr.WriteWindowUpdate(0, uint32(f.Length)) - cc.bw.Flush() - cc.wmu.Unlock() - } - return nil - } - if !cs.firstByte { - cc.logf("protocol error: received DATA before a HEADERS frame") - rl.endStreamError(cs, StreamError{ - StreamID: f.StreamID, - Code: ErrCodeProtocol, - }) - return nil - } - if f.Length > 0 { - if cs.req.Method == "HEAD" && len(data) > 0 { - cc.logf("protocol error: received DATA on a HEAD request") - rl.endStreamError(cs, StreamError{ - StreamID: f.StreamID, - Code: ErrCodeProtocol, - }) - return nil - } - // Check connection-level flow control. - cc.mu.Lock() - if cs.inflow.available() >= int32(f.Length) { - cs.inflow.take(int32(f.Length)) - } else { - cc.mu.Unlock() - return ConnectionError(ErrCodeFlowControl) - } - // Return any padded flow control now, since we won't - // refund it later on body reads. - var refund int - if pad := int(f.Length) - len(data); pad > 0 { - refund += pad - } - // Return len(data) now if the stream is already closed, - // since data will never be read. 
- didReset := cs.didReset - if didReset { - refund += len(data) - } - if refund > 0 { - cc.inflow.add(int32(refund)) - cc.wmu.Lock() - cc.fr.WriteWindowUpdate(0, uint32(refund)) - if !didReset { - cs.inflow.add(int32(refund)) - cc.fr.WriteWindowUpdate(cs.ID, uint32(refund)) - } - cc.bw.Flush() - cc.wmu.Unlock() - } - cc.mu.Unlock() - - if len(data) > 0 && !didReset { - if _, err := cs.bufPipe.Write(data); err != nil { - rl.endStreamError(cs, err) - return err - } - } - } - - if f.StreamEnded() { - rl.endStream(cs) - } - return nil -} - -var errInvalidTrailers = errors.New("http2: invalid trailers") - -func (rl *clientConnReadLoop) endStream(cs *clientStream) { - // TODO: check that any declared content-length matches, like - // server.go's (*stream).endStream method. - rl.endStreamError(cs, nil) -} - -func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { - var code func() - if err == nil { - err = io.EOF - code = cs.copyTrailers - } - if isConnectionCloseRequest(cs.req) { - rl.closeWhenIdle = true - } - cs.bufPipe.closeWithErrorAndCode(err, code) - - select { - case cs.resc <- resAndError{err: err}: - default: - } -} - -func (cs *clientStream) copyTrailers() { - for k, vv := range cs.trailer { - t := cs.resTrailer - if *t == nil { - *t = make(http.Header) - } - (*t)[k] = vv - } -} - -func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error { - cc := rl.cc - cc.t.connPool().MarkDead(cc) - if f.ErrCode != 0 { - // TODO: deal with GOAWAY more. particularly the error code - cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode) - } - cc.setGoAway(f) - return nil -} - -func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { - cc := rl.cc - cc.mu.Lock() - defer cc.mu.Unlock() - - if f.IsAck() { - if cc.wantSettingsAck { - cc.wantSettingsAck = false - return nil - } - return ConnectionError(ErrCodeProtocol) - } - - err := f.ForeachSetting(func(s Setting) error { - switch s.ID { - case SettingMaxFrameSize: - cc.maxFrameSize = s.Val - case SettingMaxConcurrentStreams: - cc.maxConcurrentStreams = s.Val - case SettingMaxHeaderListSize: - cc.peerMaxHeaderListSize = uint64(s.Val) - case SettingInitialWindowSize: - // Values above the maximum flow-control - // window size of 2^31-1 MUST be treated as a - // connection error (Section 5.4.1) of type - // FLOW_CONTROL_ERROR. - if s.Val > math.MaxInt32 { - return ConnectionError(ErrCodeFlowControl) - } - - // Adjust flow control of currently-open - // frames by the difference of the old initial - // window size and this one. - delta := int32(s.Val) - int32(cc.initialWindowSize) - for _, cs := range cc.streams { - cs.flow.add(delta) - } - cc.cond.Broadcast() - - cc.initialWindowSize = s.Val - default: - // TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably. 
- cc.vlogf("Unhandled Setting: %v", s) - } - return nil - }) - if err != nil { - return err - } - - cc.wmu.Lock() - defer cc.wmu.Unlock() - - cc.fr.WriteSettingsAck() - cc.bw.Flush() - return cc.werr -} - -func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { - cc := rl.cc - cs := cc.streamByID(f.StreamID, false) - if f.StreamID != 0 && cs == nil { - return nil - } - - cc.mu.Lock() - defer cc.mu.Unlock() - - fl := &cc.flow - if cs != nil { - fl = &cs.flow - } - if !fl.add(int32(f.Increment)) { - return ConnectionError(ErrCodeFlowControl) - } - cc.cond.Broadcast() - return nil -} - -func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error { - cs := rl.cc.streamByID(f.StreamID, true) - if cs == nil { - // TODO: return error if server tries to RST_STEAM an idle stream - return nil - } - select { - case <-cs.peerReset: - // Already reset. - // This is the only goroutine - // which closes this, so there - // isn't a race. - default: - err := streamError(cs.ID, f.ErrCode) - cs.resetErr = err - close(cs.peerReset) - cs.bufPipe.CloseWithError(err) - cs.cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl - } - return nil -} - -// Ping sends a PING frame to the server and waits for the ack. -// Public implementation is in go17.go and not_go17.go -func (cc *ClientConn) ping(ctx contextContext) error { - c := make(chan struct{}) - // Generate a random payload - var p [8]byte - for { - if _, err := rand.Read(p[:]); err != nil { - return err - } - cc.mu.Lock() - // check for dup before insert - if _, found := cc.pings[p]; !found { - cc.pings[p] = c - cc.mu.Unlock() - break - } - cc.mu.Unlock() - } - cc.wmu.Lock() - if err := cc.fr.WritePing(false, p); err != nil { - cc.wmu.Unlock() - return err - } - if err := cc.bw.Flush(); err != nil { - cc.wmu.Unlock() - return err - } - cc.wmu.Unlock() - select { - case <-c: - return nil - case <-ctx.Done(): - return ctx.Err() - case <-cc.readerDone: - // connection closed - return cc.readerErr - } -} - -func (rl *clientConnReadLoop) processPing(f *PingFrame) error { - if f.IsAck() { - cc := rl.cc - cc.mu.Lock() - defer cc.mu.Unlock() - // If ack, notify listener if any - if c, ok := cc.pings[f.Data]; ok { - close(c) - delete(cc.pings, f.Data) - } - return nil - } - cc := rl.cc - cc.wmu.Lock() - defer cc.wmu.Unlock() - if err := cc.fr.WritePing(true, f.Data); err != nil { - return err - } - return cc.bw.Flush() -} - -func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error { - // We told the peer we don't want them. - // Spec says: - // "PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH - // setting of the peer endpoint is set to 0. An endpoint that - // has set this setting and has received acknowledgement MUST - // treat the receipt of a PUSH_PROMISE frame as a connection - // error (Section 5.4.1) of type PROTOCOL_ERROR." - return ConnectionError(ErrCodeProtocol) -} - -func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) { - // TODO: map err to more interesting error codes, once the - // HTTP community comes up with some. But currently for - // RST_STREAM there's no equivalent to GOAWAY frame's debug - // data, and the error codes are all pretty vague ("cancel"). 
- cc.wmu.Lock() - cc.fr.WriteRSTStream(streamID, code) - cc.bw.Flush() - cc.wmu.Unlock() -} - -var ( - errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit") - errRequestHeaderListSize = errors.New("http2: request header list larger than peer's advertised limit") - errPseudoTrailers = errors.New("http2: invalid pseudo header in trailers") -) - -func (cc *ClientConn) logf(format string, args ...interface{}) { - cc.t.logf(format, args...) -} - -func (cc *ClientConn) vlogf(format string, args ...interface{}) { - cc.t.vlogf(format, args...) -} - -func (t *Transport) vlogf(format string, args ...interface{}) { - if VerboseLogs { - t.logf(format, args...) - } -} - -func (t *Transport) logf(format string, args ...interface{}) { - log.Printf(format, args...) -} - -var noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil)) - -func strSliceContains(ss []string, s string) bool { - for _, v := range ss { - if v == s { - return true - } - } - return false -} - -type erringRoundTripper struct{ err error } - -func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err } - -// gzipReader wraps a response body so it can lazily -// call gzip.NewReader on the first call to Read -type gzipReader struct { - body io.ReadCloser // underlying Response.Body - zr *gzip.Reader // lazily-initialized gzip reader - zerr error // sticky error -} - -func (gz *gzipReader) Read(p []byte) (n int, err error) { - if gz.zerr != nil { - return 0, gz.zerr - } - if gz.zr == nil { - gz.zr, err = gzip.NewReader(gz.body) - if err != nil { - gz.zerr = err - return 0, err - } - } - return gz.zr.Read(p) -} - -func (gz *gzipReader) Close() error { - return gz.body.Close() -} - -type errorReader struct{ err error } - -func (r errorReader) Read(p []byte) (int, error) { return 0, r.err } - -// bodyWriterState encapsulates various state around the Transport's writing -// of the request body, particularly regarding doing delayed writes of the body -// when the request contains "Expect: 100-continue". -type bodyWriterState struct { - cs *clientStream - timer *time.Timer // if non-nil, we're doing a delayed write - fnonce *sync.Once // to call fn with - fn func() // the code to run in the goroutine, writing the body - resc chan error // result of fn's execution - delay time.Duration // how long we should delay a delayed write for -} - -func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s bodyWriterState) { - s.cs = cs - if body == nil { - return - } - resc := make(chan error, 1) - s.resc = resc - s.fn = func() { - cs.cc.mu.Lock() - cs.startedWrite = true - cs.cc.mu.Unlock() - resc <- cs.writeRequestBody(body, cs.req.Body) - } - s.delay = t.expectContinueTimeout() - if s.delay == 0 || - !httpguts.HeaderValuesContainsToken( - cs.req.Header["Expect"], - "100-continue") { - return - } - s.fnonce = new(sync.Once) - - // Arm the timer with a very large duration, which we'll - // intentionally lower later. It has to be large now because - // we need a handle to it before writing the headers, but the - // s.delay value is defined to not start until after the - // request headers were written. 
- const hugeDuration = 365 * 24 * time.Hour - s.timer = time.AfterFunc(hugeDuration, func() { - s.fnonce.Do(s.fn) - }) - return -} - -func (s bodyWriterState) cancel() { - if s.timer != nil { - s.timer.Stop() - } -} - -func (s bodyWriterState) on100() { - if s.timer == nil { - // If we didn't do a delayed write, ignore the server's - // bogus 100 continue response. - return - } - s.timer.Stop() - go func() { s.fnonce.Do(s.fn) }() -} - -// scheduleBodyWrite starts writing the body, either immediately (in -// the common case) or after the delay timeout. It should not be -// called until after the headers have been written. -func (s bodyWriterState) scheduleBodyWrite() { - if s.timer == nil { - // We're not doing a delayed write (see - // getBodyWriterState), so just start the writing - // goroutine immediately. - go s.fn() - return - } - traceWait100Continue(s.cs.trace) - if s.timer.Stop() { - s.timer.Reset(s.delay) - } -} - -// isConnectionCloseRequest reports whether req should use its own -// connection for a single request and then close the connection. -func isConnectionCloseRequest(req *http.Request) bool { - return req.Close || httpguts.HeaderValuesContainsToken(req.Header["Connection"], "close") -} diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go deleted file mode 100644 index 8a9711f..0000000 --- a/vendor/golang.org/x/net/http2/write.go +++ /dev/null @@ -1,365 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "bytes" - "fmt" - "log" - "net/http" - "net/url" - - "golang.org/x/net/http/httpguts" - "golang.org/x/net/http2/hpack" -) - -// writeFramer is implemented by any type that is used to write frames. -type writeFramer interface { - writeFrame(writeContext) error - - // staysWithinBuffer reports whether this writer promises that - // it will only write less than or equal to size bytes, and it - // won't Flush the write context. - staysWithinBuffer(size int) bool -} - -// writeContext is the interface needed by the various frame writer -// types below. All the writeFrame methods below are scheduled via the -// frame writing scheduler (see writeScheduler in writesched.go). -// -// This interface is implemented by *serverConn. -// -// TODO: decide whether to a) use this in the client code (which didn't -// end up using this yet, because it has a simpler design, not -// currently implementing priorities), or b) delete this and -// make the server code a bit more concrete. -type writeContext interface { - Framer() *Framer - Flush() error - CloseConn() error - // HeaderEncoder returns an HPACK encoder that writes to the - // returned buffer. - HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) -} - -// writeEndsStream reports whether w writes a frame that will transition -// the stream to a half-closed local state. This returns false for RST_STREAM, -// which closes the entire stream (not just the local half). -func writeEndsStream(w writeFramer) bool { - switch v := w.(type) { - case *writeData: - return v.endStream - case *writeResHeaders: - return v.endStream - case nil: - // This can only happen if the caller reuses w after it's - // been intentionally nil'ed out to prevent use. Keep this - // here to catch future refactoring breaking it. 
- panic("writeEndsStream called on nil writeFramer") - } - return false -} - -type flushFrameWriter struct{} - -func (flushFrameWriter) writeFrame(ctx writeContext) error { - return ctx.Flush() -} - -func (flushFrameWriter) staysWithinBuffer(max int) bool { return false } - -type writeSettings []Setting - -func (s writeSettings) staysWithinBuffer(max int) bool { - const settingSize = 6 // uint16 + uint32 - return frameHeaderLen+settingSize*len(s) <= max - -} - -func (s writeSettings) writeFrame(ctx writeContext) error { - return ctx.Framer().WriteSettings([]Setting(s)...) -} - -type writeGoAway struct { - maxStreamID uint32 - code ErrCode -} - -func (p *writeGoAway) writeFrame(ctx writeContext) error { - err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil) - ctx.Flush() // ignore error: we're hanging up on them anyway - return err -} - -func (*writeGoAway) staysWithinBuffer(max int) bool { return false } // flushes - -type writeData struct { - streamID uint32 - p []byte - endStream bool -} - -func (w *writeData) String() string { - return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream) -} - -func (w *writeData) writeFrame(ctx writeContext) error { - return ctx.Framer().WriteData(w.streamID, w.endStream, w.p) -} - -func (w *writeData) staysWithinBuffer(max int) bool { - return frameHeaderLen+len(w.p) <= max -} - -// handlerPanicRST is the message sent from handler goroutines when -// the handler panics. -type handlerPanicRST struct { - StreamID uint32 -} - -func (hp handlerPanicRST) writeFrame(ctx writeContext) error { - return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal) -} - -func (hp handlerPanicRST) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } - -func (se StreamError) writeFrame(ctx writeContext) error { - return ctx.Framer().WriteRSTStream(se.StreamID, se.Code) -} - -func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } - -type writePingAck struct{ pf *PingFrame } - -func (w writePingAck) writeFrame(ctx writeContext) error { - return ctx.Framer().WritePing(true, w.pf.Data) -} - -func (w writePingAck) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.pf.Data) <= max } - -type writeSettingsAck struct{} - -func (writeSettingsAck) writeFrame(ctx writeContext) error { - return ctx.Framer().WriteSettingsAck() -} - -func (writeSettingsAck) staysWithinBuffer(max int) bool { return frameHeaderLen <= max } - -// splitHeaderBlock splits headerBlock into fragments so that each fragment fits -// in a single frame, then calls fn for each fragment. firstFrag/lastFrag are true -// for the first/last fragment, respectively. -func splitHeaderBlock(ctx writeContext, headerBlock []byte, fn func(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error) error { - // For now we're lazy and just pick the minimum MAX_FRAME_SIZE - // that all peers must support (16KB). Later we could care - // more and send larger frames if the peer advertised it, but - // there's little point. Most headers are small anyway (so we - // generally won't have CONTINUATION frames), and extra frames - // only waste 9 bytes anyway. 
- const maxFrameSize = 16384 - - first := true - for len(headerBlock) > 0 { - frag := headerBlock - if len(frag) > maxFrameSize { - frag = frag[:maxFrameSize] - } - headerBlock = headerBlock[len(frag):] - if err := fn(ctx, frag, first, len(headerBlock) == 0); err != nil { - return err - } - first = false - } - return nil -} - -// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames -// for HTTP response headers or trailers from a server handler. -type writeResHeaders struct { - streamID uint32 - httpResCode int // 0 means no ":status" line - h http.Header // may be nil - trailers []string // if non-nil, which keys of h to write. nil means all. - endStream bool - - date string - contentType string - contentLength string -} - -func encKV(enc *hpack.Encoder, k, v string) { - if VerboseLogs { - log.Printf("http2: server encoding header %q = %q", k, v) - } - enc.WriteField(hpack.HeaderField{Name: k, Value: v}) -} - -func (w *writeResHeaders) staysWithinBuffer(max int) bool { - // TODO: this is a common one. It'd be nice to return true - // here and get into the fast path if we could be clever and - // calculate the size fast enough, or at least a conservative - // uppper bound that usually fires. (Maybe if w.h and - // w.trailers are nil, so we don't need to enumerate it.) - // Otherwise I'm afraid that just calculating the length to - // answer this question would be slower than the ~2µs benefit. - return false -} - -func (w *writeResHeaders) writeFrame(ctx writeContext) error { - enc, buf := ctx.HeaderEncoder() - buf.Reset() - - if w.httpResCode != 0 { - encKV(enc, ":status", httpCodeString(w.httpResCode)) - } - - encodeHeaders(enc, w.h, w.trailers) - - if w.contentType != "" { - encKV(enc, "content-type", w.contentType) - } - if w.contentLength != "" { - encKV(enc, "content-length", w.contentLength) - } - if w.date != "" { - encKV(enc, "date", w.date) - } - - headerBlock := buf.Bytes() - if len(headerBlock) == 0 && w.trailers == nil { - panic("unexpected empty hpack") - } - - return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock) -} - -func (w *writeResHeaders) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error { - if firstFrag { - return ctx.Framer().WriteHeaders(HeadersFrameParam{ - StreamID: w.streamID, - BlockFragment: frag, - EndStream: w.endStream, - EndHeaders: lastFrag, - }) - } else { - return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag) - } -} - -// writePushPromise is a request to write a PUSH_PROMISE and 0+ CONTINUATION frames. -type writePushPromise struct { - streamID uint32 // pusher stream - method string // for :method - url *url.URL // for :scheme, :authority, :path - h http.Header - - // Creates an ID for a pushed stream. This runs on serveG just before - // the frame is written. The returned ID is copied to promisedID. 
- allocatePromisedID func() (uint32, error) - promisedID uint32 -} - -func (w *writePushPromise) staysWithinBuffer(max int) bool { - // TODO: see writeResHeaders.staysWithinBuffer - return false -} - -func (w *writePushPromise) writeFrame(ctx writeContext) error { - enc, buf := ctx.HeaderEncoder() - buf.Reset() - - encKV(enc, ":method", w.method) - encKV(enc, ":scheme", w.url.Scheme) - encKV(enc, ":authority", w.url.Host) - encKV(enc, ":path", w.url.RequestURI()) - encodeHeaders(enc, w.h, nil) - - headerBlock := buf.Bytes() - if len(headerBlock) == 0 { - panic("unexpected empty hpack") - } - - return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock) -} - -func (w *writePushPromise) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error { - if firstFrag { - return ctx.Framer().WritePushPromise(PushPromiseParam{ - StreamID: w.streamID, - PromiseID: w.promisedID, - BlockFragment: frag, - EndHeaders: lastFrag, - }) - } else { - return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag) - } -} - -type write100ContinueHeadersFrame struct { - streamID uint32 -} - -func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error { - enc, buf := ctx.HeaderEncoder() - buf.Reset() - encKV(enc, ":status", "100") - return ctx.Framer().WriteHeaders(HeadersFrameParam{ - StreamID: w.streamID, - BlockFragment: buf.Bytes(), - EndStream: false, - EndHeaders: true, - }) -} - -func (w write100ContinueHeadersFrame) staysWithinBuffer(max int) bool { - // Sloppy but conservative: - return 9+2*(len(":status")+len("100")) <= max -} - -type writeWindowUpdate struct { - streamID uint32 // or 0 for conn-level - n uint32 -} - -func (wu writeWindowUpdate) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } - -func (wu writeWindowUpdate) writeFrame(ctx writeContext) error { - return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n) -} - -// encodeHeaders encodes an http.Header. If keys is not nil, then (k, h[k]) -// is encoded only only if k is in keys. -func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) { - if keys == nil { - sorter := sorterPool.Get().(*sorter) - // Using defer here, since the returned keys from the - // sorter.Keys method is only valid until the sorter - // is returned: - defer sorterPool.Put(sorter) - keys = sorter.Keys(h) - } - for _, k := range keys { - vv := h[k] - k = lowerHeader(k) - if !validWireHeaderFieldName(k) { - // Skip it as backup paranoia. Per - // golang.org/issue/14048, these should - // already be rejected at a higher level. - continue - } - isTE := k == "transfer-encoding" - for _, v := range vv { - if !httpguts.ValidHeaderFieldValue(v) { - // TODO: return an error? golang.org/issue/14048 - // For now just omit it. - continue - } - // TODO: more of "8.1.2.2 Connection-Specific Header Fields" - if isTE && v != "trailers" { - continue - } - encKV(enc, k, v) - } - } -} diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go deleted file mode 100644 index 4fe3073..0000000 --- a/vendor/golang.org/x/net/http2/writesched.go +++ /dev/null @@ -1,242 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import "fmt" - -// WriteScheduler is the interface implemented by HTTP/2 write schedulers. -// Methods are never called concurrently. -type WriteScheduler interface { - // OpenStream opens a new stream in the write scheduler. 
- // It is illegal to call this with streamID=0 or with a streamID that is - // already open -- the call may panic. - OpenStream(streamID uint32, options OpenStreamOptions) - - // CloseStream closes a stream in the write scheduler. Any frames queued on - // this stream should be discarded. It is illegal to call this on a stream - // that is not open -- the call may panic. - CloseStream(streamID uint32) - - // AdjustStream adjusts the priority of the given stream. This may be called - // on a stream that has not yet been opened or has been closed. Note that - // RFC 7540 allows PRIORITY frames to be sent on streams in any state. See: - // https://tools.ietf.org/html/rfc7540#section-5.1 - AdjustStream(streamID uint32, priority PriorityParam) - - // Push queues a frame in the scheduler. In most cases, this will not be - // called with wr.StreamID()!=0 unless that stream is currently open. The one - // exception is RST_STREAM frames, which may be sent on idle or closed streams. - Push(wr FrameWriteRequest) - - // Pop dequeues the next frame to write. Returns false if no frames can - // be written. Frames with a given wr.StreamID() are Pop'd in the same - // order they are Push'd. - Pop() (wr FrameWriteRequest, ok bool) -} - -// OpenStreamOptions specifies extra options for WriteScheduler.OpenStream. -type OpenStreamOptions struct { - // PusherID is zero if the stream was initiated by the client. Otherwise, - // PusherID names the stream that pushed the newly opened stream. - PusherID uint32 -} - -// FrameWriteRequest is a request to write a frame. -type FrameWriteRequest struct { - // write is the interface value that does the writing, once the - // WriteScheduler has selected this frame to write. The write - // functions are all defined in write.go. - write writeFramer - - // stream is the stream on which this frame will be written. - // nil for non-stream frames like PING and SETTINGS. - stream *stream - - // done, if non-nil, must be a buffered channel with space for - // 1 message and is sent the return value from write (or an - // earlier error) when the frame has been written. - done chan error -} - -// StreamID returns the id of the stream this frame will be written to. -// 0 is used for non-stream frames such as PING and SETTINGS. -func (wr FrameWriteRequest) StreamID() uint32 { - if wr.stream == nil { - if se, ok := wr.write.(StreamError); ok { - // (*serverConn).resetStream doesn't set - // stream because it doesn't necessarily have - // one. So special case this type of write - // message. - return se.StreamID - } - return 0 - } - return wr.stream.id -} - -// DataSize returns the number of flow control bytes that must be consumed -// to write this entire frame. This is 0 for non-DATA frames. -func (wr FrameWriteRequest) DataSize() int { - if wd, ok := wr.write.(*writeData); ok { - return len(wd.p) - } - return 0 -} - -// Consume consumes min(n, available) bytes from this frame, where available -// is the number of flow control bytes available on the stream. Consume returns -// 0, 1, or 2 frames, where the integer return value gives the number of frames -// returned. -// -// If flow control prevents consuming any bytes, this returns (_, _, 0). If -// the entire frame was consumed, this returns (wr, _, 1). Otherwise, this -// returns (consumed, rest, 2), where 'consumed' contains the consumed bytes and -// 'rest' contains the remaining bytes. The consumed bytes are deducted from the -// underlying stream's flow control budget. 
-func (wr FrameWriteRequest) Consume(n int32) (FrameWriteRequest, FrameWriteRequest, int) { - var empty FrameWriteRequest - - // Non-DATA frames are always consumed whole. - wd, ok := wr.write.(*writeData) - if !ok || len(wd.p) == 0 { - return wr, empty, 1 - } - - // Might need to split after applying limits. - allowed := wr.stream.flow.available() - if n < allowed { - allowed = n - } - if wr.stream.sc.maxFrameSize < allowed { - allowed = wr.stream.sc.maxFrameSize - } - if allowed <= 0 { - return empty, empty, 0 - } - if len(wd.p) > int(allowed) { - wr.stream.flow.take(allowed) - consumed := FrameWriteRequest{ - stream: wr.stream, - write: &writeData{ - streamID: wd.streamID, - p: wd.p[:allowed], - // Even if the original had endStream set, there - // are bytes remaining because len(wd.p) > allowed, - // so we know endStream is false. - endStream: false, - }, - // Our caller is blocking on the final DATA frame, not - // this intermediate frame, so no need to wait. - done: nil, - } - rest := FrameWriteRequest{ - stream: wr.stream, - write: &writeData{ - streamID: wd.streamID, - p: wd.p[allowed:], - endStream: wd.endStream, - }, - done: wr.done, - } - return consumed, rest, 2 - } - - // The frame is consumed whole. - // NB: This cast cannot overflow because allowed is <= math.MaxInt32. - wr.stream.flow.take(int32(len(wd.p))) - return wr, empty, 1 -} - -// String is for debugging only. -func (wr FrameWriteRequest) String() string { - var des string - if s, ok := wr.write.(fmt.Stringer); ok { - des = s.String() - } else { - des = fmt.Sprintf("%T", wr.write) - } - return fmt.Sprintf("[FrameWriteRequest stream=%d, ch=%v, writer=%v]", wr.StreamID(), wr.done != nil, des) -} - -// replyToWriter sends err to wr.done and panics if the send must block -// This does nothing if wr.done is nil. -func (wr *FrameWriteRequest) replyToWriter(err error) { - if wr.done == nil { - return - } - select { - case wr.done <- err: - default: - panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wr.write)) - } - wr.write = nil // prevent use (assume it's tainted after wr.done send) -} - -// writeQueue is used by implementations of WriteScheduler. -type writeQueue struct { - s []FrameWriteRequest -} - -func (q *writeQueue) empty() bool { return len(q.s) == 0 } - -func (q *writeQueue) push(wr FrameWriteRequest) { - q.s = append(q.s, wr) -} - -func (q *writeQueue) shift() FrameWriteRequest { - if len(q.s) == 0 { - panic("invalid use of queue") - } - wr := q.s[0] - // TODO: less copy-happy queue. - copy(q.s, q.s[1:]) - q.s[len(q.s)-1] = FrameWriteRequest{} - q.s = q.s[:len(q.s)-1] - return wr -} - -// consume consumes up to n bytes from q.s[0]. If the frame is -// entirely consumed, it is removed from the queue. If the frame -// is partially consumed, the frame is kept with the consumed -// bytes removed. Returns true iff any bytes were consumed. -func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) { - if len(q.s) == 0 { - return FrameWriteRequest{}, false - } - consumed, rest, numresult := q.s[0].Consume(n) - switch numresult { - case 0: - return FrameWriteRequest{}, false - case 1: - q.shift() - case 2: - q.s[0] = rest - } - return consumed, true -} - -type writeQueuePool []*writeQueue - -// put inserts an unused writeQueue into the pool. -func (p *writeQueuePool) put(q *writeQueue) { - for i := range q.s { - q.s[i] = FrameWriteRequest{} - } - q.s = q.s[:0] - *p = append(*p, q) -} - -// get returns an empty writeQueue. 
-func (p *writeQueuePool) get() *writeQueue { - ln := len(*p) - if ln == 0 { - return new(writeQueue) - } - x := ln - 1 - q := (*p)[x] - (*p)[x] = nil - *p = (*p)[:x] - return q -} diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority.go deleted file mode 100644 index 848fed6..0000000 --- a/vendor/golang.org/x/net/http2/writesched_priority.go +++ /dev/null @@ -1,452 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "fmt" - "math" - "sort" -) - -// RFC 7540, Section 5.3.5: the default weight is 16. -const priorityDefaultWeight = 15 // 16 = 15 + 1 - -// PriorityWriteSchedulerConfig configures a priorityWriteScheduler. -type PriorityWriteSchedulerConfig struct { - // MaxClosedNodesInTree controls the maximum number of closed streams to - // retain in the priority tree. Setting this to zero saves a small amount - // of memory at the cost of performance. - // - // See RFC 7540, Section 5.3.4: - // "It is possible for a stream to become closed while prioritization - // information ... is in transit. ... This potentially creates suboptimal - // prioritization, since the stream could be given a priority that is - // different from what is intended. To avoid these problems, an endpoint - // SHOULD retain stream prioritization state for a period after streams - // become closed. The longer state is retained, the lower the chance that - // streams are assigned incorrect or default priority values." - MaxClosedNodesInTree int - - // MaxIdleNodesInTree controls the maximum number of idle streams to - // retain in the priority tree. Setting this to zero saves a small amount - // of memory at the cost of performance. - // - // See RFC 7540, Section 5.3.4: - // Similarly, streams that are in the "idle" state can be assigned - // priority or become a parent of other streams. This allows for the - // creation of a grouping node in the dependency tree, which enables - // more flexible expressions of priority. Idle streams begin with a - // default priority (Section 5.3.5). - MaxIdleNodesInTree int - - // ThrottleOutOfOrderWrites enables write throttling to help ensure that - // data is delivered in priority order. This works around a race where - // stream B depends on stream A and both streams are about to call Write - // to queue DATA frames. If B wins the race, a naive scheduler would eagerly - // write as much data from B as possible, but this is suboptimal because A - // is a higher-priority stream. With throttling enabled, we write a small - // amount of data from B to minimize the amount of bandwidth that B can - // steal from A. - ThrottleOutOfOrderWrites bool -} - -// NewPriorityWriteScheduler constructs a WriteScheduler that schedules -// frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3. -// If cfg is nil, default options are used. 
-func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler { - if cfg == nil { - // For justification of these defaults, see: - // https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY - cfg = &PriorityWriteSchedulerConfig{ - MaxClosedNodesInTree: 10, - MaxIdleNodesInTree: 10, - ThrottleOutOfOrderWrites: false, - } - } - - ws := &priorityWriteScheduler{ - nodes: make(map[uint32]*priorityNode), - maxClosedNodesInTree: cfg.MaxClosedNodesInTree, - maxIdleNodesInTree: cfg.MaxIdleNodesInTree, - enableWriteThrottle: cfg.ThrottleOutOfOrderWrites, - } - ws.nodes[0] = &ws.root - if cfg.ThrottleOutOfOrderWrites { - ws.writeThrottleLimit = 1024 - } else { - ws.writeThrottleLimit = math.MaxInt32 - } - return ws -} - -type priorityNodeState int - -const ( - priorityNodeOpen priorityNodeState = iota - priorityNodeClosed - priorityNodeIdle -) - -// priorityNode is a node in an HTTP/2 priority tree. -// Each node is associated with a single stream ID. -// See RFC 7540, Section 5.3. -type priorityNode struct { - q writeQueue // queue of pending frames to write - id uint32 // id of the stream, or 0 for the root of the tree - weight uint8 // the actual weight is weight+1, so the value is in [1,256] - state priorityNodeState // open | closed | idle - bytes int64 // number of bytes written by this node, or 0 if closed - subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree - - // These links form the priority tree. - parent *priorityNode - kids *priorityNode // start of the kids list - prev, next *priorityNode // doubly-linked list of siblings -} - -func (n *priorityNode) setParent(parent *priorityNode) { - if n == parent { - panic("setParent to self") - } - if n.parent == parent { - return - } - // Unlink from current parent. - if parent := n.parent; parent != nil { - if n.prev == nil { - parent.kids = n.next - } else { - n.prev.next = n.next - } - if n.next != nil { - n.next.prev = n.prev - } - } - // Link to new parent. - // If parent=nil, remove n from the tree. - // Always insert at the head of parent.kids (this is assumed by walkReadyInOrder). - n.parent = parent - if parent == nil { - n.next = nil - n.prev = nil - } else { - n.next = parent.kids - n.prev = nil - if n.next != nil { - n.next.prev = n - } - parent.kids = n - } -} - -func (n *priorityNode) addBytes(b int64) { - n.bytes += b - for ; n != nil; n = n.parent { - n.subtreeBytes += b - } -} - -// walkReadyInOrder iterates over the tree in priority order, calling f for each node -// with a non-empty write queue. When f returns true, this funcion returns true and the -// walk halts. tmp is used as scratch space for sorting. -// -// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true -// if any ancestor p of n is still open (ignoring the root node). -func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool { - if !n.q.empty() && f(n, openParent) { - return true - } - if n.kids == nil { - return false - } - - // Don't consider the root "open" when updating openParent since - // we can't send data frames on the root stream (only control frames). - if n.id != 0 { - openParent = openParent || (n.state == priorityNodeOpen) - } - - // Common case: only one kid or all kids have the same weight. - // Some clients don't use weights; other clients (like web browsers) - // use mostly-linear priority trees. 
- w := n.kids.weight - needSort := false - for k := n.kids.next; k != nil; k = k.next { - if k.weight != w { - needSort = true - break - } - } - if !needSort { - for k := n.kids; k != nil; k = k.next { - if k.walkReadyInOrder(openParent, tmp, f) { - return true - } - } - return false - } - - // Uncommon case: sort the child nodes. We remove the kids from the parent, - // then re-insert after sorting so we can reuse tmp for future sort calls. - *tmp = (*tmp)[:0] - for n.kids != nil { - *tmp = append(*tmp, n.kids) - n.kids.setParent(nil) - } - sort.Sort(sortPriorityNodeSiblings(*tmp)) - for i := len(*tmp) - 1; i >= 0; i-- { - (*tmp)[i].setParent(n) // setParent inserts at the head of n.kids - } - for k := n.kids; k != nil; k = k.next { - if k.walkReadyInOrder(openParent, tmp, f) { - return true - } - } - return false -} - -type sortPriorityNodeSiblings []*priorityNode - -func (z sortPriorityNodeSiblings) Len() int { return len(z) } -func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] } -func (z sortPriorityNodeSiblings) Less(i, k int) bool { - // Prefer the subtree that has sent fewer bytes relative to its weight. - // See sections 5.3.2 and 5.3.4. - wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes) - wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes) - if bi == 0 && bk == 0 { - return wi >= wk - } - if bk == 0 { - return false - } - return bi/bk <= wi/wk -} - -type priorityWriteScheduler struct { - // root is the root of the priority tree, where root.id = 0. - // The root queues control frames that are not associated with any stream. - root priorityNode - - // nodes maps stream ids to priority tree nodes. - nodes map[uint32]*priorityNode - - // maxID is the maximum stream id in nodes. - maxID uint32 - - // lists of nodes that have been closed or are idle, but are kept in - // the tree for improved prioritization. When the lengths exceed either - // maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded. - closedNodes, idleNodes []*priorityNode - - // From the config. - maxClosedNodesInTree int - maxIdleNodesInTree int - writeThrottleLimit int32 - enableWriteThrottle bool - - // tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations. - tmp []*priorityNode - - // pool of empty queues for reuse. - queuePool writeQueuePool -} - -func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { - // The stream may be currently idle but cannot be opened or closed. - if curr := ws.nodes[streamID]; curr != nil { - if curr.state != priorityNodeIdle { - panic(fmt.Sprintf("stream %d already opened", streamID)) - } - curr.state = priorityNodeOpen - return - } - - // RFC 7540, Section 5.3.5: - // "All streams are initially assigned a non-exclusive dependency on stream 0x0. - // Pushed streams initially depend on their associated stream. In both cases, - // streams are assigned a default weight of 16." 
- parent := ws.nodes[options.PusherID] - if parent == nil { - parent = &ws.root - } - n := &priorityNode{ - q: *ws.queuePool.get(), - id: streamID, - weight: priorityDefaultWeight, - state: priorityNodeOpen, - } - n.setParent(parent) - ws.nodes[streamID] = n - if streamID > ws.maxID { - ws.maxID = streamID - } -} - -func (ws *priorityWriteScheduler) CloseStream(streamID uint32) { - if streamID == 0 { - panic("violation of WriteScheduler interface: cannot close stream 0") - } - if ws.nodes[streamID] == nil { - panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID)) - } - if ws.nodes[streamID].state != priorityNodeOpen { - panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID)) - } - - n := ws.nodes[streamID] - n.state = priorityNodeClosed - n.addBytes(-n.bytes) - - q := n.q - ws.queuePool.put(&q) - n.q.s = nil - if ws.maxClosedNodesInTree > 0 { - ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n) - } else { - ws.removeNode(n) - } -} - -func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { - if streamID == 0 { - panic("adjustPriority on root") - } - - // If streamID does not exist, there are two cases: - // - A closed stream that has been removed (this will have ID <= maxID) - // - An idle stream that is being used for "grouping" (this will have ID > maxID) - n := ws.nodes[streamID] - if n == nil { - if streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 { - return - } - ws.maxID = streamID - n = &priorityNode{ - q: *ws.queuePool.get(), - id: streamID, - weight: priorityDefaultWeight, - state: priorityNodeIdle, - } - n.setParent(&ws.root) - ws.nodes[streamID] = n - ws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, n) - } - - // Section 5.3.1: A dependency on a stream that is not currently in the tree - // results in that stream being given a default priority (Section 5.3.5). - parent := ws.nodes[priority.StreamDep] - if parent == nil { - n.setParent(&ws.root) - n.weight = priorityDefaultWeight - return - } - - // Ignore if the client tries to make a node its own parent. - if n == parent { - return - } - - // Section 5.3.3: - // "If a stream is made dependent on one of its own dependencies, the - // formerly dependent stream is first moved to be dependent on the - // reprioritized stream's previous parent. The moved dependency retains - // its weight." - // - // That is: if parent depends on n, move parent to depend on n.parent. - for x := parent.parent; x != nil; x = x.parent { - if x == n { - parent.setParent(n.parent) - break - } - } - - // Section 5.3.3: The exclusive flag causes the stream to become the sole - // dependency of its parent stream, causing other dependencies to become - // dependent on the exclusive stream. - if priority.Exclusive { - k := parent.kids - for k != nil { - next := k.next - if k != n { - k.setParent(n) - } - k = next - } - } - - n.setParent(parent) - n.weight = priority.Weight -} - -func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) { - var n *priorityNode - if id := wr.StreamID(); id == 0 { - n = &ws.root - } else { - n = ws.nodes[id] - if n == nil { - // id is an idle or closed stream. wr should not be a HEADERS or - // DATA frame. However, wr can be a RST_STREAM. In this case, we - // push wr onto the root, rather than creating a new priorityNode, - // since RST_STREAM is tiny and the stream's priority is unknown - // anyway. See issue #17919. 
- if wr.DataSize() > 0 { - panic("add DATA on non-open stream") - } - n = &ws.root - } - } - n.q.push(wr) -} - -func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) { - ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool { - limit := int32(math.MaxInt32) - if openParent { - limit = ws.writeThrottleLimit - } - wr, ok = n.q.consume(limit) - if !ok { - return false - } - n.addBytes(int64(wr.DataSize())) - // If B depends on A and B continuously has data available but A - // does not, gradually increase the throttling limit to allow B to - // steal more and more bandwidth from A. - if openParent { - ws.writeThrottleLimit += 1024 - if ws.writeThrottleLimit < 0 { - ws.writeThrottleLimit = math.MaxInt32 - } - } else if ws.enableWriteThrottle { - ws.writeThrottleLimit = 1024 - } - return true - }) - return wr, ok -} - -func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) { - if maxSize == 0 { - return - } - if len(*list) == maxSize { - // Remove the oldest node, then shift left. - ws.removeNode((*list)[0]) - x := (*list)[1:] - copy(*list, x) - *list = (*list)[:len(x)] - } - *list = append(*list, n) -} - -func (ws *priorityWriteScheduler) removeNode(n *priorityNode) { - for k := n.kids; k != nil; k = k.next { - k.setParent(n.parent) - } - n.setParent(nil) - delete(ws.nodes, n.id) -} diff --git a/vendor/golang.org/x/net/http2/writesched_random.go b/vendor/golang.org/x/net/http2/writesched_random.go deleted file mode 100644 index 36d7919..0000000 --- a/vendor/golang.org/x/net/http2/writesched_random.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import "math" - -// NewRandomWriteScheduler constructs a WriteScheduler that ignores HTTP/2 -// priorities. Control frames like SETTINGS and PING are written before DATA -// frames, but if no control frames are queued and multiple streams have queued -// HEADERS or DATA frames, Pop selects a ready stream arbitrarily. -func NewRandomWriteScheduler() WriteScheduler { - return &randomWriteScheduler{sq: make(map[uint32]*writeQueue)} -} - -type randomWriteScheduler struct { - // zero are frames not associated with a specific stream. - zero writeQueue - - // sq contains the stream-specific queues, keyed by stream ID. - // When a stream is idle or closed, it's deleted from the map. - sq map[uint32]*writeQueue - - // pool of empty queues for reuse. - queuePool writeQueuePool -} - -func (ws *randomWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { - // no-op: idle streams are not tracked -} - -func (ws *randomWriteScheduler) CloseStream(streamID uint32) { - q, ok := ws.sq[streamID] - if !ok { - return - } - delete(ws.sq, streamID) - ws.queuePool.put(q) -} - -func (ws *randomWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { - // no-op: priorities are ignored -} - -func (ws *randomWriteScheduler) Push(wr FrameWriteRequest) { - id := wr.StreamID() - if id == 0 { - ws.zero.push(wr) - return - } - q, ok := ws.sq[id] - if !ok { - q = ws.queuePool.get() - ws.sq[id] = q - } - q.push(wr) -} - -func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) { - // Control frames first. - if !ws.zero.empty() { - return ws.zero.shift(), true - } - // Iterate over all non-idle streams until finding one that can be consumed. 
-	for _, q := range ws.sq {
-		if wr, ok := q.consume(math.MaxInt32); ok {
-			return wr, true
-		}
-	}
-	return FrameWriteRequest{}, false
-}
--
cgit v1.2.3