Add glide.yaml and vendor deps
parent db918f12ad
commit 5b3d5e81bd
18880 changed files with 5166045 additions and 1 deletion
2 vendor/golang.org/x/net/http2/.gitignore (generated, vendored, Normal file)
@@ -0,0 +1,2 @@
*~
h2i/h2i
51 vendor/golang.org/x/net/http2/Dockerfile (generated, vendored, Normal file)
@@ -0,0 +1,51 @@
#
# This Dockerfile builds a recent curl with HTTP/2 client support, using
# a recent nghttp2 build.
#
# See the Makefile for how to tag it. If Docker and that image is found, the
# Go tests use this curl binary for integration tests.
#

FROM ubuntu:trusty

RUN apt-get update && \
    apt-get upgrade -y && \
    apt-get install -y git-core build-essential wget

RUN apt-get install -y --no-install-recommends \
    autotools-dev libtool pkg-config zlib1g-dev \
    libcunit1-dev libssl-dev libxml2-dev libevent-dev \
    automake autoconf

# The list of packages nghttp2 recommends for h2load:
RUN apt-get install -y --no-install-recommends make binutils \
    autoconf automake autotools-dev \
    libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \
    libev-dev libevent-dev libjansson-dev libjemalloc-dev \
    cython python3.4-dev python-setuptools

# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached:
ENV NGHTTP2_VER 895da9a
RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git

WORKDIR /root/nghttp2
RUN git reset --hard $NGHTTP2_VER
RUN autoreconf -i
RUN automake
RUN autoconf
RUN ./configure
RUN make
RUN make install

WORKDIR /root
RUN wget http://curl.haxx.se/download/curl-7.45.0.tar.gz
RUN tar -zxvf curl-7.45.0.tar.gz
WORKDIR /root/curl-7.45.0
RUN ./configure --with-ssl --with-nghttp2=/usr/local
RUN make
RUN make install
RUN ldconfig

CMD ["-h"]
ENTRYPOINT ["/usr/local/bin/curl"]

3 vendor/golang.org/x/net/http2/Makefile (generated, vendored, Normal file)
@@ -0,0 +1,3 @@
curlimage:
	docker build -t gohttp2/curl .

20 vendor/golang.org/x/net/http2/README (generated, vendored, Normal file)
@@ -0,0 +1,20 @@
This is a work-in-progress HTTP/2 implementation for Go.

It will eventually live in the Go standard library and won't require
any changes to your code to use. It will just be automatic.

Status:

* The server support is pretty good. A few things are missing
  but are being worked on.
* The client work has just started but, since it shares a lot of code
  with the server, is coming along much quicker.

Docs are at https://godoc.org/golang.org/x/net/http2

Demo test server at https://http2.golang.org/

Help & bug reports welcome!

Contributing: https://golang.org/doc/contribute.html
Bugs: https://golang.org/issue/new?title=x/net/http2:+
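The README doesn't show how the package is wired in, so here is a minimal, hypothetical sketch (not part of this commit) of opting a standard net/http client into HTTP/2, assuming the package's usual exported helpers ConfigureTransport (Go 1.6+) and ConfigureServer:

```go
// Hypothetical sketch: enabling HTTP/2 on a plain net/http client via the
// vendored package. http2.ConfigureServer(srv, nil) is the analogous call
// for an *http.Server.
package main

import (
	"fmt"
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	tr := &http.Transport{}
	// Registers the "h2" ALPN upgrade on tr, reusing tr's TLS connections
	// (see configure_transport.go later in this diff).
	if err := http2.ConfigureTransport(tr); err != nil {
		log.Fatal(err)
	}
	client := &http.Client{Transport: tr}

	resp, err := client.Get("https://http2.golang.org/")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Proto) // "HTTP/2.0" when h2 is negotiated
}
```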
225 vendor/golang.org/x/net/http2/client_conn_pool.go (generated, vendored, Normal file)
@@ -0,0 +1,225 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Transport code's client connection pooling.

package http2

import (
	"crypto/tls"
	"net/http"
	"sync"
)

// ClientConnPool manages a pool of HTTP/2 client connections.
type ClientConnPool interface {
	GetClientConn(req *http.Request, addr string) (*ClientConn, error)
	MarkDead(*ClientConn)
}

// TODO: use singleflight for dialing and addConnCalls?
type clientConnPool struct {
	t *Transport

	mu sync.Mutex // TODO: maybe switch to RWMutex
	// TODO: add support for sharing conns based on cert names
	// (e.g. share conn for googleapis.com and appspot.com)
	conns        map[string][]*ClientConn // key is host:port
	dialing      map[string]*dialCall     // currently in-flight dials
	keys         map[*ClientConn][]string
	addConnCalls map[string]*addConnCall // in-flight addConnIfNeeded calls
}

func (p *clientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
	return p.getClientConn(req, addr, dialOnMiss)
}

const (
	dialOnMiss   = true
	noDialOnMiss = false
)

func (p *clientConnPool) getClientConn(_ *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) {
	p.mu.Lock()
	for _, cc := range p.conns[addr] {
		if cc.CanTakeNewRequest() {
			p.mu.Unlock()
			return cc, nil
		}
	}
	if !dialOnMiss {
		p.mu.Unlock()
		return nil, ErrNoCachedConn
	}
	call := p.getStartDialLocked(addr)
	p.mu.Unlock()
	<-call.done
	return call.res, call.err
}

// dialCall is an in-flight Transport dial call to a host.
type dialCall struct {
	p    *clientConnPool
	done chan struct{} // closed when done
	res  *ClientConn   // valid after done is closed
	err  error         // valid after done is closed
}

// requires p.mu is held.
func (p *clientConnPool) getStartDialLocked(addr string) *dialCall {
	if call, ok := p.dialing[addr]; ok {
		// A dial is already in-flight. Don't start another.
		return call
	}
	call := &dialCall{p: p, done: make(chan struct{})}
	if p.dialing == nil {
		p.dialing = make(map[string]*dialCall)
	}
	p.dialing[addr] = call
	go call.dial(addr)
	return call
}

// run in its own goroutine.
func (c *dialCall) dial(addr string) {
	c.res, c.err = c.p.t.dialClientConn(addr)
	close(c.done)

	c.p.mu.Lock()
	delete(c.p.dialing, addr)
	if c.err == nil {
		c.p.addConnLocked(addr, c.res)
	}
	c.p.mu.Unlock()
}

// addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't
// already exist. It coalesces concurrent calls with the same key.
// This is used by the http1 Transport code when it creates a new connection. Because
// the http1 Transport doesn't de-dup TCP dials to outbound hosts (because it doesn't know
// the protocol), it can get into a situation where it has multiple TLS connections.
// This code decides which ones live or die.
// The return value reports whether c was used.
// c is never closed.
func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) (used bool, err error) {
	p.mu.Lock()
	for _, cc := range p.conns[key] {
		if cc.CanTakeNewRequest() {
			p.mu.Unlock()
			return false, nil
		}
	}
	call, dup := p.addConnCalls[key]
	if !dup {
		if p.addConnCalls == nil {
			p.addConnCalls = make(map[string]*addConnCall)
		}
		call = &addConnCall{
			p:    p,
			done: make(chan struct{}),
		}
		p.addConnCalls[key] = call
		go call.run(t, key, c)
	}
	p.mu.Unlock()

	<-call.done
	if call.err != nil {
		return false, call.err
	}
	return !dup, nil
}

type addConnCall struct {
	p    *clientConnPool
	done chan struct{} // closed when done
	err  error
}

func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) {
	cc, err := t.NewClientConn(tc)

	p := c.p
	p.mu.Lock()
	if err != nil {
		c.err = err
	} else {
		p.addConnLocked(key, cc)
	}
	delete(p.addConnCalls, key)
	p.mu.Unlock()
	close(c.done)
}

func (p *clientConnPool) addConn(key string, cc *ClientConn) {
	p.mu.Lock()
	p.addConnLocked(key, cc)
	p.mu.Unlock()
}

// p.mu must be held
func (p *clientConnPool) addConnLocked(key string, cc *ClientConn) {
	for _, v := range p.conns[key] {
		if v == cc {
			return
		}
	}
	if p.conns == nil {
		p.conns = make(map[string][]*ClientConn)
	}
	if p.keys == nil {
		p.keys = make(map[*ClientConn][]string)
	}
	p.conns[key] = append(p.conns[key], cc)
	p.keys[cc] = append(p.keys[cc], key)
}

func (p *clientConnPool) MarkDead(cc *ClientConn) {
	p.mu.Lock()
	defer p.mu.Unlock()
	for _, key := range p.keys[cc] {
		vv, ok := p.conns[key]
		if !ok {
			continue
		}
		newList := filterOutClientConn(vv, cc)
		if len(newList) > 0 {
			p.conns[key] = newList
		} else {
			delete(p.conns, key)
		}
	}
	delete(p.keys, cc)
}

func (p *clientConnPool) closeIdleConnections() {
	p.mu.Lock()
	defer p.mu.Unlock()
	// TODO: don't close a cc if it was just added to the pool
	// milliseconds ago and has never been used. There's currently
	// a small race window with the HTTP/1 Transport's integration
	// where it can add an idle conn just before using it, and
	// somebody else can concurrently call CloseIdleConns and
	// break some caller's RoundTrip.
	for _, vv := range p.conns {
		for _, cc := range vv {
			cc.closeIfIdle()
		}
	}
}

func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn {
	out := in[:0]
	for _, v := range in {
		if v != exclude {
			out = append(out, v)
		}
	}
	// If we filtered it out, zero out the last item to prevent
	// the GC from seeing it.
	if len(in) != len(out) {
		in[len(in)-1] = nil
	}
	return out
}
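Usage note (not part of the diff): Transport exposes a ConnPool field of this ClientConnPool interface type, which configure_transport.go below sets itself, so callers can wrap or replace the pool. A hypothetical sketch, assuming you already hold a pool value to delegate to; leaving ConnPool nil keeps the package's default pool:

```go
// Hypothetical sketch: instrumenting connection pooling by wrapping another
// ClientConnPool and installing the wrapper via Transport.ConnPool.
package clientpool

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

type loggingPool struct {
	inner http2.ClientConnPool // assumed to be supplied by the caller
}

func (p loggingPool) GetClientConn(req *http.Request, addr string) (*http2.ClientConn, error) {
	log.Printf("http2 pool: lookup for %s", addr)
	return p.inner.GetClientConn(req, addr)
}

func (p loggingPool) MarkDead(cc *http2.ClientConn) {
	log.Printf("http2 pool: connection marked dead")
	p.inner.MarkDead(cc)
}

// newTransport returns a Transport that routes pool decisions through inner.
func newTransport(inner http2.ClientConnPool) *http2.Transport {
	return &http2.Transport{ConnPool: loggingPool{inner: inner}}
}
```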
89 vendor/golang.org/x/net/http2/configure_transport.go (generated, vendored, Normal file)
@@ -0,0 +1,89 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.6

package http2

import (
	"crypto/tls"
	"fmt"
	"net/http"
)

func configureTransport(t1 *http.Transport) (*Transport, error) {
	connPool := new(clientConnPool)
	t2 := &Transport{
		ConnPool: noDialClientConnPool{connPool},
		t1:       t1,
	}
	connPool.t = t2
	if err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil {
		return nil, err
	}
	if t1.TLSClientConfig == nil {
		t1.TLSClientConfig = new(tls.Config)
	}
	if !strSliceContains(t1.TLSClientConfig.NextProtos, "h2") {
		t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...)
	}
	if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") {
		t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1")
	}
	upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper {
		addr := authorityAddr(authority)
		if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil {
			go c.Close()
			return erringRoundTripper{err}
		} else if !used {
			// Turns out we don't need this c.
			// For example, two goroutines made requests to the same host
			// at the same time, both kicking off TCP dials. (since protocol
			// was unknown)
			go c.Close()
		}
		return t2
	}
	if m := t1.TLSNextProto; len(m) == 0 {
		t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{
			"h2": upgradeFn,
		}
	} else {
		m["h2"] = upgradeFn
	}
	return t2, nil
}

// registerHTTPSProtocol calls Transport.RegisterProtocol but
// converting panics into errors.
func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) {
	defer func() {
		if e := recover(); e != nil {
			err = fmt.Errorf("%v", e)
		}
	}()
	t.RegisterProtocol("https", rt)
	return nil
}

// noDialClientConnPool is an implementation of http2.ClientConnPool
// which never dials. We let the HTTP/1.1 client dial and use its TLS
// connection instead.
type noDialClientConnPool struct{ *clientConnPool }

func (p noDialClientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
	return p.getClientConn(req, addr, noDialOnMiss)
}

// noDialH2RoundTripper is a RoundTripper which only tries to complete the request
// if there's already a cached connection to the host.
type noDialH2RoundTripper struct{ t *Transport }

func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	res, err := rt.t.RoundTrip(req)
	if err == ErrNoCachedConn {
		return nil, http.ErrSkipAltProtocol
	}
	return res, err
}
122 vendor/golang.org/x/net/http2/errors.go (generated, vendored, Normal file)
@@ -0,0 +1,122 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package http2

import (
	"errors"
	"fmt"
)

// An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec.
type ErrCode uint32

const (
	ErrCodeNo                 ErrCode = 0x0
	ErrCodeProtocol           ErrCode = 0x1
	ErrCodeInternal           ErrCode = 0x2
	ErrCodeFlowControl        ErrCode = 0x3
	ErrCodeSettingsTimeout    ErrCode = 0x4
	ErrCodeStreamClosed       ErrCode = 0x5
	ErrCodeFrameSize          ErrCode = 0x6
	ErrCodeRefusedStream      ErrCode = 0x7
	ErrCodeCancel             ErrCode = 0x8
	ErrCodeCompression        ErrCode = 0x9
	ErrCodeConnect            ErrCode = 0xa
	ErrCodeEnhanceYourCalm    ErrCode = 0xb
	ErrCodeInadequateSecurity ErrCode = 0xc
	ErrCodeHTTP11Required     ErrCode = 0xd
)

var errCodeName = map[ErrCode]string{
	ErrCodeNo:                 "NO_ERROR",
	ErrCodeProtocol:           "PROTOCOL_ERROR",
	ErrCodeInternal:           "INTERNAL_ERROR",
	ErrCodeFlowControl:        "FLOW_CONTROL_ERROR",
	ErrCodeSettingsTimeout:    "SETTINGS_TIMEOUT",
	ErrCodeStreamClosed:       "STREAM_CLOSED",
	ErrCodeFrameSize:          "FRAME_SIZE_ERROR",
	ErrCodeRefusedStream:      "REFUSED_STREAM",
	ErrCodeCancel:             "CANCEL",
	ErrCodeCompression:        "COMPRESSION_ERROR",
	ErrCodeConnect:            "CONNECT_ERROR",
	ErrCodeEnhanceYourCalm:    "ENHANCE_YOUR_CALM",
	ErrCodeInadequateSecurity: "INADEQUATE_SECURITY",
	ErrCodeHTTP11Required:     "HTTP_1_1_REQUIRED",
}

func (e ErrCode) String() string {
	if s, ok := errCodeName[e]; ok {
		return s
	}
	return fmt.Sprintf("unknown error code 0x%x", uint32(e))
}

// ConnectionError is an error that results in the termination of the
// entire connection.
type ConnectionError ErrCode

func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: %s", ErrCode(e)) }

// StreamError is an error that only affects one stream within an
// HTTP/2 connection.
type StreamError struct {
	StreamID uint32
	Code     ErrCode
}

func (e StreamError) Error() string {
	return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code)
}

// 6.9.1 The Flow Control Window
// "If a sender receives a WINDOW_UPDATE that causes a flow control
// window to exceed this maximum it MUST terminate either the stream
// or the connection, as appropriate. For streams, [...]; for the
// connection, a GOAWAY frame with a FLOW_CONTROL_ERROR code."
type goAwayFlowError struct{}

func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" }

// connErrorReason wraps a ConnectionError with an informative error about why it occurs.

// Errors of this type are only returned by the frame parser functions
// and converted into ConnectionError(ErrCodeProtocol).
type connError struct {
	Code   ErrCode
	Reason string
}

func (e connError) Error() string {
	return fmt.Sprintf("http2: connection error: %v: %v", e.Code, e.Reason)
}

type pseudoHeaderError string

func (e pseudoHeaderError) Error() string {
	return fmt.Sprintf("invalid pseudo-header %q", string(e))
}

type duplicatePseudoHeaderError string

func (e duplicatePseudoHeaderError) Error() string {
	return fmt.Sprintf("duplicate pseudo-header %q", string(e))
}

type headerFieldNameError string

func (e headerFieldNameError) Error() string {
	return fmt.Sprintf("invalid header field name %q", string(e))
}

type headerFieldValueError string

func (e headerFieldValueError) Error() string {
	return fmt.Sprintf("invalid header field value %q", string(e))
}

var (
	errMixPseudoHeaderTypes = errors.New("mix of request and response pseudo headers")
	errPseudoAfterRegular   = errors.New("pseudo header field after regular")
)
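Because ConnectionError, StreamError, and ErrCode are exported, callers can tell a single stream reset apart from a connection teardown. A minimal, hypothetical sketch (not from this commit):

```go
// Hypothetical helper: classifying an error surfaced from an HTTP/2
// round trip using the exported error types defined above.
package h2errs

import (
	"fmt"

	"golang.org/x/net/http2"
)

func describe(err error) string {
	switch e := err.(type) {
	case http2.StreamError:
		// Only one stream was reset; the connection may still be usable.
		return fmt.Sprintf("stream %d reset: %v", e.StreamID, e.Code)
	case http2.ConnectionError:
		// The whole connection is being torn down.
		return fmt.Sprintf("connection error: %v", http2.ErrCode(e))
	default:
		return err.Error()
	}
}
```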
24 vendor/golang.org/x/net/http2/errors_test.go (generated, vendored, Normal file)
@@ -0,0 +1,24 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package http2

import "testing"

func TestErrCodeString(t *testing.T) {
	tests := []struct {
		err  ErrCode
		want string
	}{
		{ErrCodeProtocol, "PROTOCOL_ERROR"},
		{0xd, "HTTP_1_1_REQUIRED"},
		{0xf, "unknown error code 0xf"},
	}
	for i, tt := range tests {
		got := tt.err.String()
		if got != tt.want {
			t.Errorf("%d. Error = %q; want %q", i, got, tt.want)
		}
	}
}
60 vendor/golang.org/x/net/http2/fixed_buffer.go (generated, vendored, Normal file)
@@ -0,0 +1,60 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package http2

import (
	"errors"
)

// fixedBuffer is an io.ReadWriter backed by a fixed size buffer.
// It never allocates, but moves old data as new data is written.
type fixedBuffer struct {
	buf  []byte
	r, w int
}

var (
	errReadEmpty = errors.New("read from empty fixedBuffer")
	errWriteFull = errors.New("write on full fixedBuffer")
)

// Read copies bytes from the buffer into p.
// It is an error to read when no data is available.
func (b *fixedBuffer) Read(p []byte) (n int, err error) {
	if b.r == b.w {
		return 0, errReadEmpty
	}
	n = copy(p, b.buf[b.r:b.w])
	b.r += n
	if b.r == b.w {
		b.r = 0
		b.w = 0
	}
	return n, nil
}

// Len returns the number of bytes of the unread portion of the buffer.
func (b *fixedBuffer) Len() int {
	return b.w - b.r
}

// Write copies bytes from p into the buffer.
// It is an error to write more data than the buffer can hold.
func (b *fixedBuffer) Write(p []byte) (n int, err error) {
	// Slide existing data to beginning.
	if b.r > 0 && len(p) > len(b.buf)-b.w {
		copy(b.buf, b.buf[b.r:b.w])
		b.w -= b.r
		b.r = 0
	}

	// Write new data.
	n = copy(b.buf[b.w:], p)
	b.w += n
	if n < len(p) {
		err = errWriteFull
	}
	return n, err
}
128 vendor/golang.org/x/net/http2/fixed_buffer_test.go (generated, vendored, Normal file)
@@ -0,0 +1,128 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package http2

import (
	"reflect"
	"testing"
)

var bufferReadTests = []struct {
	buf      fixedBuffer
	read, wn int
	werr     error
	wp       []byte
	wbuf     fixedBuffer
}{
	{
		fixedBuffer{[]byte{'a', 0}, 0, 1},
		5, 1, nil, []byte{'a'},
		fixedBuffer{[]byte{'a', 0}, 0, 0},
	},
	{
		fixedBuffer{[]byte{0, 'a'}, 1, 2},
		5, 1, nil, []byte{'a'},
		fixedBuffer{[]byte{0, 'a'}, 0, 0},
	},
	{
		fixedBuffer{[]byte{'a', 'b'}, 0, 2},
		1, 1, nil, []byte{'a'},
		fixedBuffer{[]byte{'a', 'b'}, 1, 2},
	},
	{
		fixedBuffer{[]byte{}, 0, 0},
		5, 0, errReadEmpty, []byte{},
		fixedBuffer{[]byte{}, 0, 0},
	},
}

func TestBufferRead(t *testing.T) {
	for i, tt := range bufferReadTests {
		read := make([]byte, tt.read)
		n, err := tt.buf.Read(read)
		if n != tt.wn {
			t.Errorf("#%d: wn = %d want %d", i, n, tt.wn)
			continue
		}
		if err != tt.werr {
			t.Errorf("#%d: werr = %v want %v", i, err, tt.werr)
			continue
		}
		read = read[:n]
		if !reflect.DeepEqual(read, tt.wp) {
			t.Errorf("#%d: read = %+v want %+v", i, read, tt.wp)
		}
		if !reflect.DeepEqual(tt.buf, tt.wbuf) {
			t.Errorf("#%d: buf = %+v want %+v", i, tt.buf, tt.wbuf)
		}
	}
}

var bufferWriteTests = []struct {
	buf       fixedBuffer
	write, wn int
	werr      error
	wbuf      fixedBuffer
}{
	{
		buf: fixedBuffer{
			buf: []byte{},
		},
		wbuf: fixedBuffer{
			buf: []byte{},
		},
	},
	{
		buf: fixedBuffer{
			buf: []byte{1, 'a'},
		},
		write: 1,
		wn:    1,
		wbuf: fixedBuffer{
			buf: []byte{0, 'a'},
			w:   1,
		},
	},
	{
		buf: fixedBuffer{
			buf: []byte{'a', 1},
			r:   1,
			w:   1,
		},
		write: 2,
		wn:    2,
		wbuf: fixedBuffer{
			buf: []byte{0, 0},
			w:   2,
		},
	},
	{
		buf: fixedBuffer{
			buf: []byte{},
		},
		write: 5,
		werr:  errWriteFull,
		wbuf: fixedBuffer{
			buf: []byte{},
		},
	},
}

func TestBufferWrite(t *testing.T) {
	for i, tt := range bufferWriteTests {
		n, err := tt.buf.Write(make([]byte, tt.write))
		if n != tt.wn {
			t.Errorf("#%d: wrote %d bytes; want %d", i, n, tt.wn)
			continue
		}
		if err != tt.werr {
			t.Errorf("#%d: error = %v; want %v", i, err, tt.werr)
			continue
		}
		if !reflect.DeepEqual(tt.buf, tt.wbuf) {
			t.Errorf("#%d: buf = %+v; want %+v", i, tt.buf, tt.wbuf)
		}
	}
}
50 vendor/golang.org/x/net/http2/flow.go (generated, vendored, Normal file)
@@ -0,0 +1,50 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Flow control

package http2

// flow is the flow control window's size.
type flow struct {
	// n is the number of DATA bytes we're allowed to send.
	// A flow is kept both on a conn and a per-stream.
	n int32

	// conn points to the shared connection-level flow that is
	// shared by all streams on that conn. It is nil for the flow
	// that's on the conn directly.
	conn *flow
}

func (f *flow) setConnFlow(cf *flow) { f.conn = cf }

func (f *flow) available() int32 {
	n := f.n
	if f.conn != nil && f.conn.n < n {
		n = f.conn.n
	}
	return n
}

func (f *flow) take(n int32) {
	if n > f.available() {
		panic("internal error: took too much")
	}
	f.n -= n
	if f.conn != nil {
		f.conn.n -= n
	}
}

// add adds n bytes (positive or negative) to the flow control window.
// It returns false if the sum would exceed 2^31-1.
func (f *flow) add(n int32) bool {
	remain := (1<<31 - 1) - f.n
	if n > remain {
		return false
	}
	f.n += n
	return true
}
53 vendor/golang.org/x/net/http2/flow_test.go (generated, vendored, Normal file)
@@ -0,0 +1,53 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package http2

import "testing"

func TestFlow(t *testing.T) {
	var st flow
	var conn flow
	st.add(3)
	conn.add(2)

	if got, want := st.available(), int32(3); got != want {
		t.Errorf("available = %d; want %d", got, want)
	}
	st.setConnFlow(&conn)
	if got, want := st.available(), int32(2); got != want {
		t.Errorf("after parent setup, available = %d; want %d", got, want)
	}

	st.take(2)
	if got, want := conn.available(), int32(0); got != want {
		t.Errorf("after taking 2, conn = %d; want %d", got, want)
	}
	if got, want := st.available(), int32(0); got != want {
		t.Errorf("after taking 2, stream = %d; want %d", got, want)
	}
}

func TestFlowAdd(t *testing.T) {
	var f flow
	if !f.add(1) {
		t.Fatal("failed to add 1")
	}
	if !f.add(-1) {
		t.Fatal("failed to add -1")
	}
	if got, want := f.available(), int32(0); got != want {
		t.Fatalf("size = %d; want %d", got, want)
	}
	if !f.add(1<<31 - 1) {
		t.Fatal("failed to add 2^31-1")
	}
	if got, want := f.available(), int32(1<<31-1); got != want {
		t.Fatalf("size = %d; want %d", got, want)
	}
	if f.add(1) {
		t.Fatal("adding 1 to max shouldn't be allowed")
	}

}
1496 vendor/golang.org/x/net/http2/frame.go (generated, vendored, Normal file)
File diff suppressed because it is too large
975 vendor/golang.org/x/net/http2/frame_test.go (generated, vendored, Normal file)
File diff suppressed because it is too large
11 vendor/golang.org/x/net/http2/go15.go (generated, vendored, Normal file)
@@ -0,0 +1,11 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.5

package http2

import "net/http"

func requestCancel(req *http.Request) <-chan struct{} { return req.Cancel }
170 vendor/golang.org/x/net/http2/gotrack.go (generated, vendored, Normal file)
@@ -0,0 +1,170 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Defensive debug-only utility to track that functions run on the
// goroutine that they're supposed to.

package http2

import (
	"bytes"
	"errors"
	"fmt"
	"os"
	"runtime"
	"strconv"
	"sync"
)

var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1"

type goroutineLock uint64

func newGoroutineLock() goroutineLock {
	if !DebugGoroutines {
		return 0
	}
	return goroutineLock(curGoroutineID())
}

func (g goroutineLock) check() {
	if !DebugGoroutines {
		return
	}
	if curGoroutineID() != uint64(g) {
		panic("running on the wrong goroutine")
	}
}

func (g goroutineLock) checkNotOn() {
	if !DebugGoroutines {
		return
	}
	if curGoroutineID() == uint64(g) {
		panic("running on the wrong goroutine")
	}
}

var goroutineSpace = []byte("goroutine ")

func curGoroutineID() uint64 {
	bp := littleBuf.Get().(*[]byte)
	defer littleBuf.Put(bp)
	b := *bp
	b = b[:runtime.Stack(b, false)]
	// Parse the 4707 out of "goroutine 4707 ["
	b = bytes.TrimPrefix(b, goroutineSpace)
	i := bytes.IndexByte(b, ' ')
	if i < 0 {
		panic(fmt.Sprintf("No space found in %q", b))
	}
	b = b[:i]
	n, err := parseUintBytes(b, 10, 64)
	if err != nil {
		panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err))
	}
	return n
}

var littleBuf = sync.Pool{
	New: func() interface{} {
		buf := make([]byte, 64)
		return &buf
	},
}

// parseUintBytes is like strconv.ParseUint, but using a []byte.
func parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) {
	var cutoff, maxVal uint64

	if bitSize == 0 {
		bitSize = int(strconv.IntSize)
	}

	s0 := s
	switch {
	case len(s) < 1:
		err = strconv.ErrSyntax
		goto Error

	case 2 <= base && base <= 36:
		// valid base; nothing to do

	case base == 0:
		// Look for octal, hex prefix.
		switch {
		case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'):
			base = 16
			s = s[2:]
			if len(s) < 1 {
				err = strconv.ErrSyntax
				goto Error
			}
		case s[0] == '0':
			base = 8
		default:
			base = 10
		}

	default:
		err = errors.New("invalid base " + strconv.Itoa(base))
		goto Error
	}

	n = 0
	cutoff = cutoff64(base)
	maxVal = 1<<uint(bitSize) - 1

	for i := 0; i < len(s); i++ {
		var v byte
		d := s[i]
		switch {
		case '0' <= d && d <= '9':
			v = d - '0'
		case 'a' <= d && d <= 'z':
			v = d - 'a' + 10
		case 'A' <= d && d <= 'Z':
			v = d - 'A' + 10
		default:
			n = 0
			err = strconv.ErrSyntax
			goto Error
		}
		if int(v) >= base {
			n = 0
			err = strconv.ErrSyntax
			goto Error
		}

		if n >= cutoff {
			// n*base overflows
			n = 1<<64 - 1
			err = strconv.ErrRange
			goto Error
		}
		n *= uint64(base)

		n1 := n + uint64(v)
		if n1 < n || n1 > maxVal {
			// n+v overflows
			n = 1<<64 - 1
			err = strconv.ErrRange
			goto Error
		}
		n = n1
	}

	return n, nil

Error:
	return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err}
}

// Return the first number n such that n*base >= 1<<64.
func cutoff64(base int) uint64 {
	if base < 2 {
		return 0
	}
	return (1<<64-1)/uint64(base) + 1
}
33 vendor/golang.org/x/net/http2/gotrack_test.go (generated, vendored, Normal file)
@@ -0,0 +1,33 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package http2

import (
	"fmt"
	"strings"
	"testing"
)

func TestGoroutineLock(t *testing.T) {
	oldDebug := DebugGoroutines
	DebugGoroutines = true
	defer func() { DebugGoroutines = oldDebug }()

	g := newGoroutineLock()
	g.check()

	sawPanic := make(chan interface{})
	go func() {
		defer func() { sawPanic <- recover() }()
		g.check() // should panic
	}()
	e := <-sawPanic
	if e == nil {
		t.Fatal("did not see panic from check in other goroutine")
	}
	if !strings.Contains(fmt.Sprint(e), "wrong goroutine") {
		t.Errorf("expected to see panic about running on the wrong goroutine; got %v", e)
	}
}
5 vendor/golang.org/x/net/http2/h2demo/.gitignore (generated, vendored, Normal file)
@@ -0,0 +1,5 @@
h2demo
h2demo.linux
client-id.dat
client-secret.dat
token.dat
8 vendor/golang.org/x/net/http2/h2demo/Makefile (generated, vendored, Normal file)
@@ -0,0 +1,8 @@
h2demo.linux: h2demo.go
	GOOS=linux go build --tags=h2demo -o h2demo.linux .

FORCE:

upload: FORCE
	go install golang.org/x/build/cmd/upload
	upload --verbose --osarch=linux-amd64 --tags=h2demo --file=go:golang.org/x/net/http2/h2demo --public http2-demo-server-tls/h2demo
16 vendor/golang.org/x/net/http2/h2demo/README (generated, vendored, Normal file)
@@ -0,0 +1,16 @@

Client:
 -- Firefox nightly with about:config network.http.spdy.enabled.http2draft set true
 -- Chrome: go to chrome://flags/#enable-spdy4, save and restart (button at bottom)

Make CA:
$ openssl genrsa -out rootCA.key 2048
$ openssl req -x509 -new -nodes -key rootCA.key -days 1024 -out rootCA.pem
... install that to Firefox

Make cert:
$ openssl genrsa -out server.key 2048
$ openssl req -new -key server.key -out server.csr
$ openssl x509 -req -in server.csr -CA rootCA.pem -CAkey rootCA.key -CAcreateserial -out server.crt -days 500


504 vendor/golang.org/x/net/http2/h2demo/h2demo.go (generated, vendored, Normal file)
File diff suppressed because it is too large
302 vendor/golang.org/x/net/http2/h2demo/launch.go (generated, vendored, Normal file)
@@ -0,0 +1,302 @@
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/google"
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
var (
|
||||
proj = flag.String("project", "symbolic-datum-552", "name of Project")
|
||||
zone = flag.String("zone", "us-central1-a", "GCE zone")
|
||||
mach = flag.String("machinetype", "n1-standard-1", "Machine type")
|
||||
instName = flag.String("instance_name", "http2-demo", "Name of VM instance.")
|
||||
sshPub = flag.String("ssh_public_key", "", "ssh public key file to authorize. Can modify later in Google's web UI anyway.")
|
||||
staticIP = flag.String("static_ip", "130.211.116.44", "Static IP to use. If empty, automatic.")
|
||||
|
||||
writeObject = flag.String("write_object", "", "If non-empty, a VM isn't created and the flag value is Google Cloud Storage bucket/object to write. The contents from stdin.")
|
||||
publicObject = flag.Bool("write_object_is_public", false, "Whether the object created by --write_object should be public.")
|
||||
)
|
||||
|
||||
func readFile(v string) string {
|
||||
slurp, err := ioutil.ReadFile(v)
|
||||
if err != nil {
|
||||
log.Fatalf("Error reading %s: %v", v, err)
|
||||
}
|
||||
return strings.TrimSpace(string(slurp))
|
||||
}
|
||||
|
||||
var config = &oauth2.Config{
|
||||
// The client-id and secret should be for an "Installed Application" when using
|
||||
// the CLI. Later we'll use a web application with a callback.
|
||||
ClientID: readFile("client-id.dat"),
|
||||
ClientSecret: readFile("client-secret.dat"),
|
||||
Endpoint: google.Endpoint,
|
||||
Scopes: []string{
|
||||
compute.DevstorageFullControlScope,
|
||||
compute.ComputeScope,
|
||||
"https://www.googleapis.com/auth/sqlservice",
|
||||
"https://www.googleapis.com/auth/sqlservice.admin",
|
||||
},
|
||||
RedirectURL: "urn:ietf:wg:oauth:2.0:oob",
|
||||
}
|
||||
|
||||
const baseConfig = `#cloud-config
|
||||
coreos:
|
||||
units:
|
||||
- name: h2demo.service
|
||||
command: start
|
||||
content: |
|
||||
[Unit]
|
||||
Description=HTTP2 Demo
|
||||
|
||||
[Service]
|
||||
ExecStartPre=/bin/bash -c 'mkdir -p /opt/bin && curl -s -o /opt/bin/h2demo http://storage.googleapis.com/http2-demo-server-tls/h2demo && chmod +x /opt/bin/h2demo'
|
||||
ExecStart=/opt/bin/h2demo --prod
|
||||
RestartSec=5s
|
||||
Restart=always
|
||||
Type=simple
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
`
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
if *proj == "" {
|
||||
log.Fatalf("Missing --project flag")
|
||||
}
|
||||
prefix := "https://www.googleapis.com/compute/v1/projects/" + *proj
|
||||
machType := prefix + "/zones/" + *zone + "/machineTypes/" + *mach
|
||||
|
||||
const tokenFileName = "token.dat"
|
||||
tokenFile := tokenCacheFile(tokenFileName)
|
||||
tokenSource := oauth2.ReuseTokenSource(nil, tokenFile)
|
||||
token, err := tokenSource.Token()
|
||||
if err != nil {
|
||||
if *writeObject != "" {
|
||||
log.Fatalf("Can't use --write_object without a valid token.dat file already cached.")
|
||||
}
|
||||
log.Printf("Error getting token from %s: %v", tokenFileName, err)
|
||||
log.Printf("Get auth code from %v", config.AuthCodeURL("my-state"))
|
||||
fmt.Print("\nEnter auth code: ")
|
||||
sc := bufio.NewScanner(os.Stdin)
|
||||
sc.Scan()
|
||||
authCode := strings.TrimSpace(sc.Text())
|
||||
token, err = config.Exchange(oauth2.NoContext, authCode)
|
||||
if err != nil {
|
||||
log.Fatalf("Error exchanging auth code for a token: %v", err)
|
||||
}
|
||||
if err := tokenFile.WriteToken(token); err != nil {
|
||||
log.Fatalf("Error writing to %s: %v", tokenFileName, err)
|
||||
}
|
||||
tokenSource = oauth2.ReuseTokenSource(token, nil)
|
||||
}
|
||||
|
||||
oauthClient := oauth2.NewClient(oauth2.NoContext, tokenSource)
|
||||
|
||||
if *writeObject != "" {
|
||||
writeCloudStorageObject(oauthClient)
|
||||
return
|
||||
}
|
||||
|
||||
computeService, _ := compute.New(oauthClient)
|
||||
|
||||
natIP := *staticIP
|
||||
if natIP == "" {
|
||||
// Try to find it by name.
|
||||
aggAddrList, err := computeService.Addresses.AggregatedList(*proj).Do()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
// http://godoc.org/code.google.com/p/google-api-go-client/compute/v1#AddressAggregatedList
|
||||
IPLoop:
|
||||
for _, asl := range aggAddrList.Items {
|
||||
for _, addr := range asl.Addresses {
|
||||
if addr.Name == *instName+"-ip" && addr.Status == "RESERVED" {
|
||||
natIP = addr.Address
|
||||
break IPLoop
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
cloudConfig := baseConfig
|
||||
if *sshPub != "" {
|
||||
key := strings.TrimSpace(readFile(*sshPub))
|
||||
cloudConfig += fmt.Sprintf("\nssh_authorized_keys:\n - %s\n", key)
|
||||
}
|
||||
if os.Getenv("USER") == "bradfitz" {
|
||||
cloudConfig += fmt.Sprintf("\nssh_authorized_keys:\n - %s\n", "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEAwks9dwWKlRC+73gRbvYtVg0vdCwDSuIlyt4z6xa/YU/jTDynM4R4W10hm2tPjy8iR1k8XhDv4/qdxe6m07NjG/By1tkmGpm1mGwho4Pr5kbAAy/Qg+NLCSdAYnnE00FQEcFOC15GFVMOW2AzDGKisReohwH9eIzHPzdYQNPRWXE= bradfitz@papag.bradfitz.com")
|
||||
}
|
||||
const maxCloudConfig = 32 << 10 // per compute API docs
|
||||
if len(cloudConfig) > maxCloudConfig {
|
||||
log.Fatalf("cloud config length of %d bytes is over %d byte limit", len(cloudConfig), maxCloudConfig)
|
||||
}
|
||||
|
||||
instance := &compute.Instance{
|
||||
Name: *instName,
|
||||
Description: "Go Builder",
|
||||
MachineType: machType,
|
||||
Disks: []*compute.AttachedDisk{instanceDisk(computeService)},
|
||||
Tags: &compute.Tags{
|
||||
Items: []string{"http-server", "https-server"},
|
||||
},
|
||||
Metadata: &compute.Metadata{
|
||||
Items: []*compute.MetadataItems{
|
||||
{
|
||||
Key: "user-data",
|
||||
Value: &cloudConfig,
|
||||
},
|
||||
},
|
||||
},
|
||||
NetworkInterfaces: []*compute.NetworkInterface{
|
||||
{
|
||||
AccessConfigs: []*compute.AccessConfig{
|
||||
{
|
||||
Type: "ONE_TO_ONE_NAT",
|
||||
Name: "External NAT",
|
||||
NatIP: natIP,
|
||||
},
|
||||
},
|
||||
Network: prefix + "/global/networks/default",
|
||||
},
|
||||
},
|
||||
ServiceAccounts: []*compute.ServiceAccount{
|
||||
{
|
||||
Email: "default",
|
||||
Scopes: []string{
|
||||
compute.DevstorageFullControlScope,
|
||||
compute.ComputeScope,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
log.Printf("Creating instance...")
|
||||
op, err := computeService.Instances.Insert(*proj, *zone, instance).Do()
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to create instance: %v", err)
|
||||
}
|
||||
opName := op.Name
|
||||
log.Printf("Created. Waiting on operation %v", opName)
|
||||
OpLoop:
|
||||
for {
|
||||
time.Sleep(2 * time.Second)
|
||||
op, err := computeService.ZoneOperations.Get(*proj, *zone, opName).Do()
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to get op %s: %v", opName, err)
|
||||
}
|
||||
switch op.Status {
|
||||
case "PENDING", "RUNNING":
|
||||
log.Printf("Waiting on operation %v", opName)
|
||||
continue
|
||||
case "DONE":
|
||||
if op.Error != nil {
|
||||
for _, operr := range op.Error.Errors {
|
||||
log.Printf("Error: %+v", operr)
|
||||
}
|
||||
log.Fatalf("Failed to start.")
|
||||
}
|
||||
log.Printf("Success. %+v", op)
|
||||
break OpLoop
|
||||
default:
|
||||
log.Fatalf("Unknown status %q: %+v", op.Status, op)
|
||||
}
|
||||
}
|
||||
|
||||
inst, err := computeService.Instances.Get(*proj, *zone, *instName).Do()
|
||||
if err != nil {
|
||||
log.Fatalf("Error getting instance after creation: %v", err)
|
||||
}
|
||||
ij, _ := json.MarshalIndent(inst, "", " ")
|
||||
log.Printf("Instance: %s", ij)
|
||||
}
|
||||
|
||||
func instanceDisk(svc *compute.Service) *compute.AttachedDisk {
|
||||
const imageURL = "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-stable-444-5-0-v20141016"
|
||||
diskName := *instName + "-disk"
|
||||
|
||||
return &compute.AttachedDisk{
|
||||
AutoDelete: true,
|
||||
Boot: true,
|
||||
Type: "PERSISTENT",
|
||||
InitializeParams: &compute.AttachedDiskInitializeParams{
|
||||
DiskName: diskName,
|
||||
SourceImage: imageURL,
|
||||
DiskSizeGb: 50,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func writeCloudStorageObject(httpClient *http.Client) {
|
||||
content := os.Stdin
|
||||
const maxSlurp = 1 << 20
|
||||
var buf bytes.Buffer
|
||||
n, err := io.CopyN(&buf, content, maxSlurp)
|
||||
if err != nil && err != io.EOF {
|
||||
log.Fatalf("Error reading from stdin: %v, %v", n, err)
|
||||
}
|
||||
contentType := http.DetectContentType(buf.Bytes())
|
||||
|
||||
req, err := http.NewRequest("PUT", "https://storage.googleapis.com/"+*writeObject, io.MultiReader(&buf, content))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
req.Header.Set("x-goog-api-version", "2")
|
||||
if *publicObject {
|
||||
req.Header.Set("x-goog-acl", "public-read")
|
||||
}
|
||||
req.Header.Set("Content-Type", contentType)
|
||||
res, err := httpClient.Do(req)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if res.StatusCode != 200 {
|
||||
res.Write(os.Stderr)
|
||||
log.Fatalf("Failed.")
|
||||
}
|
||||
log.Printf("Success.")
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
type tokenCacheFile string
|
||||
|
||||
func (f tokenCacheFile) Token() (*oauth2.Token, error) {
|
||||
slurp, err := ioutil.ReadFile(string(f))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
t := new(oauth2.Token)
|
||||
if err := json.Unmarshal(slurp, t); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return t, nil
|
||||
}
|
||||
|
||||
func (f tokenCacheFile) WriteToken(t *oauth2.Token) error {
|
||||
jt, err := json.Marshal(t)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return ioutil.WriteFile(string(f), jt, 0600)
|
||||
}
|
||||
27 vendor/golang.org/x/net/http2/h2demo/rootCA.key (generated, vendored, Normal file)
@@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIEowIBAAKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSSR8Od0+9Q
|
||||
62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoTZjkUygby
|
||||
XDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYkJfODVGnV
|
||||
mr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3mOoLb4yJ
|
||||
JQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYWcaiW8LWZ
|
||||
SUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABAoIBAFFHV7JMAqPWnMYA
|
||||
nezY6J81v9+XN+7xABNWM2Q8uv4WdksbigGLTXR3/680Z2hXqJ7LMeC5XJACFT/e
|
||||
/Gr0vmpgOCygnCPfjGehGKpavtfksXV3edikUlnCXsOP1C//c1bFL+sMYmFCVgTx
|
||||
qYdDK8yKzXNGrKYT6q5YG7IglyRNV1rsQa8lM/5taFYiD1Ck/3tQi3YIq8Lcuser
|
||||
hrxsMABcQ6mi+EIvG6Xr4mfJug0dGJMHG4RG1UGFQn6RXrQq2+q53fC8ZbVUSi0j
|
||||
NQ918aKFzktwv+DouKU0ME4I9toks03gM860bAL7zCbKGmwR3hfgX/TqzVCWpG9E
|
||||
LDVfvekCgYEA8fk9N53jbBRmULUGEf4qWypcLGiZnNU0OeXWpbPV9aa3H0VDytA7
|
||||
8fCN2dPAVDPqlthMDdVe983NCNwp2Yo8ZimDgowyIAKhdC25s1kejuaiH9OAPj3c
|
||||
0f8KbriYX4n8zNHxFwK6Ae3pQ6EqOLJVCUsziUaZX9nyKY5aZlyX6xcCgYEAwjws
|
||||
K62PjC64U5wYddNLp+kNdJ4edx+a7qBb3mEgPvSFT2RO3/xafJyG8kQB30Mfstjd
|
||||
bRxyUV6N0vtX1zA7VQtRUAvfGCecpMo+VQZzcHXKzoRTnQ7eZg4Lmj5fQ9tOAKAo
|
||||
QCVBoSW/DI4PZL26CAMDcAba4Pa22ooLapoRIQsCgYA6pIfkkbxLNkpxpt2YwLtt
|
||||
Kr/590O7UaR9n6k8sW/aQBRDXNsILR1KDl2ifAIxpf9lnXgZJiwE7HiTfCAcW7c1
|
||||
nzwDCI0hWuHcMTS/NYsFYPnLsstyyjVZI3FY0h4DkYKV9Q9z3zJLQ2hz/nwoD3gy
|
||||
b2pHC7giFcTts1VPV4Nt8wKBgHeFn4ihHJweg76vZz3Z78w7VNRWGFklUalVdDK7
|
||||
gaQ7w2y/ROn/146mo0OhJaXFIFRlrpvdzVrU3GDf2YXJYDlM5ZRkObwbZADjksev
|
||||
WInzcgDy3KDg7WnPasRXbTfMU4t/AkW2p1QKbi3DnSVYuokDkbH2Beo45vxDxhKr
|
||||
C69RAoGBAIyo3+OJenoZmoNzNJl2WPW5MeBUzSh8T/bgyjFTdqFHF5WiYRD/lfHj
|
||||
x9Glyw2nutuT4hlOqHvKhgTYdDMsF2oQ72fe3v8Q5FU7FuKndNPEAyvKNXZaShVA
|
||||
hnlhv5DjXKb0wFWnt5PCCiQLtzG0yyHaITrrEme7FikkIcTxaX/Y
|
||||
-----END RSA PRIVATE KEY-----
|
||||
26 vendor/golang.org/x/net/http2/h2demo/rootCA.pem (generated, vendored, Normal file)
@@ -0,0 +1,26 @@
-----BEGIN CERTIFICATE-----
|
||||
MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV
|
||||
BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG
|
||||
A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3
|
||||
DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0
|
||||
NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG
|
||||
cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv
|
||||
c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B
|
||||
AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS
|
||||
R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT
|
||||
ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk
|
||||
JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3
|
||||
mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW
|
||||
caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G
|
||||
A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt
|
||||
hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB
|
||||
MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES
|
||||
MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv
|
||||
bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h
|
||||
U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao
|
||||
eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4
|
||||
UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD
|
||||
58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n
|
||||
sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF
|
||||
kPe6XoSbiLm/kxk32T0=
|
||||
-----END CERTIFICATE-----
|
||||
1 vendor/golang.org/x/net/http2/h2demo/rootCA.srl (generated, vendored, Normal file)
@@ -0,0 +1 @@
E2CE26BF3285059C
20 vendor/golang.org/x/net/http2/h2demo/server.crt (generated, vendored, Normal file)
@@ -0,0 +1,20 @@
-----BEGIN CERTIFICATE-----
|
||||
MIIDPjCCAiYCCQDizia/MoUFnDANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJV
|
||||
UzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xFDASBgNVBAoT
|
||||
C0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhvc3QxHTAbBgkqhkiG9w0BCQEW
|
||||
DmJyYWRAZGFuZ2EuY29tMB4XDTE0MDcxNTIwNTAyN1oXDTE1MTEyNzIwNTAyN1ow
|
||||
RzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMQswCQYDVQQHEwJTRjEeMBwGA1UE
|
||||
ChMVYnJhZGZpdHogaHR0cDIgc2VydmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
|
||||
MIIBCgKCAQEAs1Y9CyLFrdL8VQWN1WaifDqaZFnoqjHhCMlc1TfG2zA+InDifx2l
|
||||
gZD3o8FeNnAcfM2sPlk3+ZleOYw9P/CklFVDlvqmpCv9ss/BEp/dDaWvy1LmJ4c2
|
||||
dbQJfmTxn7CV1H3TsVJvKdwFmdoABb41NoBp6+NNO7OtDyhbIMiCI0pL3Nefb3HL
|
||||
A7hIMo3DYbORTtJLTIH9W8YKrEWL0lwHLrYFx/UdutZnv+HjdmO6vCN4na55mjws
|
||||
/vjKQUmc7xeY7Xe20xDEG2oDKVkL2eD7FfyrYMS3rO1ExP2KSqlXYG/1S9I/fz88
|
||||
F0GK7HX55b5WjZCl2J3ERVdnv/0MQv+sYQIDAQABMA0GCSqGSIb3DQEBBQUAA4IB
|
||||
AQC0zL+n/YpRZOdulSu9tS8FxrstXqGWoxfe+vIUgqfMZ5+0MkjJ/vW0FqlLDl2R
|
||||
rn4XaR3e7FmWkwdDVbq/UB6lPmoAaFkCgh9/5oapMaclNVNnfF3fjCJfRr+qj/iD
|
||||
EmJStTIN0ZuUjAlpiACmfnpEU55PafT5Zx+i1yE4FGjw8bJpFoyD4Hnm54nGjX19
|
||||
KeCuvcYFUPnBm3lcL0FalF2AjqV02WTHYNQk7YF/oeO7NKBoEgvGvKG3x+xaOeBI
|
||||
dwvdq175ZsGul30h+QjrRlXhH/twcuaT3GSdoysDl9cCYE8f1Mk8PD6gan3uBCJU
|
||||
90p6/CbU71bGbfpM2PHot2fm
|
||||
-----END CERTIFICATE-----
|
||||
27 vendor/golang.org/x/net/http2/h2demo/server.key (generated, vendored, Normal file)
@@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIEowIBAAKCAQEAs1Y9CyLFrdL8VQWN1WaifDqaZFnoqjHhCMlc1TfG2zA+InDi
|
||||
fx2lgZD3o8FeNnAcfM2sPlk3+ZleOYw9P/CklFVDlvqmpCv9ss/BEp/dDaWvy1Lm
|
||||
J4c2dbQJfmTxn7CV1H3TsVJvKdwFmdoABb41NoBp6+NNO7OtDyhbIMiCI0pL3Nef
|
||||
b3HLA7hIMo3DYbORTtJLTIH9W8YKrEWL0lwHLrYFx/UdutZnv+HjdmO6vCN4na55
|
||||
mjws/vjKQUmc7xeY7Xe20xDEG2oDKVkL2eD7FfyrYMS3rO1ExP2KSqlXYG/1S9I/
|
||||
fz88F0GK7HX55b5WjZCl2J3ERVdnv/0MQv+sYQIDAQABAoIBADQ2spUwbY+bcz4p
|
||||
3M66ECrNQTBggP40gYl2XyHxGGOu2xhZ94f9ELf1hjRWU2DUKWco1rJcdZClV6q3
|
||||
qwmXvcM2Q/SMS8JW0ImkNVl/0/NqPxGatEnj8zY30d/L8hGFb0orzFu/XYA5gCP4
|
||||
NbN2WrXgk3ZLeqwcNxHHtSiJWGJ/fPyeDWAu/apy75u9Xf2GlzBZmV6HYD9EfK80
|
||||
LTlI60f5FO487CrJnboL7ovPJrIHn+k05xRQqwma4orpz932rTXnTjs9Lg6KtbQN
|
||||
a7PrqfAntIISgr11a66Mng3IYH1lYqJsWJJwX/xHT4WLEy0EH4/0+PfYemJekz2+
|
||||
Co62drECgYEA6O9zVJZXrLSDsIi54cfxA7nEZWm5CAtkYWeAHa4EJ+IlZ7gIf9sL
|
||||
W8oFcEfFGpvwVqWZ+AsQ70dsjXAv3zXaG0tmg9FtqWp7pzRSMPidifZcQwWkKeTO
|
||||
gJnFmnVyed8h6GfjTEu4gxo1/S5U0V+mYSha01z5NTnN6ltKx1Or3b0CgYEAxRgm
|
||||
S30nZxnyg/V7ys61AZhst1DG2tkZXEMcA7dYhabMoXPJAP/EfhlWwpWYYUs/u0gS
|
||||
Wwmf5IivX5TlYScgmkvb/NYz0u4ZmOXkLTnLPtdKKFXhjXJcHjUP67jYmOxNlJLp
|
||||
V4vLRnFxTpffAV+OszzRxsXX6fvruwZBANYJeXUCgYBVouLFsFgfWGYp2rpr9XP4
|
||||
KK25kvrBqF6JKOIDB1zjxNJ3pUMKrl8oqccCFoCyXa4oTM2kUX0yWxHfleUjrMq4
|
||||
yimwQKiOZmV7fVLSSjSw6e/VfBd0h3gb82ygcplZkN0IclkwTY5SNKqwn/3y07V5
|
||||
drqdhkrgdJXtmQ6O5YYECQKBgATERcDToQ1USlI4sKrB/wyv1AlG8dg/IebiVJ4e
|
||||
ZAyvcQmClFzq0qS+FiQUnB/WQw9TeeYrwGs1hxBHuJh16srwhLyDrbMvQP06qh8R
|
||||
48F8UXXSRec22dV9MQphaROhu2qZdv1AC0WD3tqov6L33aqmEOi+xi8JgbT/PLk5
|
||||
c/c1AoGBAI1A/02ryksW6/wc7/6SP2M2rTy4m1sD/GnrTc67EHnRcVBdKO6qH2RY
|
||||
nqC8YcveC2ZghgPTDsA3VGuzuBXpwY6wTyV99q6jxQJ6/xcrD9/NUG6Uwv/xfCxl
|
||||
IJLeBYEqQundSSny3VtaAUK8Ul1nxpTvVRNwtcyWTo8RHAAyNPWd
|
||||
-----END RSA PRIVATE KEY-----
|
||||
97 vendor/golang.org/x/net/http2/h2i/README.md (generated, vendored, Normal file)
@@ -0,0 +1,97 @@
# h2i

**h2i** is an interactive HTTP/2 ("h2") console debugger. Miss the good ol'
days of telnetting to your HTTP/1.n servers? We're bringing you
back.

Features:
- send raw HTTP/2 frames
    - PING
    - SETTINGS
    - HEADERS
    - etc
- type in HTTP/1.n and have it auto-HPACK/frame-ify it for HTTP/2
- pretty print all received HTTP/2 frames from the peer (including HPACK decoding)
- tab completion of commands, options

Not yet features, but soon:
- unnecessary CONTINUATION frames on short boundaries, to test peer implementations
- request bodies (DATA frames)
- send invalid frames for testing server implementations (supported by underlying Framer)

Later:
- act like a server

## Installation

```
$ go get golang.org/x/net/http2/h2i
$ h2i <host>
```

## Demo

```
$ h2i
Usage: h2i <hostname>

  -insecure
        Whether to skip TLS cert validation
  -nextproto string
        Comma-separated list of NPN/ALPN protocol names to negotiate. (default "h2,h2-14")

$ h2i google.com
Connecting to google.com:443 ...
Connected to 74.125.224.41:443
Negotiated protocol "h2-14"
[FrameHeader SETTINGS len=18]
  [MAX_CONCURRENT_STREAMS = 100]
  [INITIAL_WINDOW_SIZE = 1048576]
  [MAX_FRAME_SIZE = 16384]
[FrameHeader WINDOW_UPDATE len=4]
  Window-Increment = 983041

h2i> PING h2iSayHI
[FrameHeader PING flags=ACK len=8]
  Data = "h2iSayHI"
h2i> headers
(as HTTP/1.1)> GET / HTTP/1.1
(as HTTP/1.1)> Host: ip.appspot.com
(as HTTP/1.1)> User-Agent: h2i/brad-n-blake
(as HTTP/1.1)>
Opening Stream-ID 1:
 :authority = ip.appspot.com
 :method = GET
 :path = /
 :scheme = https
 user-agent = h2i/brad-n-blake
[FrameHeader HEADERS flags=END_HEADERS stream=1 len=77]
  :status = "200"
  alternate-protocol = "443:quic,p=1"
  content-length = "15"
  content-type = "text/html"
  date = "Fri, 01 May 2015 23:06:56 GMT"
  server = "Google Frontend"
[FrameHeader DATA flags=END_STREAM stream=1 len=15]
  "173.164.155.78\n"
[FrameHeader PING len=8]
  Data = "\x00\x00\x00\x00\x00\x00\x00\x00"
h2i> ping
[FrameHeader PING flags=ACK len=8]
  Data = "h2i_ping"
h2i> ping
[FrameHeader PING flags=ACK len=8]
  Data = "h2i_ping"
h2i> ping
[FrameHeader GOAWAY len=22]
  Last-Stream-ID = 1; Error-Code = PROTOCOL_ERROR (1)

ReadFrame: EOF
```

## Status

Quick few hour hack. So much yet to do. Feel free to file issues for
bugs or wishlist items, but [@bmizerany](https://github.com/bmizerany/)
and I aren't yet accepting pull requests until things settle down.

501
vendor/golang.org/x/net/http2/h2i/h2i.go
generated
vendored
Normal file
501
vendor/golang.org/x/net/http2/h2i/h2i.go
generated
vendored
Normal file
File diff suppressed because it is too large
78
vendor/golang.org/x/net/http2/headermap.go
generated
vendored
Normal file
78
vendor/golang.org/x/net/http2/headermap.go
generated
vendored
Normal file
|
|
@ -0,0 +1,78 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
commonLowerHeader = map[string]string{} // Go-Canonical-Case -> lower-case
|
||||
commonCanonHeader = map[string]string{} // lower-case -> Go-Canonical-Case
|
||||
)
|
||||
|
||||
func init() {
|
||||
for _, v := range []string{
|
||||
"accept",
|
||||
"accept-charset",
|
||||
"accept-encoding",
|
||||
"accept-language",
|
||||
"accept-ranges",
|
||||
"age",
|
||||
"access-control-allow-origin",
|
||||
"allow",
|
||||
"authorization",
|
||||
"cache-control",
|
||||
"content-disposition",
|
||||
"content-encoding",
|
||||
"content-language",
|
||||
"content-length",
|
||||
"content-location",
|
||||
"content-range",
|
||||
"content-type",
|
||||
"cookie",
|
||||
"date",
|
||||
"etag",
|
||||
"expect",
|
||||
"expires",
|
||||
"from",
|
||||
"host",
|
||||
"if-match",
|
||||
"if-modified-since",
|
||||
"if-none-match",
|
||||
"if-unmodified-since",
|
||||
"last-modified",
|
||||
"link",
|
||||
"location",
|
||||
"max-forwards",
|
||||
"proxy-authenticate",
|
||||
"proxy-authorization",
|
||||
"range",
|
||||
"referer",
|
||||
"refresh",
|
||||
"retry-after",
|
||||
"server",
|
||||
"set-cookie",
|
||||
"strict-transport-security",
|
||||
"trailer",
|
||||
"transfer-encoding",
|
||||
"user-agent",
|
||||
"vary",
|
||||
"via",
|
||||
"www-authenticate",
|
||||
} {
|
||||
chk := http.CanonicalHeaderKey(v)
|
||||
commonLowerHeader[chk] = v
|
||||
commonCanonHeader[v] = chk
|
||||
}
|
||||
}
|
||||
|
||||
func lowerHeader(v string) string {
|
||||
if s, ok := commonLowerHeader[v]; ok {
|
||||
return s
|
||||
}
|
||||
return strings.ToLower(v)
|
||||
}
|
||||
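The table above exists because HTTP/2 requires header field names on the wire to be lowercase, while net/http stores them in Go-canonical form (e.g. "Content-Type"). The lowerHeader helper is unexported, so the sketch below only mirrors its lookup-then-fallback idea with a hypothetical toWireName function; it is not part of the package's API.

```
package main

import (
	"fmt"
	"net/http"
	"strings"
)

// commonLower caches the lowercase wire form of a few frequent headers,
// keyed by their Go-canonical spelling (a tiny subset of the table above).
var commonLower = map[string]string{}

func init() {
	for _, v := range []string{"content-type", "user-agent", "cookie"} {
		commonLower[http.CanonicalHeaderKey(v)] = v
	}
}

// toWireName returns the HTTP/2 wire form of a header name: the cached
// lowercase string when available, strings.ToLower otherwise.
func toWireName(name string) string {
	if s, ok := commonLower[name]; ok {
		return s
	}
	return strings.ToLower(name)
}

func main() {
	fmt.Println(toWireName("Content-Type")) // content-type
	fmt.Println(toWireName("X-Custom-Id"))  // x-custom-id
}
```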
251
vendor/golang.org/x/net/http2/hpack/encode.go
generated
vendored
Normal file
251
vendor/golang.org/x/net/http2/hpack/encode.go
generated
vendored
Normal file
|
|
@ -0,0 +1,251 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package hpack
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
const (
|
||||
uint32Max = ^uint32(0)
|
||||
initialHeaderTableSize = 4096
|
||||
)
|
||||
|
||||
type Encoder struct {
|
||||
dynTab dynamicTable
|
||||
// minSize is the minimum table size set by
|
||||
// SetMaxDynamicTableSize after the previous Header Table Size
|
||||
// Update.
|
||||
minSize uint32
|
||||
// maxSizeLimit is the maximum table size this encoder
|
||||
// supports. This protects the encoder against an excessively
|
||||
// large table size.
|
||||
maxSizeLimit uint32
|
||||
// tableSizeUpdate indicates whether "Header Table Size
|
||||
// Update" is required.
|
||||
tableSizeUpdate bool
|
||||
w io.Writer
|
||||
buf []byte
|
||||
}
|
||||
|
||||
// NewEncoder returns a new Encoder which performs HPACK encoding.
|
||||
// Encoded data is written to w.
|
||||
func NewEncoder(w io.Writer) *Encoder {
|
||||
e := &Encoder{
|
||||
minSize: uint32Max,
|
||||
maxSizeLimit: initialHeaderTableSize,
|
||||
tableSizeUpdate: false,
|
||||
w: w,
|
||||
}
|
||||
e.dynTab.setMaxSize(initialHeaderTableSize)
|
||||
return e
|
||||
}
|
||||
|
||||
// WriteField encodes f into a single Write to e's underlying Writer.
|
||||
// This function may also produce bytes for "Header Table Size Update"
|
||||
// if necessary. If produced, it is done before encoding f.
|
||||
func (e *Encoder) WriteField(f HeaderField) error {
|
||||
e.buf = e.buf[:0]
|
||||
|
||||
if e.tableSizeUpdate {
|
||||
e.tableSizeUpdate = false
|
||||
if e.minSize < e.dynTab.maxSize {
|
||||
e.buf = appendTableSize(e.buf, e.minSize)
|
||||
}
|
||||
e.minSize = uint32Max
|
||||
e.buf = appendTableSize(e.buf, e.dynTab.maxSize)
|
||||
}
|
||||
|
||||
idx, nameValueMatch := e.searchTable(f)
|
||||
if nameValueMatch {
|
||||
e.buf = appendIndexed(e.buf, idx)
|
||||
} else {
|
||||
indexing := e.shouldIndex(f)
|
||||
if indexing {
|
||||
e.dynTab.add(f)
|
||||
}
|
||||
|
||||
if idx == 0 {
|
||||
e.buf = appendNewName(e.buf, f, indexing)
|
||||
} else {
|
||||
e.buf = appendIndexedName(e.buf, f, idx, indexing)
|
||||
}
|
||||
}
|
||||
n, err := e.w.Write(e.buf)
|
||||
if err == nil && n != len(e.buf) {
|
||||
err = io.ErrShortWrite
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// searchTable searches f in both stable and dynamic header tables.
|
||||
// The static header table is searched first. Only when there is no
|
||||
// exact match on both name and value is the dynamic header table
|
||||
// searched. If there is no match, i is 0. If both name and value
|
||||
// match, i is the matched index and nameValueMatch becomes true. If
|
||||
// only name matches, i points to that index and nameValueMatch
|
||||
// becomes false.
|
||||
func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) {
|
||||
for idx, hf := range staticTable {
|
||||
if !constantTimeStringCompare(hf.Name, f.Name) {
|
||||
continue
|
||||
}
|
||||
if i == 0 {
|
||||
i = uint64(idx + 1)
|
||||
}
|
||||
if f.Sensitive {
|
||||
continue
|
||||
}
|
||||
if !constantTimeStringCompare(hf.Value, f.Value) {
|
||||
continue
|
||||
}
|
||||
i = uint64(idx + 1)
|
||||
nameValueMatch = true
|
||||
return
|
||||
}
|
||||
|
||||
j, nameValueMatch := e.dynTab.search(f)
|
||||
if nameValueMatch || (i == 0 && j != 0) {
|
||||
i = j + uint64(len(staticTable))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// SetMaxDynamicTableSize changes the dynamic header table size to v.
|
||||
// The actual size is bounded by the value passed to
|
||||
// SetMaxDynamicTableSizeLimit.
|
||||
func (e *Encoder) SetMaxDynamicTableSize(v uint32) {
|
||||
if v > e.maxSizeLimit {
|
||||
v = e.maxSizeLimit
|
||||
}
|
||||
if v < e.minSize {
|
||||
e.minSize = v
|
||||
}
|
||||
e.tableSizeUpdate = true
|
||||
e.dynTab.setMaxSize(v)
|
||||
}
|
||||
|
||||
// SetMaxDynamicTableSizeLimit changes the maximum value that can be
|
||||
// specified in SetMaxDynamicTableSize to v. By default, it is set to
|
||||
// 4096, which is the same as the default dynamic header table
|
||||
// size described in the HPACK specification. If the current maximum
|
||||
// dynamic header table size is strictly greater than v, "Header Table
|
||||
// Size Update" will be done in the next WriteField call and the
|
||||
// maximum dynamic header table size is truncated to v.
|
||||
func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) {
|
||||
e.maxSizeLimit = v
|
||||
if e.dynTab.maxSize > v {
|
||||
e.tableSizeUpdate = true
|
||||
e.dynTab.setMaxSize(v)
|
||||
}
|
||||
}
|
||||
|
||||
// shouldIndex reports whether f should be indexed.
|
||||
func (e *Encoder) shouldIndex(f HeaderField) bool {
|
||||
return !f.Sensitive && f.Size() <= e.dynTab.maxSize
|
||||
}
|
||||
|
||||
// appendIndexed appends index i, as encoded in "Indexed Header Field"
|
||||
// representation, to dst and returns the extended buffer.
|
||||
func appendIndexed(dst []byte, i uint64) []byte {
|
||||
first := len(dst)
|
||||
dst = appendVarInt(dst, 7, i)
|
||||
dst[first] |= 0x80
|
||||
return dst
|
||||
}
|
||||
|
||||
// appendNewName appends f, as encoded in one of "Literal Header field
|
||||
// - New Name" representation variants, to dst and returns the
|
||||
// extended buffer.
|
||||
//
|
||||
// If f.Sensitive is true, "Never Indexed" representation is used. If
|
||||
// f.Sensitive is false and indexing is true, "Inremental Indexing"
|
||||
// representation is used.
|
||||
func appendNewName(dst []byte, f HeaderField, indexing bool) []byte {
|
||||
dst = append(dst, encodeTypeByte(indexing, f.Sensitive))
|
||||
dst = appendHpackString(dst, f.Name)
|
||||
return appendHpackString(dst, f.Value)
|
||||
}
|
||||
|
||||
// appendIndexedName appends f and index i referring indexed name
|
||||
// entry, as encoded in one of "Literal Header field - Indexed Name"
|
||||
// representation variants, to dst and returns the extended buffer.
|
||||
//
|
||||
// If f.Sensitive is true, "Never Indexed" representation is used. If
|
||||
// f.Sensitive is false and indexing is true, "Incremental Indexing"
|
||||
// representation is used.
|
||||
func appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte {
|
||||
first := len(dst)
|
||||
var n byte
|
||||
if indexing {
|
||||
n = 6
|
||||
} else {
|
||||
n = 4
|
||||
}
|
||||
dst = appendVarInt(dst, n, i)
|
||||
dst[first] |= encodeTypeByte(indexing, f.Sensitive)
|
||||
return appendHpackString(dst, f.Value)
|
||||
}
|
||||
|
||||
// appendTableSize appends v, as encoded in "Header Table Size Update"
|
||||
// representation, to dst and returns the extended buffer.
|
||||
func appendTableSize(dst []byte, v uint32) []byte {
|
||||
first := len(dst)
|
||||
dst = appendVarInt(dst, 5, uint64(v))
|
||||
dst[first] |= 0x20
|
||||
return dst
|
||||
}
|
||||
|
||||
// appendVarInt appends i, as encoded in variable integer form using n
|
||||
// bit prefix, to dst and returns the extended buffer.
|
||||
//
|
||||
// See
|
||||
// http://http2.github.io/http2-spec/compression.html#integer.representation
|
||||
func appendVarInt(dst []byte, n byte, i uint64) []byte {
|
||||
k := uint64((1 << n) - 1)
|
||||
if i < k {
|
||||
return append(dst, byte(i))
|
||||
}
|
||||
dst = append(dst, byte(k))
|
||||
i -= k
|
||||
for ; i >= 128; i >>= 7 {
|
||||
dst = append(dst, byte(0x80|(i&0x7f)))
|
||||
}
|
||||
return append(dst, byte(i))
|
||||
}
|
||||
|
||||
// appendHpackString appends s, as encoded in "String Literal"
|
||||
// representation, to dst and returns the extended buffer.
|
||||
//
|
||||
// s will be encoded in Huffman codes only when it produces strictly
|
||||
// shorter byte string.
|
||||
func appendHpackString(dst []byte, s string) []byte {
|
||||
huffmanLength := HuffmanEncodeLength(s)
|
||||
if huffmanLength < uint64(len(s)) {
|
||||
first := len(dst)
|
||||
dst = appendVarInt(dst, 7, huffmanLength)
|
||||
dst = AppendHuffmanString(dst, s)
|
||||
dst[first] |= 0x80
|
||||
} else {
|
||||
dst = appendVarInt(dst, 7, uint64(len(s)))
|
||||
dst = append(dst, s...)
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// encodeTypeByte returns type byte. If sensitive is true, type byte
|
||||
// for "Never Indexed" representation is returned. If sensitive is
|
||||
// false and indexing is true, type byte for "Incremental Indexing"
|
||||
// representation is returned. Otherwise, type byte for "Without
|
||||
// Indexing" is returned.
|
||||
func encodeTypeByte(indexing, sensitive bool) byte {
|
||||
if sensitive {
|
||||
return 0x10
|
||||
}
|
||||
if indexing {
|
||||
return 0x40
|
||||
}
|
||||
return 0
|
||||
}
|
||||
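To see how the encoder above pairs with the decoder defined in hpack.go, here is a minimal round-trip sketch; the header fields are arbitrary examples, and only exported API (NewEncoder, WriteField, NewDecoder) is used.

```
package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var block bytes.Buffer

	// Encode a few fields into one HPACK header block.
	enc := hpack.NewEncoder(&block)
	fields := []hpack.HeaderField{
		{Name: ":method", Value: "GET"},
		{Name: ":path", Value: "/"},
		{Name: "authorization", Value: "secret", Sensitive: true}, // "Never Indexed"
	}
	for _, f := range fields {
		if err := enc.WriteField(f); err != nil {
			panic(err)
		}
	}

	// Decode the block; the callback fires once per decoded field.
	dec := hpack.NewDecoder(4096, func(f hpack.HeaderField) {
		fmt.Printf("%s: %s\n", f.Name, f.Value)
	})
	if _, err := dec.Write(block.Bytes()); err != nil {
		panic(err)
	}
}
```

Sensitive fields take the "Never Indexed" representation described above, so peers and intermediaries are told not to add them to any compression table.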
330
vendor/golang.org/x/net/http2/hpack/encode_test.go
generated
vendored
Normal file
330
vendor/golang.org/x/net/http2/hpack/encode_test.go
generated
vendored
Normal file
|
|
@ -0,0 +1,330 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package hpack
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestEncoderTableSizeUpdate(t *testing.T) {
|
||||
tests := []struct {
|
||||
size1, size2 uint32
|
||||
wantHex string
|
||||
}{
|
||||
// Should emit 2 table size updates (2048 and 4096)
|
||||
{2048, 4096, "3fe10f 3fe11f 82"},
|
||||
|
||||
// Should emit 1 table size update (2048)
|
||||
{16384, 2048, "3fe10f 82"},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
var buf bytes.Buffer
|
||||
e := NewEncoder(&buf)
|
||||
e.SetMaxDynamicTableSize(tt.size1)
|
||||
e.SetMaxDynamicTableSize(tt.size2)
|
||||
if err := e.WriteField(pair(":method", "GET")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
want := removeSpace(tt.wantHex)
|
||||
if got := hex.EncodeToString(buf.Bytes()); got != want {
|
||||
t.Errorf("e.SetDynamicTableSize %v, %v = %q; want %q", tt.size1, tt.size2, got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestEncoderWriteField(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
e := NewEncoder(&buf)
|
||||
var got []HeaderField
|
||||
d := NewDecoder(4<<10, func(f HeaderField) {
|
||||
got = append(got, f)
|
||||
})
|
||||
|
||||
tests := []struct {
|
||||
hdrs []HeaderField
|
||||
}{
|
||||
{[]HeaderField{
|
||||
pair(":method", "GET"),
|
||||
pair(":scheme", "http"),
|
||||
pair(":path", "/"),
|
||||
pair(":authority", "www.example.com"),
|
||||
}},
|
||||
{[]HeaderField{
|
||||
pair(":method", "GET"),
|
||||
pair(":scheme", "http"),
|
||||
pair(":path", "/"),
|
||||
pair(":authority", "www.example.com"),
|
||||
pair("cache-control", "no-cache"),
|
||||
}},
|
||||
{[]HeaderField{
|
||||
pair(":method", "GET"),
|
||||
pair(":scheme", "https"),
|
||||
pair(":path", "/index.html"),
|
||||
pair(":authority", "www.example.com"),
|
||||
pair("custom-key", "custom-value"),
|
||||
}},
|
||||
}
|
||||
for i, tt := range tests {
|
||||
buf.Reset()
|
||||
got = got[:0]
|
||||
for _, hf := range tt.hdrs {
|
||||
if err := e.WriteField(hf); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
_, err := d.Write(buf.Bytes())
|
||||
if err != nil {
|
||||
t.Errorf("%d. Decoder Write = %v", i, err)
|
||||
}
|
||||
if !reflect.DeepEqual(got, tt.hdrs) {
|
||||
t.Errorf("%d. Decoded %+v; want %+v", i, got, tt.hdrs)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestEncoderSearchTable(t *testing.T) {
|
||||
e := NewEncoder(nil)
|
||||
|
||||
e.dynTab.add(pair("foo", "bar"))
|
||||
e.dynTab.add(pair("blake", "miz"))
|
||||
e.dynTab.add(pair(":method", "GET"))
|
||||
|
||||
tests := []struct {
|
||||
hf HeaderField
|
||||
wantI uint64
|
||||
wantMatch bool
|
||||
}{
|
||||
// Name and Value match
|
||||
{pair("foo", "bar"), uint64(len(staticTable) + 3), true},
|
||||
{pair("blake", "miz"), uint64(len(staticTable) + 2), true},
|
||||
{pair(":method", "GET"), 2, true},
|
||||
|
||||
// Only name match because Sensitive == true
|
||||
{HeaderField{":method", "GET", true}, 2, false},
|
||||
|
||||
// Only Name matches
|
||||
{pair("foo", "..."), uint64(len(staticTable) + 3), false},
|
||||
{pair("blake", "..."), uint64(len(staticTable) + 2), false},
|
||||
{pair(":method", "..."), 2, false},
|
||||
|
||||
// None match
|
||||
{pair("foo-", "bar"), 0, false},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
if gotI, gotMatch := e.searchTable(tt.hf); gotI != tt.wantI || gotMatch != tt.wantMatch {
|
||||
t.Errorf("d.search(%+v) = %v, %v; want %v, %v", tt.hf, gotI, gotMatch, tt.wantI, tt.wantMatch)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAppendVarInt(t *testing.T) {
|
||||
tests := []struct {
|
||||
n byte
|
||||
i uint64
|
||||
want []byte
|
||||
}{
|
||||
// Fits in a byte:
|
||||
{1, 0, []byte{0}},
|
||||
{2, 2, []byte{2}},
|
||||
{3, 6, []byte{6}},
|
||||
{4, 14, []byte{14}},
|
||||
{5, 30, []byte{30}},
|
||||
{6, 62, []byte{62}},
|
||||
{7, 126, []byte{126}},
|
||||
{8, 254, []byte{254}},
|
||||
|
||||
// Multiple bytes:
|
||||
{5, 1337, []byte{31, 154, 10}},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
got := appendVarInt(nil, tt.n, tt.i)
|
||||
if !bytes.Equal(got, tt.want) {
|
||||
t.Errorf("appendVarInt(nil, %v, %v) = %v; want %v", tt.n, tt.i, got, tt.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAppendHpackString(t *testing.T) {
|
||||
tests := []struct {
|
||||
s, wantHex string
|
||||
}{
|
||||
// Huffman encoded
|
||||
{"www.example.com", "8c f1e3 c2e5 f23a 6ba0 ab90 f4ff"},
|
||||
|
||||
// Not Huffman encoded
|
||||
{"a", "01 61"},
|
||||
|
||||
// zero length
|
||||
{"", "00"},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
want := removeSpace(tt.wantHex)
|
||||
buf := appendHpackString(nil, tt.s)
|
||||
if got := hex.EncodeToString(buf); want != got {
|
||||
t.Errorf("appendHpackString(nil, %q) = %q; want %q", tt.s, got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAppendIndexed(t *testing.T) {
|
||||
tests := []struct {
|
||||
i uint64
|
||||
wantHex string
|
||||
}{
|
||||
// 1 byte
|
||||
{1, "81"},
|
||||
{126, "fe"},
|
||||
|
||||
// 2 bytes
|
||||
{127, "ff00"},
|
||||
{128, "ff01"},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
want := removeSpace(tt.wantHex)
|
||||
buf := appendIndexed(nil, tt.i)
|
||||
if got := hex.EncodeToString(buf); want != got {
|
||||
t.Errorf("appendIndex(nil, %v) = %q; want %q", tt.i, got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAppendNewName(t *testing.T) {
|
||||
tests := []struct {
|
||||
f HeaderField
|
||||
indexing bool
|
||||
wantHex string
|
||||
}{
|
||||
// Incremental indexing
|
||||
{HeaderField{"custom-key", "custom-value", false}, true, "40 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"},
|
||||
|
||||
// Without indexing
|
||||
{HeaderField{"custom-key", "custom-value", false}, false, "00 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"},
|
||||
|
||||
// Never indexed
|
||||
{HeaderField{"custom-key", "custom-value", true}, true, "10 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"},
|
||||
{HeaderField{"custom-key", "custom-value", true}, false, "10 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
want := removeSpace(tt.wantHex)
|
||||
buf := appendNewName(nil, tt.f, tt.indexing)
|
||||
if got := hex.EncodeToString(buf); want != got {
|
||||
t.Errorf("appendNewName(nil, %+v, %v) = %q; want %q", tt.f, tt.indexing, got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAppendIndexedName(t *testing.T) {
|
||||
tests := []struct {
|
||||
f HeaderField
|
||||
i uint64
|
||||
indexing bool
|
||||
wantHex string
|
||||
}{
|
||||
// Incremental indexing
|
||||
{HeaderField{":status", "302", false}, 8, true, "48 82 6402"},
|
||||
|
||||
// Without indexing
|
||||
{HeaderField{":status", "302", false}, 8, false, "08 82 6402"},
|
||||
|
||||
// Never indexed
|
||||
{HeaderField{":status", "302", true}, 8, true, "18 82 6402"},
|
||||
{HeaderField{":status", "302", true}, 8, false, "18 82 6402"},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
want := removeSpace(tt.wantHex)
|
||||
buf := appendIndexedName(nil, tt.f, tt.i, tt.indexing)
|
||||
if got := hex.EncodeToString(buf); want != got {
|
||||
t.Errorf("appendIndexedName(nil, %+v, %v) = %q; want %q", tt.f, tt.indexing, got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAppendTableSize(t *testing.T) {
|
||||
tests := []struct {
|
||||
i uint32
|
||||
wantHex string
|
||||
}{
|
||||
// Fits into 1 byte
|
||||
{30, "3e"},
|
||||
|
||||
// Extra byte
|
||||
{31, "3f00"},
|
||||
{32, "3f01"},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
want := removeSpace(tt.wantHex)
|
||||
buf := appendTableSize(nil, tt.i)
|
||||
if got := hex.EncodeToString(buf); want != got {
|
||||
t.Errorf("appendTableSize(nil, %v) = %q; want %q", tt.i, got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestEncoderSetMaxDynamicTableSize(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
e := NewEncoder(&buf)
|
||||
tests := []struct {
|
||||
v uint32
|
||||
wantUpdate bool
|
||||
wantMinSize uint32
|
||||
wantMaxSize uint32
|
||||
}{
|
||||
// Set new table size to 2048
|
||||
{2048, true, 2048, 2048},
|
||||
|
||||
// Set new table size to 16384, but still limited to
|
||||
// 4096
|
||||
{16384, true, 2048, 4096},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
e.SetMaxDynamicTableSize(tt.v)
|
||||
if got := e.tableSizeUpdate; tt.wantUpdate != got {
|
||||
t.Errorf("e.tableSizeUpdate = %v; want %v", got, tt.wantUpdate)
|
||||
}
|
||||
if got := e.minSize; tt.wantMinSize != got {
|
||||
t.Errorf("e.minSize = %v; want %v", got, tt.wantMinSize)
|
||||
}
|
||||
if got := e.dynTab.maxSize; tt.wantMaxSize != got {
|
||||
t.Errorf("e.maxSize = %v; want %v", got, tt.wantMaxSize)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestEncoderSetMaxDynamicTableSizeLimit(t *testing.T) {
|
||||
e := NewEncoder(nil)
|
||||
// 4095 < initialHeaderTableSize means maxSize is truncated to
|
||||
// 4095.
|
||||
e.SetMaxDynamicTableSizeLimit(4095)
|
||||
if got, want := e.dynTab.maxSize, uint32(4095); got != want {
|
||||
t.Errorf("e.dynTab.maxSize = %v; want %v", got, want)
|
||||
}
|
||||
if got, want := e.maxSizeLimit, uint32(4095); got != want {
|
||||
t.Errorf("e.maxSizeLimit = %v; want %v", got, want)
|
||||
}
|
||||
if got, want := e.tableSizeUpdate, true; got != want {
|
||||
t.Errorf("e.tableSizeUpdate = %v; want %v", got, want)
|
||||
}
|
||||
// maxSize will be truncated to maxSizeLimit
|
||||
e.SetMaxDynamicTableSize(16384)
|
||||
if got, want := e.dynTab.maxSize, uint32(4095); got != want {
|
||||
t.Errorf("e.dynTab.maxSize = %v; want %v", got, want)
|
||||
}
|
||||
// 8192 > current maxSizeLimit, so maxSize does not change.
|
||||
e.SetMaxDynamicTableSizeLimit(8192)
|
||||
if got, want := e.dynTab.maxSize, uint32(4095); got != want {
|
||||
t.Errorf("e.dynTab.maxSize = %v; want %v", got, want)
|
||||
}
|
||||
if got, want := e.maxSizeLimit, uint32(8192); got != want {
|
||||
t.Errorf("e.maxSizeLimit = %v; want %v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func removeSpace(s string) string {
|
||||
return strings.Replace(s, " ", "", -1)
|
||||
}
|
||||
542
vendor/golang.org/x/net/http2/hpack/hpack.go
generated
vendored
Normal file
542
vendor/golang.org/x/net/http2/hpack/hpack.go
generated
vendored
Normal file
File diff suppressed because it is too large
813
vendor/golang.org/x/net/http2/hpack/hpack_test.go
generated
vendored
Normal file
813
vendor/golang.org/x/net/http2/hpack/hpack_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
190
vendor/golang.org/x/net/http2/hpack/huffman.go
generated
vendored
Normal file
190
vendor/golang.org/x/net/http2/hpack/huffman.go
generated
vendored
Normal file
|
|
@ -0,0 +1,190 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package hpack
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var bufPool = sync.Pool{
|
||||
New: func() interface{} { return new(bytes.Buffer) },
|
||||
}
|
||||
|
||||
// HuffmanDecode decodes the string in v and writes the expanded
|
||||
// result to w, returning the number of bytes written to w and the
|
||||
// Write call's return value. At most one Write call is made.
|
||||
func HuffmanDecode(w io.Writer, v []byte) (int, error) {
|
||||
buf := bufPool.Get().(*bytes.Buffer)
|
||||
buf.Reset()
|
||||
defer bufPool.Put(buf)
|
||||
if err := huffmanDecode(buf, 0, v); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return w.Write(buf.Bytes())
|
||||
}
|
||||
|
||||
// HuffmanDecodeToString decodes the string in v.
|
||||
func HuffmanDecodeToString(v []byte) (string, error) {
|
||||
buf := bufPool.Get().(*bytes.Buffer)
|
||||
buf.Reset()
|
||||
defer bufPool.Put(buf)
|
||||
if err := huffmanDecode(buf, 0, v); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return buf.String(), nil
|
||||
}
|
||||
|
||||
// ErrInvalidHuffman is returned for errors found decoding
|
||||
// Huffman-encoded strings.
|
||||
var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data")
|
||||
|
||||
// huffmanDecode decodes v to buf.
|
||||
// If maxLen is greater than 0, attempts to write more to buf than
|
||||
// maxLen bytes will return ErrStringLength.
|
||||
func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error {
|
||||
n := rootHuffmanNode
|
||||
cur, nbits := uint(0), uint8(0)
|
||||
for _, b := range v {
|
||||
cur = cur<<8 | uint(b)
|
||||
nbits += 8
|
||||
for nbits >= 8 {
|
||||
idx := byte(cur >> (nbits - 8))
|
||||
n = n.children[idx]
|
||||
if n == nil {
|
||||
return ErrInvalidHuffman
|
||||
}
|
||||
if n.children == nil {
|
||||
if maxLen != 0 && buf.Len() == maxLen {
|
||||
return ErrStringLength
|
||||
}
|
||||
buf.WriteByte(n.sym)
|
||||
nbits -= n.codeLen
|
||||
n = rootHuffmanNode
|
||||
} else {
|
||||
nbits -= 8
|
||||
}
|
||||
}
|
||||
}
|
||||
for nbits > 0 {
|
||||
n = n.children[byte(cur<<(8-nbits))]
|
||||
if n.children != nil || n.codeLen > nbits {
|
||||
break
|
||||
}
|
||||
buf.WriteByte(n.sym)
|
||||
nbits -= n.codeLen
|
||||
n = rootHuffmanNode
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type node struct {
|
||||
// children is non-nil for internal nodes
|
||||
children []*node
|
||||
|
||||
// The following are only valid if children is nil:
|
||||
codeLen uint8 // number of bits that led to the output of sym
|
||||
sym byte // output symbol
|
||||
}
|
||||
|
||||
func newInternalNode() *node {
|
||||
return &node{children: make([]*node, 256)}
|
||||
}
|
||||
|
||||
var rootHuffmanNode = newInternalNode()
|
||||
|
||||
func init() {
|
||||
if len(huffmanCodes) != 256 {
|
||||
panic("unexpected size")
|
||||
}
|
||||
for i, code := range huffmanCodes {
|
||||
addDecoderNode(byte(i), code, huffmanCodeLen[i])
|
||||
}
|
||||
}
|
||||
|
||||
func addDecoderNode(sym byte, code uint32, codeLen uint8) {
|
||||
cur := rootHuffmanNode
|
||||
for codeLen > 8 {
|
||||
codeLen -= 8
|
||||
i := uint8(code >> codeLen)
|
||||
if cur.children[i] == nil {
|
||||
cur.children[i] = newInternalNode()
|
||||
}
|
||||
cur = cur.children[i]
|
||||
}
|
||||
shift := 8 - codeLen
|
||||
start, end := int(uint8(code<<shift)), int(1<<shift)
|
||||
for i := start; i < start+end; i++ {
|
||||
cur.children[i] = &node{sym: sym, codeLen: codeLen}
|
||||
}
|
||||
}
|
||||
|
||||
// AppendHuffmanString appends s, as encoded in Huffman codes, to dst
|
||||
// and returns the extended buffer.
|
||||
func AppendHuffmanString(dst []byte, s string) []byte {
|
||||
rembits := uint8(8)
|
||||
|
||||
for i := 0; i < len(s); i++ {
|
||||
if rembits == 8 {
|
||||
dst = append(dst, 0)
|
||||
}
|
||||
dst, rembits = appendByteToHuffmanCode(dst, rembits, s[i])
|
||||
}
|
||||
|
||||
if rembits < 8 {
|
||||
// special EOS symbol
|
||||
code := uint32(0x3fffffff)
|
||||
nbits := uint8(30)
|
||||
|
||||
t := uint8(code >> (nbits - rembits))
|
||||
dst[len(dst)-1] |= t
|
||||
}
|
||||
|
||||
return dst
|
||||
}
|
||||
|
||||
// HuffmanEncodeLength returns the number of bytes required to encode
|
||||
// s in Huffman codes. The result is rounded up to a byte boundary.
|
||||
func HuffmanEncodeLength(s string) uint64 {
|
||||
n := uint64(0)
|
||||
for i := 0; i < len(s); i++ {
|
||||
n += uint64(huffmanCodeLen[s[i]])
|
||||
}
|
||||
return (n + 7) / 8
|
||||
}
|
||||
|
||||
// appendByteToHuffmanCode appends Huffman code for c to dst and
|
||||
// returns the extended buffer and the remaining bits in the last
|
||||
// element. The appending is not byte aligned and the remaining bits
|
||||
// in the last element of dst are given in rembits.
|
||||
func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) {
|
||||
code := huffmanCodes[c]
|
||||
nbits := huffmanCodeLen[c]
|
||||
|
||||
for {
|
||||
if rembits > nbits {
|
||||
t := uint8(code << (rembits - nbits))
|
||||
dst[len(dst)-1] |= t
|
||||
rembits -= nbits
|
||||
break
|
||||
}
|
||||
|
||||
t := uint8(code >> (nbits - rembits))
|
||||
dst[len(dst)-1] |= t
|
||||
|
||||
nbits -= rembits
|
||||
rembits = 8
|
||||
|
||||
if nbits == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
dst = append(dst, 0)
|
||||
}
|
||||
|
||||
return dst, rembits
|
||||
}
|
||||
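The Huffman helpers above are exported, so a quick sanity check is easy to write; the string is the one used in the HPACK spec examples, and the exact byte counts are left to the program's output rather than asserted here.

```
package main

import (
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	s := "www.example.com"

	// HuffmanEncodeLength reports the encoded size, rounded up to whole bytes.
	fmt.Println("huffman bytes:", hpack.HuffmanEncodeLength(s), "raw bytes:", len(s))

	// Encode, then decode back to the original string.
	encoded := hpack.AppendHuffmanString(nil, s)
	decoded, err := hpack.HuffmanDecodeToString(encoded)
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded == s) // true
}
```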
352
vendor/golang.org/x/net/http2/hpack/tables.go
generated
vendored
Normal file
352
vendor/golang.org/x/net/http2/hpack/tables.go
generated
vendored
Normal file
|
|
@ -0,0 +1,352 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package hpack
|
||||
|
||||
func pair(name, value string) HeaderField {
|
||||
return HeaderField{Name: name, Value: value}
|
||||
}
|
||||
|
||||
// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B
|
||||
var staticTable = [...]HeaderField{
|
||||
pair(":authority", ""), // index 1 (1-based)
|
||||
pair(":method", "GET"),
|
||||
pair(":method", "POST"),
|
||||
pair(":path", "/"),
|
||||
pair(":path", "/index.html"),
|
||||
pair(":scheme", "http"),
|
||||
pair(":scheme", "https"),
|
||||
pair(":status", "200"),
|
||||
pair(":status", "204"),
|
||||
pair(":status", "206"),
|
||||
pair(":status", "304"),
|
||||
pair(":status", "400"),
|
||||
pair(":status", "404"),
|
||||
pair(":status", "500"),
|
||||
pair("accept-charset", ""),
|
||||
pair("accept-encoding", "gzip, deflate"),
|
||||
pair("accept-language", ""),
|
||||
pair("accept-ranges", ""),
|
||||
pair("accept", ""),
|
||||
pair("access-control-allow-origin", ""),
|
||||
pair("age", ""),
|
||||
pair("allow", ""),
|
||||
pair("authorization", ""),
|
||||
pair("cache-control", ""),
|
||||
pair("content-disposition", ""),
|
||||
pair("content-encoding", ""),
|
||||
pair("content-language", ""),
|
||||
pair("content-length", ""),
|
||||
pair("content-location", ""),
|
||||
pair("content-range", ""),
|
||||
pair("content-type", ""),
|
||||
pair("cookie", ""),
|
||||
pair("date", ""),
|
||||
pair("etag", ""),
|
||||
pair("expect", ""),
|
||||
pair("expires", ""),
|
||||
pair("from", ""),
|
||||
pair("host", ""),
|
||||
pair("if-match", ""),
|
||||
pair("if-modified-since", ""),
|
||||
pair("if-none-match", ""),
|
||||
pair("if-range", ""),
|
||||
pair("if-unmodified-since", ""),
|
||||
pair("last-modified", ""),
|
||||
pair("link", ""),
|
||||
pair("location", ""),
|
||||
pair("max-forwards", ""),
|
||||
pair("proxy-authenticate", ""),
|
||||
pair("proxy-authorization", ""),
|
||||
pair("range", ""),
|
||||
pair("referer", ""),
|
||||
pair("refresh", ""),
|
||||
pair("retry-after", ""),
|
||||
pair("server", ""),
|
||||
pair("set-cookie", ""),
|
||||
pair("strict-transport-security", ""),
|
||||
pair("transfer-encoding", ""),
|
||||
pair("user-agent", ""),
|
||||
pair("vary", ""),
|
||||
pair("via", ""),
|
||||
pair("www-authenticate", ""),
|
||||
}
|
||||
|
||||
var huffmanCodes = [256]uint32{
|
||||
0x1ff8,
|
||||
0x7fffd8,
|
||||
0xfffffe2,
|
||||
0xfffffe3,
|
||||
0xfffffe4,
|
||||
0xfffffe5,
|
||||
0xfffffe6,
|
||||
0xfffffe7,
|
||||
0xfffffe8,
|
||||
0xffffea,
|
||||
0x3ffffffc,
|
||||
0xfffffe9,
|
||||
0xfffffea,
|
||||
0x3ffffffd,
|
||||
0xfffffeb,
|
||||
0xfffffec,
|
||||
0xfffffed,
|
||||
0xfffffee,
|
||||
0xfffffef,
|
||||
0xffffff0,
|
||||
0xffffff1,
|
||||
0xffffff2,
|
||||
0x3ffffffe,
|
||||
0xffffff3,
|
||||
0xffffff4,
|
||||
0xffffff5,
|
||||
0xffffff6,
|
||||
0xffffff7,
|
||||
0xffffff8,
|
||||
0xffffff9,
|
||||
0xffffffa,
|
||||
0xffffffb,
|
||||
0x14,
|
||||
0x3f8,
|
||||
0x3f9,
|
||||
0xffa,
|
||||
0x1ff9,
|
||||
0x15,
|
||||
0xf8,
|
||||
0x7fa,
|
||||
0x3fa,
|
||||
0x3fb,
|
||||
0xf9,
|
||||
0x7fb,
|
||||
0xfa,
|
||||
0x16,
|
||||
0x17,
|
||||
0x18,
|
||||
0x0,
|
||||
0x1,
|
||||
0x2,
|
||||
0x19,
|
||||
0x1a,
|
||||
0x1b,
|
||||
0x1c,
|
||||
0x1d,
|
||||
0x1e,
|
||||
0x1f,
|
||||
0x5c,
|
||||
0xfb,
|
||||
0x7ffc,
|
||||
0x20,
|
||||
0xffb,
|
||||
0x3fc,
|
||||
0x1ffa,
|
||||
0x21,
|
||||
0x5d,
|
||||
0x5e,
|
||||
0x5f,
|
||||
0x60,
|
||||
0x61,
|
||||
0x62,
|
||||
0x63,
|
||||
0x64,
|
||||
0x65,
|
||||
0x66,
|
||||
0x67,
|
||||
0x68,
|
||||
0x69,
|
||||
0x6a,
|
||||
0x6b,
|
||||
0x6c,
|
||||
0x6d,
|
||||
0x6e,
|
||||
0x6f,
|
||||
0x70,
|
||||
0x71,
|
||||
0x72,
|
||||
0xfc,
|
||||
0x73,
|
||||
0xfd,
|
||||
0x1ffb,
|
||||
0x7fff0,
|
||||
0x1ffc,
|
||||
0x3ffc,
|
||||
0x22,
|
||||
0x7ffd,
|
||||
0x3,
|
||||
0x23,
|
||||
0x4,
|
||||
0x24,
|
||||
0x5,
|
||||
0x25,
|
||||
0x26,
|
||||
0x27,
|
||||
0x6,
|
||||
0x74,
|
||||
0x75,
|
||||
0x28,
|
||||
0x29,
|
||||
0x2a,
|
||||
0x7,
|
||||
0x2b,
|
||||
0x76,
|
||||
0x2c,
|
||||
0x8,
|
||||
0x9,
|
||||
0x2d,
|
||||
0x77,
|
||||
0x78,
|
||||
0x79,
|
||||
0x7a,
|
||||
0x7b,
|
||||
0x7ffe,
|
||||
0x7fc,
|
||||
0x3ffd,
|
||||
0x1ffd,
|
||||
0xffffffc,
|
||||
0xfffe6,
|
||||
0x3fffd2,
|
||||
0xfffe7,
|
||||
0xfffe8,
|
||||
0x3fffd3,
|
||||
0x3fffd4,
|
||||
0x3fffd5,
|
||||
0x7fffd9,
|
||||
0x3fffd6,
|
||||
0x7fffda,
|
||||
0x7fffdb,
|
||||
0x7fffdc,
|
||||
0x7fffdd,
|
||||
0x7fffde,
|
||||
0xffffeb,
|
||||
0x7fffdf,
|
||||
0xffffec,
|
||||
0xffffed,
|
||||
0x3fffd7,
|
||||
0x7fffe0,
|
||||
0xffffee,
|
||||
0x7fffe1,
|
||||
0x7fffe2,
|
||||
0x7fffe3,
|
||||
0x7fffe4,
|
||||
0x1fffdc,
|
||||
0x3fffd8,
|
||||
0x7fffe5,
|
||||
0x3fffd9,
|
||||
0x7fffe6,
|
||||
0x7fffe7,
|
||||
0xffffef,
|
||||
0x3fffda,
|
||||
0x1fffdd,
|
||||
0xfffe9,
|
||||
0x3fffdb,
|
||||
0x3fffdc,
|
||||
0x7fffe8,
|
||||
0x7fffe9,
|
||||
0x1fffde,
|
||||
0x7fffea,
|
||||
0x3fffdd,
|
||||
0x3fffde,
|
||||
0xfffff0,
|
||||
0x1fffdf,
|
||||
0x3fffdf,
|
||||
0x7fffeb,
|
||||
0x7fffec,
|
||||
0x1fffe0,
|
||||
0x1fffe1,
|
||||
0x3fffe0,
|
||||
0x1fffe2,
|
||||
0x7fffed,
|
||||
0x3fffe1,
|
||||
0x7fffee,
|
||||
0x7fffef,
|
||||
0xfffea,
|
||||
0x3fffe2,
|
||||
0x3fffe3,
|
||||
0x3fffe4,
|
||||
0x7ffff0,
|
||||
0x3fffe5,
|
||||
0x3fffe6,
|
||||
0x7ffff1,
|
||||
0x3ffffe0,
|
||||
0x3ffffe1,
|
||||
0xfffeb,
|
||||
0x7fff1,
|
||||
0x3fffe7,
|
||||
0x7ffff2,
|
||||
0x3fffe8,
|
||||
0x1ffffec,
|
||||
0x3ffffe2,
|
||||
0x3ffffe3,
|
||||
0x3ffffe4,
|
||||
0x7ffffde,
|
||||
0x7ffffdf,
|
||||
0x3ffffe5,
|
||||
0xfffff1,
|
||||
0x1ffffed,
|
||||
0x7fff2,
|
||||
0x1fffe3,
|
||||
0x3ffffe6,
|
||||
0x7ffffe0,
|
||||
0x7ffffe1,
|
||||
0x3ffffe7,
|
||||
0x7ffffe2,
|
||||
0xfffff2,
|
||||
0x1fffe4,
|
||||
0x1fffe5,
|
||||
0x3ffffe8,
|
||||
0x3ffffe9,
|
||||
0xffffffd,
|
||||
0x7ffffe3,
|
||||
0x7ffffe4,
|
||||
0x7ffffe5,
|
||||
0xfffec,
|
||||
0xfffff3,
|
||||
0xfffed,
|
||||
0x1fffe6,
|
||||
0x3fffe9,
|
||||
0x1fffe7,
|
||||
0x1fffe8,
|
||||
0x7ffff3,
|
||||
0x3fffea,
|
||||
0x3fffeb,
|
||||
0x1ffffee,
|
||||
0x1ffffef,
|
||||
0xfffff4,
|
||||
0xfffff5,
|
||||
0x3ffffea,
|
||||
0x7ffff4,
|
||||
0x3ffffeb,
|
||||
0x7ffffe6,
|
||||
0x3ffffec,
|
||||
0x3ffffed,
|
||||
0x7ffffe7,
|
||||
0x7ffffe8,
|
||||
0x7ffffe9,
|
||||
0x7ffffea,
|
||||
0x7ffffeb,
|
||||
0xffffffe,
|
||||
0x7ffffec,
|
||||
0x7ffffed,
|
||||
0x7ffffee,
|
||||
0x7ffffef,
|
||||
0x7fffff0,
|
||||
0x3ffffee,
|
||||
}
|
||||
|
||||
var huffmanCodeLen = [256]uint8{
|
||||
13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28,
|
||||
28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28,
|
||||
6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6,
|
||||
5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10,
|
||||
13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
|
||||
7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6,
|
||||
15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5,
|
||||
6, 7, 6, 5, 5, 6, 7, 7, 7, 7, 7, 15, 11, 14, 13, 28,
|
||||
20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23,
|
||||
24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24,
|
||||
22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23,
|
||||
21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23,
|
||||
26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25,
|
||||
19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27,
|
||||
20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23,
|
||||
26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26,
|
||||
}
|
||||
464
vendor/golang.org/x/net/http2/http2.go
generated
vendored
Normal file
464
vendor/golang.org/x/net/http2/http2.go
generated
vendored
Normal file
|
|
@ -0,0 +1,464 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package http2 implements the HTTP/2 protocol.
|
||||
//
|
||||
// This package is low-level and intended to be used directly by very
|
||||
// few people. Most users will use it indirectly through the automatic
|
||||
// use by the net/http package (from Go 1.6 and later).
|
||||
// For use in earlier Go versions see ConfigureServer. (Transport support
|
||||
// requires Go 1.6 or later)
|
||||
//
|
||||
// See https://http2.github.io/ for more information on HTTP/2.
|
||||
//
|
||||
// See https://http2.golang.org/ for a test server running this code.
|
||||
package http2
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var (
|
||||
VerboseLogs bool
|
||||
logFrameWrites bool
|
||||
logFrameReads bool
|
||||
)
|
||||
|
||||
func init() {
|
||||
e := os.Getenv("GODEBUG")
|
||||
if strings.Contains(e, "http2debug=1") {
|
||||
VerboseLogs = true
|
||||
}
|
||||
if strings.Contains(e, "http2debug=2") {
|
||||
VerboseLogs = true
|
||||
logFrameWrites = true
|
||||
logFrameReads = true
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
// ClientPreface is the string that must be sent by new
|
||||
// connections from clients.
|
||||
ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"
|
||||
|
||||
// SETTINGS_MAX_FRAME_SIZE default
|
||||
// http://http2.github.io/http2-spec/#rfc.section.6.5.2
|
||||
initialMaxFrameSize = 16384
|
||||
|
||||
// NextProtoTLS is the NPN/ALPN protocol negotiated during
|
||||
// HTTP/2's TLS setup.
|
||||
NextProtoTLS = "h2"
|
||||
|
||||
// http://http2.github.io/http2-spec/#SettingValues
|
||||
initialHeaderTableSize = 4096
|
||||
|
||||
initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size
|
||||
|
||||
defaultMaxReadFrameSize = 1 << 20
|
||||
)
|
||||
|
||||
var (
|
||||
clientPreface = []byte(ClientPreface)
|
||||
)
|
||||
|
||||
type streamState int
|
||||
|
||||
const (
|
||||
stateIdle streamState = iota
|
||||
stateOpen
|
||||
stateHalfClosedLocal
|
||||
stateHalfClosedRemote
|
||||
stateResvLocal
|
||||
stateResvRemote
|
||||
stateClosed
|
||||
)
|
||||
|
||||
var stateName = [...]string{
|
||||
stateIdle: "Idle",
|
||||
stateOpen: "Open",
|
||||
stateHalfClosedLocal: "HalfClosedLocal",
|
||||
stateHalfClosedRemote: "HalfClosedRemote",
|
||||
stateResvLocal: "ResvLocal",
|
||||
stateResvRemote: "ResvRemote",
|
||||
stateClosed: "Closed",
|
||||
}
|
||||
|
||||
func (st streamState) String() string {
|
||||
return stateName[st]
|
||||
}
|
||||
|
||||
// Setting is a setting parameter: which setting it is, and its value.
|
||||
type Setting struct {
|
||||
// ID is which setting is being set.
|
||||
// See http://http2.github.io/http2-spec/#SettingValues
|
||||
ID SettingID
|
||||
|
||||
// Val is the value.
|
||||
Val uint32
|
||||
}
|
||||
|
||||
func (s Setting) String() string {
|
||||
return fmt.Sprintf("[%v = %d]", s.ID, s.Val)
|
||||
}
|
||||
|
||||
// Valid reports whether the setting is valid.
|
||||
func (s Setting) Valid() error {
|
||||
// Limits and error codes from 6.5.2 Defined SETTINGS Parameters
|
||||
switch s.ID {
|
||||
case SettingEnablePush:
|
||||
if s.Val != 1 && s.Val != 0 {
|
||||
return ConnectionError(ErrCodeProtocol)
|
||||
}
|
||||
case SettingInitialWindowSize:
|
||||
if s.Val > 1<<31-1 {
|
||||
return ConnectionError(ErrCodeFlowControl)
|
||||
}
|
||||
case SettingMaxFrameSize:
|
||||
if s.Val < 16384 || s.Val > 1<<24-1 {
|
||||
return ConnectionError(ErrCodeProtocol)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// A SettingID is an HTTP/2 setting as defined in
|
||||
// http://http2.github.io/http2-spec/#iana-settings
|
||||
type SettingID uint16
|
||||
|
||||
const (
|
||||
SettingHeaderTableSize SettingID = 0x1
|
||||
SettingEnablePush SettingID = 0x2
|
||||
SettingMaxConcurrentStreams SettingID = 0x3
|
||||
SettingInitialWindowSize SettingID = 0x4
|
||||
SettingMaxFrameSize SettingID = 0x5
|
||||
SettingMaxHeaderListSize SettingID = 0x6
|
||||
)
|
||||
|
||||
var settingName = map[SettingID]string{
|
||||
SettingHeaderTableSize: "HEADER_TABLE_SIZE",
|
||||
SettingEnablePush: "ENABLE_PUSH",
|
||||
SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS",
|
||||
SettingInitialWindowSize: "INITIAL_WINDOW_SIZE",
|
||||
SettingMaxFrameSize: "MAX_FRAME_SIZE",
|
||||
SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE",
|
||||
}
|
||||
|
||||
func (s SettingID) String() string {
|
||||
if v, ok := settingName[s]; ok {
|
||||
return v
|
||||
}
|
||||
return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s))
|
||||
}
|
||||
|
||||
var (
|
||||
errInvalidHeaderFieldName = errors.New("http2: invalid header field name")
|
||||
errInvalidHeaderFieldValue = errors.New("http2: invalid header field value")
|
||||
)
|
||||
|
||||
// validHeaderFieldName reports whether v is a valid header field name (key).
|
||||
// RFC 7230 says:
|
||||
// header-field = field-name ":" OWS field-value OWS
|
||||
// field-name = token
|
||||
// token = 1*tchar
|
||||
// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." /
|
||||
// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA
|
||||
// Further, http2 says:
|
||||
// "Just as in HTTP/1.x, header field names are strings of ASCII
|
||||
// characters that are compared in a case-insensitive
|
||||
// fashion. However, header field names MUST be converted to
|
||||
// lowercase prior to their encoding in HTTP/2. "
|
||||
func validHeaderFieldName(v string) bool {
|
||||
if len(v) == 0 {
|
||||
return false
|
||||
}
|
||||
for _, r := range v {
|
||||
if int(r) >= len(isTokenTable) || ('A' <= r && r <= 'Z') {
|
||||
return false
|
||||
}
|
||||
if !isTokenTable[byte(r)] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// validHeaderFieldValue reports whether v is a valid header field value.
|
||||
//
|
||||
// RFC 7230 says:
|
||||
// field-value = *( field-content / obs-fold )
|
||||
// obs-fold = N/A to http2, and deprecated
|
||||
// field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ]
|
||||
// field-vchar = VCHAR / obs-text
|
||||
// obs-text = %x80-FF
|
||||
// VCHAR = "any visible [USASCII] character"
|
||||
//
|
||||
// http2 further says: "Similarly, HTTP/2 allows header field values
|
||||
// that are not valid. While most of the values that can be encoded
|
||||
// will not alter header field parsing, carriage return (CR, ASCII
|
||||
// 0xd), line feed (LF, ASCII 0xa), and the zero character (NUL, ASCII
|
||||
// 0x0) might be exploited by an attacker if they are translated
|
||||
// verbatim. Any request or response that contains a character not
|
||||
// permitted in a header field value MUST be treated as malformed
|
||||
// (Section 8.1.2.6). Valid characters are defined by the
|
||||
// field-content ABNF rule in Section 3.2 of [RFC7230]."
|
||||
//
|
||||
// This function does not (yet?) properly handle the rejection of
|
||||
// strings that begin or end with SP or HTAB.
|
||||
func validHeaderFieldValue(v string) bool {
|
||||
for i := 0; i < len(v); i++ {
|
||||
if b := v[i]; b < ' ' && b != '\t' || b == 0x7f {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
var httpCodeStringCommon = map[int]string{} // n -> strconv.Itoa(n)
|
||||
|
||||
func init() {
|
||||
for i := 100; i <= 999; i++ {
|
||||
if v := http.StatusText(i); v != "" {
|
||||
httpCodeStringCommon[i] = strconv.Itoa(i)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func httpCodeString(code int) string {
|
||||
if s, ok := httpCodeStringCommon[code]; ok {
|
||||
return s
|
||||
}
|
||||
return strconv.Itoa(code)
|
||||
}
|
||||
|
||||
// from pkg io
|
||||
type stringWriter interface {
|
||||
WriteString(s string) (n int, err error)
|
||||
}
|
||||
|
||||
// A gate lets two goroutines coordinate their activities.
|
||||
type gate chan struct{}
|
||||
|
||||
func (g gate) Done() { g <- struct{}{} }
|
||||
func (g gate) Wait() { <-g }
|
||||
|
||||
// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed).
|
||||
type closeWaiter chan struct{}
|
||||
|
||||
// Init makes a closeWaiter usable.
|
||||
// It exists so that a closeWaiter value can be placed inside a
|
||||
// larger struct and have the Mutex and Cond's memory in the same
|
||||
// allocation.
|
||||
func (cw *closeWaiter) Init() {
|
||||
*cw = make(chan struct{})
|
||||
}
|
||||
|
||||
// Close marks the closeWaiter as closed and unblocks any waiters.
|
||||
func (cw closeWaiter) Close() {
|
||||
close(cw)
|
||||
}
|
||||
|
||||
// Wait waits for the closeWaiter to become closed.
|
||||
func (cw closeWaiter) Wait() {
|
||||
<-cw
|
||||
}
|
||||
|
||||
// bufferedWriter is a buffered writer that writes to w.
|
||||
// Its buffered writer is lazily allocated as needed, to minimize
|
||||
// idle memory usage with many connections.
|
||||
type bufferedWriter struct {
|
||||
w io.Writer // immutable
|
||||
bw *bufio.Writer // non-nil when data is buffered
|
||||
}
|
||||
|
||||
func newBufferedWriter(w io.Writer) *bufferedWriter {
|
||||
return &bufferedWriter{w: w}
|
||||
}
|
||||
|
||||
var bufWriterPool = sync.Pool{
|
||||
New: func() interface{} {
|
||||
// TODO: pick something better? this is a bit under
|
||||
// (3 x typical 1500 byte MTU) at least.
|
||||
return bufio.NewWriterSize(nil, 4<<10)
|
||||
},
|
||||
}
|
||||
|
||||
func (w *bufferedWriter) Write(p []byte) (n int, err error) {
|
||||
if w.bw == nil {
|
||||
bw := bufWriterPool.Get().(*bufio.Writer)
|
||||
bw.Reset(w.w)
|
||||
w.bw = bw
|
||||
}
|
||||
return w.bw.Write(p)
|
||||
}
|
||||
|
||||
func (w *bufferedWriter) Flush() error {
|
||||
bw := w.bw
|
||||
if bw == nil {
|
||||
return nil
|
||||
}
|
||||
err := bw.Flush()
|
||||
bw.Reset(nil)
|
||||
bufWriterPool.Put(bw)
|
||||
w.bw = nil
|
||||
return err
|
||||
}
|
||||
|
||||
func mustUint31(v int32) uint32 {
|
||||
if v < 0 || v > 2147483647 {
|
||||
panic("out of range")
|
||||
}
|
||||
return uint32(v)
|
||||
}
|
||||
|
||||
// bodyAllowedForStatus reports whether a given response status code
|
||||
// permits a body. See RFC 2616, section 4.4.
|
||||
func bodyAllowedForStatus(status int) bool {
|
||||
switch {
|
||||
case status >= 100 && status <= 199:
|
||||
return false
|
||||
case status == 204:
|
||||
return false
|
||||
case status == 304:
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
type httpError struct {
|
||||
msg string
|
||||
timeout bool
|
||||
}
|
||||
|
||||
func (e *httpError) Error() string { return e.msg }
|
||||
func (e *httpError) Timeout() bool { return e.timeout }
|
||||
func (e *httpError) Temporary() bool { return true }
|
||||
|
||||
var errTimeout error = &httpError{msg: "http2: timeout awaiting response headers", timeout: true}
|
||||
|
||||
var isTokenTable = [127]bool{
|
||||
'!': true,
|
||||
'#': true,
|
||||
'$': true,
|
||||
'%': true,
|
||||
'&': true,
|
||||
'\'': true,
|
||||
'*': true,
|
||||
'+': true,
|
||||
'-': true,
|
||||
'.': true,
|
||||
'0': true,
|
||||
'1': true,
|
||||
'2': true,
|
||||
'3': true,
|
||||
'4': true,
|
||||
'5': true,
|
||||
'6': true,
|
||||
'7': true,
|
||||
'8': true,
|
||||
'9': true,
|
||||
'A': true,
|
||||
'B': true,
|
||||
'C': true,
|
||||
'D': true,
|
||||
'E': true,
|
||||
'F': true,
|
||||
'G': true,
|
||||
'H': true,
|
||||
'I': true,
|
||||
'J': true,
|
||||
'K': true,
|
||||
'L': true,
|
||||
'M': true,
|
||||
'N': true,
|
||||
'O': true,
|
||||
'P': true,
|
||||
'Q': true,
|
||||
'R': true,
|
||||
'S': true,
|
||||
'T': true,
|
||||
'U': true,
|
||||
'W': true,
|
||||
'V': true,
|
||||
'X': true,
|
||||
'Y': true,
|
||||
'Z': true,
|
||||
'^': true,
|
||||
'_': true,
|
||||
'`': true,
|
||||
'a': true,
|
||||
'b': true,
|
||||
'c': true,
|
||||
'd': true,
|
||||
'e': true,
|
||||
'f': true,
|
||||
'g': true,
|
||||
'h': true,
|
||||
'i': true,
|
||||
'j': true,
|
||||
'k': true,
|
||||
'l': true,
|
||||
'm': true,
|
||||
'n': true,
|
||||
'o': true,
|
||||
'p': true,
|
||||
'q': true,
|
||||
'r': true,
|
||||
's': true,
|
||||
't': true,
|
||||
'u': true,
|
||||
'v': true,
|
||||
'w': true,
|
||||
'x': true,
|
||||
'y': true,
|
||||
'z': true,
|
||||
'|': true,
|
||||
'~': true,
|
||||
}
|
||||
|
||||
type connectionStater interface {
|
||||
ConnectionState() tls.ConnectionState
|
||||
}
|
||||
|
||||
var sorterPool = sync.Pool{New: func() interface{} { return new(sorter) }}
|
||||
|
||||
type sorter struct {
|
||||
v []string // owned by sorter
|
||||
}
|
||||
|
||||
func (s *sorter) Len() int { return len(s.v) }
|
||||
func (s *sorter) Swap(i, j int) { s.v[i], s.v[j] = s.v[j], s.v[i] }
|
||||
func (s *sorter) Less(i, j int) bool { return s.v[i] < s.v[j] }
|
||||
|
||||
// Keys returns the sorted keys of h.
|
||||
//
|
||||
// The returned slice is only valid until s is used again or returned to
|
||||
// its pool.
|
||||
func (s *sorter) Keys(h http.Header) []string {
|
||||
keys := s.v[:0]
|
||||
for k := range h {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
s.v = keys
|
||||
sort.Sort(s)
|
||||
return keys
|
||||
}
|
||||
|
||||
func (s *sorter) SortStrings(ss []string) {
|
||||
// Our sorter works on s.v, which the sorter owns, so
|
||||
// stash it away while we sort the user's buffer.
|
||||
save := s.v
|
||||
s.v = ss
|
||||
sort.Sort(s)
|
||||
s.v = save
|
||||
}
|
||||
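Per the init function above, verbose logging is enabled with GODEBUG=http2debug=1, and =2 additionally logs frame reads and writes. As a small illustration of the exported Setting type, with arbitrary values: String uses the settingName map and Valid applies the bounds from RFC 7540 section 6.5.2.

```
package main

import (
	"fmt"

	"golang.org/x/net/http2"
)

func main() {
	ok := http2.Setting{ID: http2.SettingInitialWindowSize, Val: 65535}
	bad := http2.Setting{ID: http2.SettingMaxFrameSize, Val: 123} // below the 16384 minimum

	fmt.Println(ok, ok.Valid())   // [INITIAL_WINDOW_SIZE = 65535] <nil>
	fmt.Println(bad, bad.Valid()) // [MAX_FRAME_SIZE = 123] plus a PROTOCOL_ERROR connection error
}
```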
198
vendor/golang.org/x/net/http2/http2_test.go
generated
vendored
Normal file
198
vendor/golang.org/x/net/http2/http2_test.go
generated
vendored
Normal file
|
|
@ -0,0 +1,198 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"golang.org/x/net/http2/hpack"
|
||||
)
|
||||
|
||||
var knownFailing = flag.Bool("known_failing", false, "Run known-failing tests.")
|
||||
|
||||
func condSkipFailingTest(t *testing.T) {
|
||||
if !*knownFailing {
|
||||
t.Skip("Skipping known-failing test without --known_failing")
|
||||
}
|
||||
}
|
||||
|
||||
func init() {
|
||||
DebugGoroutines = true
|
||||
flag.BoolVar(&VerboseLogs, "verboseh2", false, "Verbose HTTP/2 debug logging")
|
||||
}
|
||||
|
||||
func TestSettingString(t *testing.T) {
|
||||
tests := []struct {
|
||||
s Setting
|
||||
want string
|
||||
}{
|
||||
{Setting{SettingMaxFrameSize, 123}, "[MAX_FRAME_SIZE = 123]"},
|
||||
{Setting{1<<16 - 1, 123}, "[UNKNOWN_SETTING_65535 = 123]"},
|
||||
}
|
||||
for i, tt := range tests {
|
||||
got := fmt.Sprint(tt.s)
|
||||
if got != tt.want {
|
||||
t.Errorf("%d. for %#v, string = %q; want %q", i, tt.s, got, tt.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type twriter struct {
|
||||
t testing.TB
|
||||
st *serverTester // optional
|
||||
}
|
||||
|
||||
func (w twriter) Write(p []byte) (n int, err error) {
|
||||
if w.st != nil {
|
||||
ps := string(p)
|
||||
for _, phrase := range w.st.logFilter {
|
||||
if strings.Contains(ps, phrase) {
|
||||
return len(p), nil // no logging
|
||||
}
|
||||
}
|
||||
}
|
||||
w.t.Logf("%s", p)
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
// like encodeHeader, but don't add implicit pseudo headers.
|
||||
func encodeHeaderNoImplicit(t *testing.T, headers ...string) []byte {
|
||||
var buf bytes.Buffer
|
||||
enc := hpack.NewEncoder(&buf)
|
||||
for len(headers) > 0 {
|
||||
k, v := headers[0], headers[1]
|
||||
headers = headers[2:]
|
||||
if err := enc.WriteField(hpack.HeaderField{Name: k, Value: v}); err != nil {
|
||||
t.Fatalf("HPACK encoding error for %q/%q: %v", k, v, err)
|
||||
}
|
||||
}
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
// Verify that curl has http2.
|
||||
func requireCurl(t *testing.T) {
|
||||
out, err := dockerLogs(curl(t, "--version"))
|
||||
if err != nil {
|
||||
t.Skipf("failed to determine curl features; skipping test")
|
||||
}
|
||||
if !strings.Contains(string(out), "HTTP2") {
|
||||
t.Skip("curl doesn't support HTTP2; skipping test")
|
||||
}
|
||||
}
|
||||
|
||||
func curl(t *testing.T, args ...string) (container string) {
|
||||
out, err := exec.Command("docker", append([]string{"run", "-d", "--net=host", "gohttp2/curl"}, args...)...).Output()
|
||||
if err != nil {
|
||||
t.Skipf("Failed to run curl in docker: %v, %s", err, out)
|
||||
}
|
||||
return strings.TrimSpace(string(out))
|
||||
}
|
||||
|
||||
// Verify that h2load exists.
|
||||
func requireH2load(t *testing.T) {
|
||||
out, err := dockerLogs(h2load(t, "--version"))
|
||||
if err != nil {
|
||||
t.Skipf("failed to probe h2load; skipping test: %s", out)
|
||||
}
|
||||
if !strings.Contains(string(out), "h2load nghttp2/") {
|
||||
t.Skipf("h2load not present; skipping test. (Output=%q)", out)
|
||||
}
|
||||
}
|
||||
|
||||
func h2load(t *testing.T, args ...string) (container string) {
|
||||
out, err := exec.Command("docker", append([]string{"run", "-d", "--net=host", "--entrypoint=/usr/local/bin/h2load", "gohttp2/curl"}, args...)...).Output()
|
||||
if err != nil {
|
||||
t.Skipf("Failed to run h2load in docker: %v, %s", err, out)
|
||||
}
|
||||
return strings.TrimSpace(string(out))
|
||||
}
|
||||
|
||||
type puppetCommand struct {
|
||||
fn func(w http.ResponseWriter, r *http.Request)
|
||||
done chan<- bool
|
||||
}
|
||||
|
||||
type handlerPuppet struct {
|
||||
ch chan puppetCommand
|
||||
}
|
||||
|
||||
func newHandlerPuppet() *handlerPuppet {
|
||||
return &handlerPuppet{
|
||||
ch: make(chan puppetCommand),
|
||||
}
|
||||
}
|
||||
|
||||
func (p *handlerPuppet) act(w http.ResponseWriter, r *http.Request) {
|
||||
for cmd := range p.ch {
|
||||
cmd.fn(w, r)
|
||||
cmd.done <- true
|
||||
}
|
||||
}
|
||||
|
||||
func (p *handlerPuppet) done() { close(p.ch) }
|
||||
func (p *handlerPuppet) do(fn func(http.ResponseWriter, *http.Request)) {
|
||||
done := make(chan bool)
|
||||
p.ch <- puppetCommand{fn, done}
|
||||
<-done
|
||||
}
|
||||
func dockerLogs(container string) ([]byte, error) {
|
||||
out, err := exec.Command("docker", "wait", container).CombinedOutput()
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
exitStatus, err := strconv.Atoi(strings.TrimSpace(string(out)))
|
||||
if err != nil {
|
||||
return out, errors.New("unexpected exit status from docker wait")
|
||||
}
|
||||
out, err = exec.Command("docker", "logs", container).CombinedOutput()
|
||||
exec.Command("docker", "rm", container).Run()
|
||||
if err == nil && exitStatus != 0 {
|
||||
err = fmt.Errorf("exit status %d: %s", exitStatus, out)
|
||||
}
|
||||
return out, err
|
||||
}
|
||||
|
||||
func kill(container string) {
|
||||
exec.Command("docker", "kill", container).Run()
|
||||
exec.Command("docker", "rm", container).Run()
|
||||
}
|
||||
|
||||
func cleanDate(res *http.Response) {
|
||||
if d := res.Header["Date"]; len(d) == 1 {
|
||||
d[0] = "XXX"
|
||||
}
|
||||
}
|
||||
|
||||
func TestSorterPoolAllocs(t *testing.T) {
|
||||
ss := []string{"a", "b", "c"}
|
||||
h := http.Header{
|
||||
"a": nil,
|
||||
"b": nil,
|
||||
"c": nil,
|
||||
}
|
||||
sorter := new(sorter)
|
||||
|
||||
if allocs := testing.AllocsPerRun(100, func() {
|
||||
sorter.SortStrings(ss)
|
||||
}); allocs >= 1 {
|
||||
t.Logf("SortStrings allocs = %v; want <1", allocs)
|
||||
}
|
||||
|
||||
if allocs := testing.AllocsPerRun(5, func() {
|
||||
if len(sorter.Keys(h)) != 3 {
|
||||
t.Fatal("wrong result")
|
||||
}
|
||||
}); allocs > 0 {
|
||||
t.Logf("Keys allocs = %v; want <1", allocs)
|
||||
}
|
||||
}
|
||||
11
vendor/golang.org/x/net/http2/not_go15.go
generated
vendored
Normal file
11
vendor/golang.org/x/net/http2/not_go15.go
generated
vendored
Normal file
|
|
@ -0,0 +1,11 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !go1.5
|
||||
|
||||
package http2
|
||||
|
||||
import "net/http"
|
||||
|
||||
func requestCancel(req *http.Request) <-chan struct{} { return nil }
|
||||
13
vendor/golang.org/x/net/http2/not_go16.go
generated
vendored
Normal file
13
vendor/golang.org/x/net/http2/not_go16.go
generated
vendored
Normal file
|
|
@ -0,0 +1,13 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !go1.6
|
||||
|
||||
package http2
|
||||
|
||||
import "net/http"
|
||||
|
||||
func configureTransport(t1 *http.Transport) (*Transport, error) {
|
||||
return nil, errTransportVersion
|
||||
}
|
||||
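Both stub files above are compiled only on older toolchains because of their negative build constraints (+build !go1.5 and !go1.6); on newer Go versions, sibling files carrying the matching positive tags provide the real hooks. A minimal sketch of the positive side of that pattern, with a hypothetical placeholder body rather than the real wiring:

// +build go1.6

package http2

import "net/http"

// configureTransport is where the real Go 1.6+ integration would live.
// The body below is a hypothetical placeholder, not code from this commit.
func configureTransport(t1 *http.Transport) (*Transport, error) {
	// Real wiring with t1 omitted in this sketch.
	return nil, nil
}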
147
vendor/golang.org/x/net/http2/pipe.go
generated
vendored
Normal file
|
|
@@ -0,0 +1,147 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like
|
||||
// io.Pipe except there are no PipeReader/PipeWriter halves, and the
|
||||
// underlying buffer is an interface. (io.Pipe is always unbuffered)
|
||||
type pipe struct {
|
||||
mu sync.Mutex
|
||||
c sync.Cond // c.L lazily initialized to &p.mu
|
||||
b pipeBuffer
|
||||
err error // read error once empty. non-nil means closed.
|
||||
breakErr error // immediate read error (caller doesn't see rest of b)
|
||||
donec chan struct{} // closed on error
|
||||
readFn func() // optional code to run in Read before error
|
||||
}
|
||||
|
||||
type pipeBuffer interface {
|
||||
Len() int
|
||||
io.Writer
|
||||
io.Reader
|
||||
}
|
||||
|
||||
// Read waits until data is available and copies bytes
|
||||
// from the buffer into p.
|
||||
func (p *pipe) Read(d []byte) (n int, err error) {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
if p.c.L == nil {
|
||||
p.c.L = &p.mu
|
||||
}
|
||||
for {
|
||||
if p.breakErr != nil {
|
||||
return 0, p.breakErr
|
||||
}
|
||||
if p.b.Len() > 0 {
|
||||
return p.b.Read(d)
|
||||
}
|
||||
if p.err != nil {
|
||||
if p.readFn != nil {
|
||||
p.readFn() // e.g. copy trailers
|
||||
p.readFn = nil // not sticky like p.err
|
||||
}
|
||||
return 0, p.err
|
||||
}
|
||||
p.c.Wait()
|
||||
}
|
||||
}
|
||||
|
||||
var errClosedPipeWrite = errors.New("write on closed buffer")
|
||||
|
||||
// Write copies bytes from p into the buffer and wakes a reader.
|
||||
// It is an error to write more data than the buffer can hold.
|
||||
func (p *pipe) Write(d []byte) (n int, err error) {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
if p.c.L == nil {
|
||||
p.c.L = &p.mu
|
||||
}
|
||||
defer p.c.Signal()
|
||||
if p.err != nil {
|
||||
return 0, errClosedPipeWrite
|
||||
}
|
||||
return p.b.Write(d)
|
||||
}
|
||||
|
||||
// CloseWithError causes the next Read (waking up a current blocked
|
||||
// Read if needed) to return the provided err after all data has been
|
||||
// read.
|
||||
//
|
||||
// The error must be non-nil.
|
||||
func (p *pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) }
|
||||
|
||||
// BreakWithError causes the next Read (waking up a current blocked
|
||||
// Read if needed) to return the provided err immediately, without
|
||||
// waiting for unread data.
|
||||
func (p *pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) }
|
||||
|
||||
// closeWithErrorAndCode is like CloseWithError but also sets some code to run
|
||||
// in the caller's goroutine before returning the error.
|
||||
func (p *pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) }
|
||||
|
||||
func (p *pipe) closeWithError(dst *error, err error, fn func()) {
|
||||
if err == nil {
|
||||
panic("err must be non-nil")
|
||||
}
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
if p.c.L == nil {
|
||||
p.c.L = &p.mu
|
||||
}
|
||||
defer p.c.Signal()
|
||||
if *dst != nil {
|
||||
// Already been done.
|
||||
return
|
||||
}
|
||||
p.readFn = fn
|
||||
*dst = err
|
||||
p.closeDoneLocked()
|
||||
}
|
||||
|
||||
// requires p.mu be held.
|
||||
func (p *pipe) closeDoneLocked() {
|
||||
if p.donec == nil {
|
||||
return
|
||||
}
|
||||
// Close if unclosed. This isn't racy since we always
|
||||
// hold p.mu while closing.
|
||||
select {
|
||||
case <-p.donec:
|
||||
default:
|
||||
close(p.donec)
|
||||
}
|
||||
}
|
||||
|
||||
// Err returns the error (if any) first set by BreakWithError or CloseWithError.
|
||||
func (p *pipe) Err() error {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
if p.breakErr != nil {
|
||||
return p.breakErr
|
||||
}
|
||||
return p.err
|
||||
}
|
||||
|
||||
// Done returns a channel which is closed if and when this pipe is closed
|
||||
// with CloseWithError.
|
||||
func (p *pipe) Done() <-chan struct{} {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
if p.donec == nil {
|
||||
p.donec = make(chan struct{})
|
||||
if p.err != nil || p.breakErr != nil {
|
||||
// Already hit an error.
|
||||
p.closeDoneLocked()
|
||||
}
|
||||
}
|
||||
return p.donec
|
||||
}
|
||||
109
vendor/golang.org/x/net/http2/pipe_test.go
generated
vendored
Normal file
|
|
@@ -0,0 +1,109 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestPipeClose(t *testing.T) {
|
||||
var p pipe
|
||||
p.b = new(bytes.Buffer)
|
||||
a := errors.New("a")
|
||||
b := errors.New("b")
|
||||
p.CloseWithError(a)
|
||||
p.CloseWithError(b)
|
||||
_, err := p.Read(make([]byte, 1))
|
||||
if err != a {
|
||||
t.Errorf("err = %v want %v", err, a)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPipeDoneChan(t *testing.T) {
|
||||
var p pipe
|
||||
done := p.Done()
|
||||
select {
|
||||
case <-done:
|
||||
t.Fatal("done too soon")
|
||||
default:
|
||||
}
|
||||
p.CloseWithError(io.EOF)
|
||||
select {
|
||||
case <-done:
|
||||
default:
|
||||
t.Fatal("should be done")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPipeDoneChan_ErrFirst(t *testing.T) {
|
||||
var p pipe
|
||||
p.CloseWithError(io.EOF)
|
||||
done := p.Done()
|
||||
select {
|
||||
case <-done:
|
||||
default:
|
||||
t.Fatal("should be done")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPipeDoneChan_Break(t *testing.T) {
|
||||
var p pipe
|
||||
done := p.Done()
|
||||
select {
|
||||
case <-done:
|
||||
t.Fatal("done too soon")
|
||||
default:
|
||||
}
|
||||
p.BreakWithError(io.EOF)
|
||||
select {
|
||||
case <-done:
|
||||
default:
|
||||
t.Fatal("should be done")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPipeDoneChan_Break_ErrFirst(t *testing.T) {
|
||||
var p pipe
|
||||
p.BreakWithError(io.EOF)
|
||||
done := p.Done()
|
||||
select {
|
||||
case <-done:
|
||||
default:
|
||||
t.Fatal("should be done")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPipeCloseWithError(t *testing.T) {
|
||||
p := &pipe{b: new(bytes.Buffer)}
|
||||
const body = "foo"
|
||||
io.WriteString(p, body)
|
||||
a := errors.New("test error")
|
||||
p.CloseWithError(a)
|
||||
all, err := ioutil.ReadAll(p)
|
||||
if string(all) != body {
|
||||
t.Errorf("read bytes = %q; want %q", all, body)
|
||||
}
|
||||
if err != a {
|
||||
t.Logf("read error = %v, %v", err, a)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPipeBreakWithError(t *testing.T) {
|
||||
p := &pipe{b: new(bytes.Buffer)}
|
||||
io.WriteString(p, "foo")
|
||||
a := errors.New("test err")
|
||||
p.BreakWithError(a)
|
||||
all, err := ioutil.ReadAll(p)
|
||||
if string(all) != "" {
|
||||
t.Errorf("read bytes = %q; want empty string", all)
|
||||
}
|
||||
if err != a {
|
||||
t.Logf("read error = %v, %v", err, a)
|
||||
}
|
||||
}
|
||||
118
vendor/golang.org/x/net/http2/priority_test.go
generated
vendored
Normal file
|
|
@@ -0,0 +1,118 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestPriority(t *testing.T) {
|
||||
// A -> B
|
||||
// move A's parent to B
|
||||
streams := make(map[uint32]*stream)
|
||||
a := &stream{
|
||||
parent: nil,
|
||||
weight: 16,
|
||||
}
|
||||
streams[1] = a
|
||||
b := &stream{
|
||||
parent: a,
|
||||
weight: 16,
|
||||
}
|
||||
streams[2] = b
|
||||
adjustStreamPriority(streams, 1, PriorityParam{
|
||||
Weight: 20,
|
||||
StreamDep: 2,
|
||||
})
|
||||
if a.parent != b {
|
||||
t.Errorf("Expected A's parent to be B")
|
||||
}
|
||||
if a.weight != 20 {
|
||||
t.Errorf("Expected A's weight to be 20; got %d", a.weight)
|
||||
}
|
||||
if b.parent != nil {
|
||||
t.Errorf("Expected B to have no parent")
|
||||
}
|
||||
if b.weight != 16 {
|
||||
t.Errorf("Expected B's weight to be 16; got %d", b.weight)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPriorityExclusiveZero(t *testing.T) {
|
||||
// A, B, and C are all children of the 0 stream.
|
||||
// Exclusive reprioritization to any of the streams
|
||||
// should bring the rest of the streams under the
|
||||
// reprioritized stream
|
||||
streams := make(map[uint32]*stream)
|
||||
a := &stream{
|
||||
parent: nil,
|
||||
weight: 16,
|
||||
}
|
||||
streams[1] = a
|
||||
b := &stream{
|
||||
parent: nil,
|
||||
weight: 16,
|
||||
}
|
||||
streams[2] = b
|
||||
c := &stream{
|
||||
parent: nil,
|
||||
weight: 16,
|
||||
}
|
||||
streams[3] = c
|
||||
adjustStreamPriority(streams, 3, PriorityParam{
|
||||
Weight: 20,
|
||||
StreamDep: 0,
|
||||
Exclusive: true,
|
||||
})
|
||||
if a.parent != c {
|
||||
t.Errorf("Expected A's parent to be C")
|
||||
}
|
||||
if a.weight != 16 {
|
||||
t.Errorf("Expected A's weight to be 16; got %d", a.weight)
|
||||
}
|
||||
if b.parent != c {
|
||||
t.Errorf("Expected B's parent to be C")
|
||||
}
|
||||
if b.weight != 16 {
|
||||
t.Errorf("Expected B's weight to be 16; got %d", b.weight)
|
||||
}
|
||||
if c.parent != nil {
|
||||
t.Errorf("Expected C to have no parent")
|
||||
}
|
||||
if c.weight != 20 {
|
||||
t.Errorf("Expected C's weight to be 20; got %d", b.weight)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPriorityOwnParent(t *testing.T) {
|
||||
streams := make(map[uint32]*stream)
|
||||
a := &stream{
|
||||
parent: nil,
|
||||
weight: 16,
|
||||
}
|
||||
streams[1] = a
|
||||
b := &stream{
|
||||
parent: a,
|
||||
weight: 16,
|
||||
}
|
||||
streams[2] = b
|
||||
adjustStreamPriority(streams, 1, PriorityParam{
|
||||
Weight: 20,
|
||||
StreamDep: 1,
|
||||
})
|
||||
if a.parent != nil {
|
||||
t.Errorf("Expected A's parent to be nil")
|
||||
}
|
||||
if a.weight != 20 {
|
||||
t.Errorf("Expected A's weight to be 20; got %d", a.weight)
|
||||
}
|
||||
if b.parent != a {
|
||||
t.Errorf("Expected B's parent to be A")
|
||||
}
|
||||
if b.weight != 16 {
|
||||
t.Errorf("Expected B's weight to be 16; got %d", b.weight)
|
||||
}
|
||||
|
||||
}
|
||||
2215
vendor/golang.org/x/net/http2/server.go
generated
vendored
Normal file
File diff suppressed because it is too large
3244
vendor/golang.org/x/net/http2/server_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
5021
vendor/golang.org/x/net/http2/testdata/draft-ietf-httpbis-http2.xml
generated
vendored
Normal file
File diff suppressed because it is too large
1664
vendor/golang.org/x/net/http2/transport.go
generated
vendored
Normal file
File diff suppressed because it is too large
1802
vendor/golang.org/x/net/http2/transport_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
262
vendor/golang.org/x/net/http2/write.go
generated
vendored
Normal file
|
|
@@ -0,0 +1,262 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/http2/hpack"
|
||||
)
|
||||
|
||||
// writeFramer is implemented by any type that is used to write frames.
|
||||
type writeFramer interface {
|
||||
writeFrame(writeContext) error
|
||||
}
|
||||
|
||||
// writeContext is the interface needed by the various frame writer
|
||||
// types below. All the writeFrame methods below are scheduled via the
|
||||
// frame writing scheduler (see writeScheduler in writesched.go).
|
||||
//
|
||||
// This interface is implemented by *serverConn.
|
||||
//
|
||||
// TODO: decide whether to a) use this in the client code (which didn't
|
||||
// end up using this yet, because it has a simpler design, not
|
||||
// currently implementing priorities), or b) delete this and
|
||||
// make the server code a bit more concrete.
|
||||
type writeContext interface {
|
||||
Framer() *Framer
|
||||
Flush() error
|
||||
CloseConn() error
|
||||
// HeaderEncoder returns an HPACK encoder that writes to the
|
||||
// returned buffer.
|
||||
HeaderEncoder() (*hpack.Encoder, *bytes.Buffer)
|
||||
}
|
||||
|
||||
// endsStream reports whether the given frame writer w will locally
|
||||
// close the stream.
|
||||
func endsStream(w writeFramer) bool {
|
||||
switch v := w.(type) {
|
||||
case *writeData:
|
||||
return v.endStream
|
||||
case *writeResHeaders:
|
||||
return v.endStream
|
||||
case nil:
|
||||
// This can only happen if the caller reuses w after it's
|
||||
// been intentionally nil'ed out to prevent use. Keep this
|
||||
// here to catch future refactoring breaking it.
|
||||
panic("endsStream called on nil writeFramer")
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type flushFrameWriter struct{}
|
||||
|
||||
func (flushFrameWriter) writeFrame(ctx writeContext) error {
|
||||
return ctx.Flush()
|
||||
}
|
||||
|
||||
type writeSettings []Setting
|
||||
|
||||
func (s writeSettings) writeFrame(ctx writeContext) error {
|
||||
return ctx.Framer().WriteSettings([]Setting(s)...)
|
||||
}
|
||||
|
||||
type writeGoAway struct {
|
||||
maxStreamID uint32
|
||||
code ErrCode
|
||||
}
|
||||
|
||||
func (p *writeGoAway) writeFrame(ctx writeContext) error {
|
||||
err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil)
|
||||
if p.code != 0 {
|
||||
ctx.Flush() // ignore error: we're hanging up on them anyway
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
ctx.CloseConn()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
type writeData struct {
|
||||
streamID uint32
|
||||
p []byte
|
||||
endStream bool
|
||||
}
|
||||
|
||||
func (w *writeData) String() string {
|
||||
return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream)
|
||||
}
|
||||
|
||||
func (w *writeData) writeFrame(ctx writeContext) error {
|
||||
return ctx.Framer().WriteData(w.streamID, w.endStream, w.p)
|
||||
}
|
||||
|
||||
// handlerPanicRST is the message sent from handler goroutines when
|
||||
// the handler panics.
|
||||
type handlerPanicRST struct {
|
||||
StreamID uint32
|
||||
}
|
||||
|
||||
func (hp handlerPanicRST) writeFrame(ctx writeContext) error {
|
||||
return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal)
|
||||
}
|
||||
|
||||
func (se StreamError) writeFrame(ctx writeContext) error {
|
||||
return ctx.Framer().WriteRSTStream(se.StreamID, se.Code)
|
||||
}
|
||||
|
||||
type writePingAck struct{ pf *PingFrame }
|
||||
|
||||
func (w writePingAck) writeFrame(ctx writeContext) error {
|
||||
return ctx.Framer().WritePing(true, w.pf.Data)
|
||||
}
|
||||
|
||||
type writeSettingsAck struct{}
|
||||
|
||||
func (writeSettingsAck) writeFrame(ctx writeContext) error {
|
||||
return ctx.Framer().WriteSettingsAck()
|
||||
}
|
||||
|
||||
// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames
|
||||
// for HTTP response headers or trailers from a server handler.
|
||||
type writeResHeaders struct {
|
||||
streamID uint32
|
||||
httpResCode int // 0 means no ":status" line
|
||||
h http.Header // may be nil
|
||||
trailers []string // if non-nil, which keys of h to write. nil means all.
|
||||
endStream bool
|
||||
|
||||
date string
|
||||
contentType string
|
||||
contentLength string
|
||||
}
|
||||
|
||||
func encKV(enc *hpack.Encoder, k, v string) {
|
||||
if VerboseLogs {
|
||||
log.Printf("http2: server encoding header %q = %q", k, v)
|
||||
}
|
||||
enc.WriteField(hpack.HeaderField{Name: k, Value: v})
|
||||
}
|
||||
|
||||
func (w *writeResHeaders) writeFrame(ctx writeContext) error {
|
||||
enc, buf := ctx.HeaderEncoder()
|
||||
buf.Reset()
|
||||
|
||||
if w.httpResCode != 0 {
|
||||
encKV(enc, ":status", httpCodeString(w.httpResCode))
|
||||
}
|
||||
|
||||
encodeHeaders(enc, w.h, w.trailers)
|
||||
|
||||
if w.contentType != "" {
|
||||
encKV(enc, "content-type", w.contentType)
|
||||
}
|
||||
if w.contentLength != "" {
|
||||
encKV(enc, "content-length", w.contentLength)
|
||||
}
|
||||
if w.date != "" {
|
||||
encKV(enc, "date", w.date)
|
||||
}
|
||||
|
||||
headerBlock := buf.Bytes()
|
||||
if len(headerBlock) == 0 && w.trailers == nil {
|
||||
panic("unexpected empty hpack")
|
||||
}
|
||||
|
||||
// For now we're lazy and just pick the minimum MAX_FRAME_SIZE
|
||||
// that all peers must support (16KB). Later we could care
|
||||
// more and send larger frames if the peer advertised it, but
|
||||
// there's little point. Most headers are small anyway (so we
|
||||
// generally won't have CONTINUATION frames), and extra frames
|
||||
// only waste 9 bytes anyway.
|
||||
const maxFrameSize = 16384
|
||||
|
||||
first := true
|
||||
for len(headerBlock) > 0 {
|
||||
frag := headerBlock
|
||||
if len(frag) > maxFrameSize {
|
||||
frag = frag[:maxFrameSize]
|
||||
}
|
||||
headerBlock = headerBlock[len(frag):]
|
||||
endHeaders := len(headerBlock) == 0
|
||||
var err error
|
||||
if first {
|
||||
first = false
|
||||
err = ctx.Framer().WriteHeaders(HeadersFrameParam{
|
||||
StreamID: w.streamID,
|
||||
BlockFragment: frag,
|
||||
EndStream: w.endStream,
|
||||
EndHeaders: endHeaders,
|
||||
})
|
||||
} else {
|
||||
err = ctx.Framer().WriteContinuation(w.streamID, endHeaders, frag)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type write100ContinueHeadersFrame struct {
|
||||
streamID uint32
|
||||
}
|
||||
|
||||
func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error {
|
||||
enc, buf := ctx.HeaderEncoder()
|
||||
buf.Reset()
|
||||
encKV(enc, ":status", "100")
|
||||
return ctx.Framer().WriteHeaders(HeadersFrameParam{
|
||||
StreamID: w.streamID,
|
||||
BlockFragment: buf.Bytes(),
|
||||
EndStream: false,
|
||||
EndHeaders: true,
|
||||
})
|
||||
}
|
||||
|
||||
type writeWindowUpdate struct {
|
||||
streamID uint32 // or 0 for conn-level
|
||||
n uint32
|
||||
}
|
||||
|
||||
func (wu writeWindowUpdate) writeFrame(ctx writeContext) error {
|
||||
return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n)
|
||||
}
|
||||
|
||||
func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) {
|
||||
if keys == nil {
|
||||
sorter := sorterPool.Get().(*sorter)
|
||||
// Using defer here, since the returned keys from the
|
||||
// sorter.Keys method is only valid until the sorter
|
||||
// is returned:
|
||||
defer sorterPool.Put(sorter)
|
||||
keys = sorter.Keys(h)
|
||||
}
|
||||
for _, k := range keys {
|
||||
vv := h[k]
|
||||
k = lowerHeader(k)
|
||||
if !validHeaderFieldName(k) {
|
||||
// TODO: return an error? golang.org/issue/14048
|
||||
// For now just omit it.
|
||||
continue
|
||||
}
|
||||
isTE := k == "transfer-encoding"
|
||||
for _, v := range vv {
|
||||
if !validHeaderFieldValue(v) {
|
||||
// TODO: return an error? golang.org/issue/14048
|
||||
// For now just omit it.
|
||||
continue
|
||||
}
|
||||
// TODO: more of "8.1.2.2 Connection-Specific Header Fields"
|
||||
if isTE && v != "trailers" {
|
||||
continue
|
||||
}
|
||||
encKV(enc, k, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
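Every frame kind the server can queue is simply a value with a writeFrame method, so extending the set is mechanical. As a hypothetical illustration (not something this commit adds), a PRIORITY writer could follow the same shape, assuming the Framer's WritePriority method:

// writePriority is a hypothetical example of an additional writeFramer
// implementation; it is not used anywhere in this package.
type writePriority struct {
	streamID uint32
	p        PriorityParam
}

func (w writePriority) writeFrame(ctx writeContext) error {
	return ctx.Framer().WritePriority(w.streamID, w.p)
}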
283
vendor/golang.org/x/net/http2/writesched.go
generated
vendored
Normal file
|
|
@@ -0,0 +1,283 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http2
|
||||
|
||||
import "fmt"
|
||||
|
||||
// frameWriteMsg is a request to write a frame.
|
||||
type frameWriteMsg struct {
|
||||
// write is the interface value that does the writing, once the
|
||||
// writeScheduler (below) has decided to select this frame
|
||||
// to write. The write functions are all defined in write.go.
|
||||
write writeFramer
|
||||
|
||||
stream *stream // used for prioritization. nil for non-stream frames.
|
||||
|
||||
// done, if non-nil, must be a buffered channel with space for
|
||||
// 1 message and is sent the return value from write (or an
|
||||
// earlier error) when the frame has been written.
|
||||
done chan error
|
||||
}
|
||||
|
||||
// for debugging only:
|
||||
func (wm frameWriteMsg) String() string {
|
||||
var streamID uint32
|
||||
if wm.stream != nil {
|
||||
streamID = wm.stream.id
|
||||
}
|
||||
var des string
|
||||
if s, ok := wm.write.(fmt.Stringer); ok {
|
||||
des = s.String()
|
||||
} else {
|
||||
des = fmt.Sprintf("%T", wm.write)
|
||||
}
|
||||
return fmt.Sprintf("[frameWriteMsg stream=%d, ch=%v, type: %v]", streamID, wm.done != nil, des)
|
||||
}
|
||||
|
||||
// writeScheduler tracks pending frames to write, priorities, and decides
|
||||
// the next one to use. It is not thread-safe.
|
||||
type writeScheduler struct {
|
||||
// zero are frames not associated with a specific stream.
|
||||
// They're sent before any stream-specific frames.
|
||||
zero writeQueue
|
||||
|
||||
// maxFrameSize is the maximum size of a DATA frame
|
||||
// we'll write. Must be non-zero and between 16K-16M.
|
||||
maxFrameSize uint32
|
||||
|
||||
// sq contains the stream-specific queues, keyed by stream ID.
|
||||
// when a stream is idle, it's deleted from the map.
|
||||
sq map[uint32]*writeQueue
|
||||
|
||||
// canSend is a slice of memory that's reused between frame
|
||||
// scheduling decisions to hold the list of writeQueues (from sq)
|
||||
// which have enough flow control data to send. After canSend is
|
||||
// built, the best is selected.
|
||||
canSend []*writeQueue
|
||||
|
||||
// pool of empty queues for reuse.
|
||||
queuePool []*writeQueue
|
||||
}
|
||||
|
||||
func (ws *writeScheduler) putEmptyQueue(q *writeQueue) {
|
||||
if len(q.s) != 0 {
|
||||
panic("queue must be empty")
|
||||
}
|
||||
ws.queuePool = append(ws.queuePool, q)
|
||||
}
|
||||
|
||||
func (ws *writeScheduler) getEmptyQueue() *writeQueue {
|
||||
ln := len(ws.queuePool)
|
||||
if ln == 0 {
|
||||
return new(writeQueue)
|
||||
}
|
||||
q := ws.queuePool[ln-1]
|
||||
ws.queuePool = ws.queuePool[:ln-1]
|
||||
return q
|
||||
}
|
||||
|
||||
func (ws *writeScheduler) empty() bool { return ws.zero.empty() && len(ws.sq) == 0 }
|
||||
|
||||
func (ws *writeScheduler) add(wm frameWriteMsg) {
|
||||
st := wm.stream
|
||||
if st == nil {
|
||||
ws.zero.push(wm)
|
||||
} else {
|
||||
ws.streamQueue(st.id).push(wm)
|
||||
}
|
||||
}
|
||||
|
||||
func (ws *writeScheduler) streamQueue(streamID uint32) *writeQueue {
|
||||
if q, ok := ws.sq[streamID]; ok {
|
||||
return q
|
||||
}
|
||||
if ws.sq == nil {
|
||||
ws.sq = make(map[uint32]*writeQueue)
|
||||
}
|
||||
q := ws.getEmptyQueue()
|
||||
ws.sq[streamID] = q
|
||||
return q
|
||||
}
|
||||
|
||||
// take returns the most important frame to write and removes it from the scheduler.
|
||||
// It is illegal to call this if the scheduler is empty or if there are no connection-level
|
||||
// flow control bytes available.
|
||||
func (ws *writeScheduler) take() (wm frameWriteMsg, ok bool) {
|
||||
if ws.maxFrameSize == 0 {
|
||||
panic("internal error: ws.maxFrameSize not initialized or invalid")
|
||||
}
|
||||
|
||||
// If there are any frames not associated with streams, prefer those first.
|
||||
// These are usually SETTINGS, etc.
|
||||
if !ws.zero.empty() {
|
||||
return ws.zero.shift(), true
|
||||
}
|
||||
if len(ws.sq) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Next, prioritize frames on streams that aren't DATA frames (no cost).
|
||||
for id, q := range ws.sq {
|
||||
if q.firstIsNoCost() {
|
||||
return ws.takeFrom(id, q)
|
||||
}
|
||||
}
|
||||
|
||||
// Now, all that remains are DATA frames with non-zero bytes to
|
||||
// send. So pick the best one.
|
||||
if len(ws.canSend) != 0 {
|
||||
panic("should be empty")
|
||||
}
|
||||
for _, q := range ws.sq {
|
||||
if n := ws.streamWritableBytes(q); n > 0 {
|
||||
ws.canSend = append(ws.canSend, q)
|
||||
}
|
||||
}
|
||||
if len(ws.canSend) == 0 {
|
||||
return
|
||||
}
|
||||
defer ws.zeroCanSend()
|
||||
|
||||
// TODO: find the best queue
|
||||
q := ws.canSend[0]
|
||||
|
||||
return ws.takeFrom(q.streamID(), q)
|
||||
}
|
||||
|
||||
// zeroCanSend is deferred from take.
|
||||
func (ws *writeScheduler) zeroCanSend() {
|
||||
for i := range ws.canSend {
|
||||
ws.canSend[i] = nil
|
||||
}
|
||||
ws.canSend = ws.canSend[:0]
|
||||
}
|
||||
|
||||
// streamWritableBytes returns the number of DATA bytes we could write
|
||||
// from the given queue's stream, if this stream/queue were
|
||||
// selected. It is an error to call this if q's head isn't a
|
||||
// *writeData.
|
||||
func (ws *writeScheduler) streamWritableBytes(q *writeQueue) int32 {
|
||||
wm := q.head()
|
||||
ret := wm.stream.flow.available() // max we can write
|
||||
if ret == 0 {
|
||||
return 0
|
||||
}
|
||||
if int32(ws.maxFrameSize) < ret {
|
||||
ret = int32(ws.maxFrameSize)
|
||||
}
|
||||
if ret == 0 {
|
||||
panic("internal error: ws.maxFrameSize not initialized or invalid")
|
||||
}
|
||||
wd := wm.write.(*writeData)
|
||||
if len(wd.p) < int(ret) {
|
||||
ret = int32(len(wd.p))
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
func (ws *writeScheduler) takeFrom(id uint32, q *writeQueue) (wm frameWriteMsg, ok bool) {
|
||||
wm = q.head()
|
||||
// If the first item in this queue costs flow control tokens
|
||||
// and we don't have enough, write as much as we can.
|
||||
if wd, ok := wm.write.(*writeData); ok && len(wd.p) > 0 {
|
||||
allowed := wm.stream.flow.available() // max we can write
|
||||
if allowed == 0 {
|
||||
// No quota available. Caller can try the next stream.
|
||||
return frameWriteMsg{}, false
|
||||
}
|
||||
if int32(ws.maxFrameSize) < allowed {
|
||||
allowed = int32(ws.maxFrameSize)
|
||||
}
|
||||
// TODO: further restrict the allowed size, because even if
|
||||
// the peer says it's okay to write 16MB data frames, we might
|
||||
// want to write smaller ones to properly weight competing
|
||||
// streams' priorities.
|
||||
|
||||
if len(wd.p) > int(allowed) {
|
||||
wm.stream.flow.take(allowed)
|
||||
chunk := wd.p[:allowed]
|
||||
wd.p = wd.p[allowed:]
|
||||
// Make up a new write message of a valid size, rather
|
||||
// than shifting one off the queue.
|
||||
return frameWriteMsg{
|
||||
stream: wm.stream,
|
||||
write: &writeData{
|
||||
streamID: wd.streamID,
|
||||
p: chunk,
|
||||
// even if the original had endStream set, there
|
||||
// are bytes remaining because len(wd.p) > allowed,
|
||||
// so we know endStream is false:
|
||||
endStream: false,
|
||||
},
|
||||
// our caller is blocking on the final DATA frame, not
|
||||
// these intermediates, so no need to wait:
|
||||
done: nil,
|
||||
}, true
|
||||
}
|
||||
wm.stream.flow.take(int32(len(wd.p)))
|
||||
}
|
||||
|
||||
q.shift()
|
||||
if q.empty() {
|
||||
ws.putEmptyQueue(q)
|
||||
delete(ws.sq, id)
|
||||
}
|
||||
return wm, true
|
||||
}
|
||||
|
||||
func (ws *writeScheduler) forgetStream(id uint32) {
|
||||
q, ok := ws.sq[id]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
delete(ws.sq, id)
|
||||
|
||||
// But keep it for others later.
|
||||
for i := range q.s {
|
||||
q.s[i] = frameWriteMsg{}
|
||||
}
|
||||
q.s = q.s[:0]
|
||||
ws.putEmptyQueue(q)
|
||||
}
|
||||
|
||||
type writeQueue struct {
|
||||
s []frameWriteMsg
|
||||
}
|
||||
|
||||
// streamID returns the stream ID for a non-empty stream-specific queue.
|
||||
func (q *writeQueue) streamID() uint32 { return q.s[0].stream.id }
|
||||
|
||||
func (q *writeQueue) empty() bool { return len(q.s) == 0 }
|
||||
|
||||
func (q *writeQueue) push(wm frameWriteMsg) {
|
||||
q.s = append(q.s, wm)
|
||||
}
|
||||
|
||||
// head returns the next item that would be removed by shift.
|
||||
func (q *writeQueue) head() frameWriteMsg {
|
||||
if len(q.s) == 0 {
|
||||
panic("invalid use of queue")
|
||||
}
|
||||
return q.s[0]
|
||||
}
|
||||
|
||||
func (q *writeQueue) shift() frameWriteMsg {
|
||||
if len(q.s) == 0 {
|
||||
panic("invalid use of queue")
|
||||
}
|
||||
wm := q.s[0]
|
||||
// TODO: less copy-happy queue.
|
||||
copy(q.s, q.s[1:])
|
||||
q.s[len(q.s)-1] = frameWriteMsg{}
|
||||
q.s = q.s[:len(q.s)-1]
|
||||
return wm
|
||||
}
|
||||
|
||||
func (q *writeQueue) firstIsNoCost() bool {
|
||||
if df, ok := q.s[0].write.(*writeData); ok {
|
||||
return len(df.p) == 0
|
||||
}
|
||||
return true
|
||||
}
|
||||
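The subtlest part of take/takeFrom above is how an oversized DATA frame is split: the sendable size is the smaller of the stream's flow-control allowance and maxFrameSize, a prefix of that size is written now, and the remainder stays queued with endStream forced to false. A simplified, self-contained distillation of that arithmetic (an illustration, not code from this commit):

// splitDataChunk mirrors the clamping done in takeFrom: it returns the
// prefix that may be written immediately and the remainder to requeue.
// (The real scheduler bails out earlier when flowAvail is zero.)
func splitDataChunk(p []byte, flowAvail, maxFrameSize int32) (chunk, rest []byte) {
	allowed := flowAvail
	if maxFrameSize < allowed {
		allowed = maxFrameSize
	}
	if int32(len(p)) <= allowed {
		return p, nil // fits in a single frame; nothing to requeue
	}
	return p[:allowed], p[allowed:]
}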
356
vendor/golang.org/x/net/http2/z_spec_test.go
generated
vendored
Normal file
|
|
@@ -0,0 +1,356 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/xml"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var coverSpec = flag.Bool("coverspec", false, "Run spec coverage tests")
|
||||
|
||||
// The global map of sentence coverage for the http2 spec.
|
||||
var defaultSpecCoverage specCoverage
|
||||
|
||||
var loadSpecOnce sync.Once
|
||||
|
||||
func loadSpec() {
|
||||
if f, err := os.Open("testdata/draft-ietf-httpbis-http2.xml"); err != nil {
|
||||
panic(err)
|
||||
} else {
|
||||
defaultSpecCoverage = readSpecCov(f)
|
||||
f.Close()
|
||||
}
|
||||
}
|
||||
|
||||
// covers marks all sentences for section sec in defaultSpecCoverage. Sentences not
|
||||
// "covered" will be included in report outputted by TestSpecCoverage.
|
||||
func covers(sec, sentences string) {
|
||||
loadSpecOnce.Do(loadSpec)
|
||||
defaultSpecCoverage.cover(sec, sentences)
|
||||
}
|
||||
|
||||
type specPart struct {
|
||||
section string
|
||||
sentence string
|
||||
}
|
||||
|
||||
func (ss specPart) Less(oo specPart) bool {
|
||||
atoi := func(s string) int {
|
||||
n, err := strconv.Atoi(s)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return n
|
||||
}
|
||||
a := strings.Split(ss.section, ".")
|
||||
b := strings.Split(oo.section, ".")
|
||||
for len(a) > 0 {
|
||||
if len(b) == 0 {
|
||||
return false
|
||||
}
|
||||
x, y := atoi(a[0]), atoi(b[0])
|
||||
if x == y {
|
||||
a, b = a[1:], b[1:]
|
||||
continue
|
||||
}
|
||||
return x < y
|
||||
}
|
||||
if len(b) > 0 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type bySpecSection []specPart
|
||||
|
||||
func (a bySpecSection) Len() int { return len(a) }
|
||||
func (a bySpecSection) Less(i, j int) bool { return a[i].Less(a[j]) }
|
||||
func (a bySpecSection) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
|
||||
type specCoverage struct {
|
||||
coverage map[specPart]bool
|
||||
d *xml.Decoder
|
||||
}
|
||||
|
||||
func joinSection(sec []int) string {
|
||||
s := fmt.Sprintf("%d", sec[0])
|
||||
for _, n := range sec[1:] {
|
||||
s = fmt.Sprintf("%s.%d", s, n)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (sc specCoverage) readSection(sec []int) {
|
||||
var (
|
||||
buf = new(bytes.Buffer)
|
||||
sub = 0
|
||||
)
|
||||
for {
|
||||
tk, err := sc.d.Token()
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
return
|
||||
}
|
||||
panic(err)
|
||||
}
|
||||
switch v := tk.(type) {
|
||||
case xml.StartElement:
|
||||
if skipElement(v) {
|
||||
if err := sc.d.Skip(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if v.Name.Local == "section" {
|
||||
sub++
|
||||
}
|
||||
break
|
||||
}
|
||||
switch v.Name.Local {
|
||||
case "section":
|
||||
sub++
|
||||
sc.readSection(append(sec, sub))
|
||||
case "xref":
|
||||
buf.Write(sc.readXRef(v))
|
||||
}
|
||||
case xml.CharData:
|
||||
if len(sec) == 0 {
|
||||
break
|
||||
}
|
||||
buf.Write(v)
|
||||
case xml.EndElement:
|
||||
if v.Name.Local == "section" {
|
||||
sc.addSentences(joinSection(sec), buf.String())
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (sc specCoverage) readXRef(se xml.StartElement) []byte {
|
||||
var b []byte
|
||||
for {
|
||||
tk, err := sc.d.Token()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
switch v := tk.(type) {
|
||||
case xml.CharData:
|
||||
if b != nil {
|
||||
panic("unexpected CharData")
|
||||
}
|
||||
b = []byte(string(v))
|
||||
case xml.EndElement:
|
||||
if v.Name.Local != "xref" {
|
||||
panic("expected </xref>")
|
||||
}
|
||||
if b != nil {
|
||||
return b
|
||||
}
|
||||
sig := attrSig(se)
|
||||
switch sig {
|
||||
case "target":
|
||||
return []byte(fmt.Sprintf("[%s]", attrValue(se, "target")))
|
||||
case "fmt-of,rel,target", "fmt-,,rel,target":
|
||||
return []byte(fmt.Sprintf("[%s, %s]", attrValue(se, "target"), attrValue(se, "rel")))
|
||||
case "fmt-of,sec,target", "fmt-,,sec,target":
|
||||
return []byte(fmt.Sprintf("[section %s of %s]", attrValue(se, "sec"), attrValue(se, "target")))
|
||||
case "fmt-of,rel,sec,target":
|
||||
return []byte(fmt.Sprintf("[section %s of %s, %s]", attrValue(se, "sec"), attrValue(se, "target"), attrValue(se, "rel")))
|
||||
default:
|
||||
panic(fmt.Sprintf("unknown attribute signature %q in %#v", sig, fmt.Sprintf("%#v", se)))
|
||||
}
|
||||
default:
|
||||
panic(fmt.Sprintf("unexpected tag %q", v))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var skipAnchor = map[string]bool{
|
||||
"intro": true,
|
||||
"Overview": true,
|
||||
}
|
||||
|
||||
var skipTitle = map[string]bool{
|
||||
"Acknowledgements": true,
|
||||
"Change Log": true,
|
||||
"Document Organization": true,
|
||||
"Conventions and Terminology": true,
|
||||
}
|
||||
|
||||
func skipElement(s xml.StartElement) bool {
|
||||
switch s.Name.Local {
|
||||
case "artwork":
|
||||
return true
|
||||
case "section":
|
||||
for _, attr := range s.Attr {
|
||||
switch attr.Name.Local {
|
||||
case "anchor":
|
||||
if skipAnchor[attr.Value] || strings.HasPrefix(attr.Value, "changes.since.") {
|
||||
return true
|
||||
}
|
||||
case "title":
|
||||
if skipTitle[attr.Value] {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func readSpecCov(r io.Reader) specCoverage {
|
||||
sc := specCoverage{
|
||||
coverage: map[specPart]bool{},
|
||||
d: xml.NewDecoder(r)}
|
||||
sc.readSection(nil)
|
||||
return sc
|
||||
}
|
||||
|
||||
func (sc specCoverage) addSentences(sec string, sentence string) {
|
||||
for _, s := range parseSentences(sentence) {
|
||||
sc.coverage[specPart{sec, s}] = false
|
||||
}
|
||||
}
|
||||
|
||||
func (sc specCoverage) cover(sec string, sentence string) {
|
||||
for _, s := range parseSentences(sentence) {
|
||||
p := specPart{sec, s}
|
||||
if _, ok := sc.coverage[p]; !ok {
|
||||
panic(fmt.Sprintf("Not found in spec: %q, %q", sec, s))
|
||||
}
|
||||
sc.coverage[specPart{sec, s}] = true
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
var whitespaceRx = regexp.MustCompile(`\s+`)
|
||||
|
||||
func parseSentences(sens string) []string {
|
||||
sens = strings.TrimSpace(sens)
|
||||
if sens == "" {
|
||||
return nil
|
||||
}
|
||||
ss := strings.Split(whitespaceRx.ReplaceAllString(sens, " "), ". ")
|
||||
for i, s := range ss {
|
||||
s = strings.TrimSpace(s)
|
||||
if !strings.HasSuffix(s, ".") {
|
||||
s += "."
|
||||
}
|
||||
ss[i] = s
|
||||
}
|
||||
return ss
|
||||
}
|
||||
|
||||
func TestSpecParseSentences(t *testing.T) {
|
||||
tests := []struct {
|
||||
ss string
|
||||
want []string
|
||||
}{
|
||||
{"Sentence 1. Sentence 2.",
|
||||
[]string{
|
||||
"Sentence 1.",
|
||||
"Sentence 2.",
|
||||
}},
|
||||
{"Sentence 1. \nSentence 2.\tSentence 3.",
|
||||
[]string{
|
||||
"Sentence 1.",
|
||||
"Sentence 2.",
|
||||
"Sentence 3.",
|
||||
}},
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
got := parseSentences(tt.ss)
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("%d: got = %q, want %q", i, got, tt.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSpecCoverage(t *testing.T) {
|
||||
if !*coverSpec {
|
||||
t.Skip()
|
||||
}
|
||||
|
||||
loadSpecOnce.Do(loadSpec)
|
||||
|
||||
var (
|
||||
list []specPart
|
||||
cv = defaultSpecCoverage.coverage
|
||||
total = len(cv)
|
||||
complete = 0
|
||||
)
|
||||
|
||||
for sp, touched := range defaultSpecCoverage.coverage {
|
||||
if touched {
|
||||
complete++
|
||||
} else {
|
||||
list = append(list, sp)
|
||||
}
|
||||
}
|
||||
sort.Stable(bySpecSection(list))
|
||||
|
||||
if testing.Short() && len(list) > 5 {
|
||||
list = list[:5]
|
||||
}
|
||||
|
||||
for _, p := range list {
|
||||
t.Errorf("\tSECTION %s: %s", p.section, p.sentence)
|
||||
}
|
||||
|
||||
t.Logf("%d/%d (%d%%) sentances covered", complete, total, (complete/total)*100)
|
||||
}
|
||||
|
||||
func attrSig(se xml.StartElement) string {
|
||||
var names []string
|
||||
for _, attr := range se.Attr {
|
||||
if attr.Name.Local == "fmt" {
|
||||
names = append(names, "fmt-"+attr.Value)
|
||||
} else {
|
||||
names = append(names, attr.Name.Local)
|
||||
}
|
||||
}
|
||||
sort.Strings(names)
|
||||
return strings.Join(names, ",")
|
||||
}
|
||||
|
||||
func attrValue(se xml.StartElement, attr string) string {
|
||||
for _, a := range se.Attr {
|
||||
if a.Name.Local == attr {
|
||||
return a.Value
|
||||
}
|
||||
}
|
||||
panic("unknown attribute " + attr)
|
||||
}
|
||||
|
||||
func TestSpecPartLess(t *testing.T) {
|
||||
tests := []struct {
|
||||
sec1, sec2 string
|
||||
want bool
|
||||
}{
|
||||
{"6.2.1", "6.2", false},
|
||||
{"6.2", "6.2.1", true},
|
||||
{"6.10", "6.10.1", true},
|
||||
{"6.10", "6.1.1", false}, // 10, not 1
|
||||
{"6.1", "6.1", false}, // equal, so not less
|
||||
}
|
||||
for _, tt := range tests {
|
||||
got := (specPart{tt.sec1, "foo"}).Less(specPart{tt.sec2, "foo"})
|
||||
if got != tt.want {
|
||||
t.Errorf("Less(%q, %q) = %v; want %v", tt.sec1, tt.sec2, got, tt.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
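For completeness, the coverage map above is meant to be marked from other tests in the package: a test that exercises a requirement calls covers with the section number and the exact sentence of the spec it verified. The section and sentence below are illustrative assumptions, and covers panics unless the quoted text matches the parsed spec verbatim, so treat this strictly as a sketch:

func TestSpecCoverageMarkingSketch(t *testing.T) {
	// Hypothetical example: after driving a PING exchange, record the
	// sentence that the test verified.
	covers("6.7", `Receivers of a PING frame that does not include an ACK flag MUST send a PING frame with the ACK flag set in response, with an identical payload.`)
}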