Upgrade the operator to use Operator SDK v1.33.0 (#182)

* Move controller package inside internal directory

Based on the go/v4 project structure, the following changed (see the import sketch below):
- Package `controllers` is now named `controller`
- Package `controller` now lives inside the new `internal` directory
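
For illustration, an import of the controllers package changes like this (the module path is assumed to be `github.com/1Password/onepassword-operator`):

    // Before (go/v3 layout):
    import "github.com/1Password/onepassword-operator/controllers"

    // After (go/v4 layout):
    import "github.com/1Password/onepassword-operator/internal/controller"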

* Move main.go into cmd directory

Based on the new go/v4 project structure, `main.go` now lives in the `cmd` directory.
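
A minimal sketch of the resulting layout (only the moved pieces are shown; the controller file name is illustrative):

    cmd/
        main.go
    internal/
        controller/
            onepassworditem_controller.go
            ...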

* Change package import in main.go

* Update go mod dependencies

Update the dependencies to match the versions obtained by scaffolding a new operator project with `kubebuilder init --domain onepassword.com --plugins=go/v4`.
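
For reference, a representative `go.mod` excerpt after the update (the module path and exact versions are assumptions here, apart from controller-runtime v0.16.3, which is mentioned below):

    module github.com/1Password/onepassword-operator

    go 1.20

    require (
        k8s.io/api v0.28.3
        k8s.io/apimachinery v0.28.3
        k8s.io/client-go v0.28.3
        sigs.k8s.io/controller-runtime v0.16.3
    )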

This follows the migration steps for moving from go/v3 to go/v4 (https://book.kubebuilder.io/migration/migration_guide_gov3_to_gov4).

* Update vendor

* Adjust code for breaking changes from pkg update

The sigs.k8s.io/controller-runtime package introduced breaking changes between v0.14.5 and v0.16.3. This commit adapts our code to achieve the same behavior using the new functionality available.
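
A minimal sketch of the kind of adjustment involved, using the v0.16 APIs (the address and port values are placeholders):

    package main

    import (
        ctrl "sigs.k8s.io/controller-runtime"
        metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
        "sigs.k8s.io/controller-runtime/pkg/webhook"
    )

    func main() {
        // v0.14 configured these with flat fields (MetricsBindAddress, Port);
        // v0.16 moves them into dedicated option structs.
        mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
            Metrics:       metricsserver.Options{BindAddress: ":8080"},
            WebhookServer: webhook.NewServer(webhook.Options{Port: 9443}),
        })
        if err != nil {
            panic(err)
        }
        _ = mgr
    }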

* Adjust paths to connect yaml files

Since `main.go` now lives in the `cmd` directory, the paths to the files used for deploying Connect have to be adjusted relative to the new location `main.go` is executed from.
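
A hypothetical illustration only; the actual constant and file names in this repo may differ:

    // The yaml locations are resolved relative to where the binary is
    // executed from, so after the move the code points one directory up.
    const connectDeploymentPath = "../config/connect/deployment.yaml" // was "config/connect/deployment.yaml"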

* Update files based on new structure and scaffolding

These changes follow the new project structure and scaffolding produced by the go/v4 plugin, per the migration steps for moving to go/v4 (https://book.kubebuilder.io/migration/migration_guide_gov3_to_gov4).

* Update config files

These updates adopt the Kustomize v4 syntax.

This is part of the upgrade to go/v4 (https://book.kubebuilder.io/migration/migration_guide_gov3_to_gov4).
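
For example (an illustrative kustomization.yaml fragment, not this repo's actual file), the v4 syntax replaces several deprecated fields:

    # kustomization.yaml (illustrative)
    resources:   # replaces the deprecated `bases:` field
    - ../crd
    - ../rbac
    - ../manager
    patches:     # replaces `patchesStrategicMerge:`
    - path: manager_auth_proxy_patch.yaml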

* Update dependencies and Go version

* Update vendor

* Update Kubernetes tools versions

* Update operator version in Makefile

The version in the Makefile now matches the version of the operator.

* Update Operator SDK version in version.go

* Adjust generated deepcopy

The legacy `// +build` tag is no longer emitted by the latest generated scaffolding (only the `//go:build` constraint remains), so it is removed.
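
Concretely, the generated file header shrinks from the dual-syntax form to the `//go:build` constraint alone:

    // Before:
    //go:build !ignore_autogenerated
    // +build !ignore_autogenerated

    // After:
    //go:build !ignore_autogenerated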

* Update copyright year

* Bring back missing changes from migration

Some Makefile customization was lost during the migration; specifically, the namespace customization for the `make deploy` command (see the sketch below).

We also push changes to kustomization.yaml to make the deploy process smoother.
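
A sketch of the restored `make deploy` customization (the variable name and target wiring are assumptions, not the exact Makefile contents):

    deploy: manifests kustomize ## Deploy controller to the cluster, pinned to our namespace.
    	cd config/default && $(KUSTOMIZE) edit set namespace $(OPERATOR_NAMESPACE)
    	$(KUSTOMIZE) build config/default | kubectl apply -f -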

* Add RBAC perms for coordination.k8s.io

It appears that with the latest changes to Kubernetes and Kustomize, we need to grant additional RBAC to the service account so that it can properly access the `leases` resource used for leader election.
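
One way to grant this is via a kubebuilder RBAC marker on the reconciler (a sketch; the change may instead edit the role YAML directly):

    //+kubebuilder:rbac:groups=coordination.k8s.io,resources=leases,verbs=get;list;watch;create;update;patch;delete

Running `make manifests` then regenerates the role with the corresponding rule for `leases`.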

* Optimize Dockerfile

The Dockerfile had a step for caching dependencies (`go mod download`). This is redundant because we include the vendor directory, so the step can be removed to speed up the image build.
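
A sketch of the simplified build stage (assumed layout; not the repo's exact Dockerfile):

    FROM golang:1.20 AS builder
    WORKDIR /workspace
    COPY go.mod go.sum ./
    # RUN go mod download   <- removed: dependencies already live in vendor/
    COPY vendor/ vendor/
    COPY cmd/ cmd/
    COPY internal/ internal/
    RUN CGO_ENABLED=0 go build -mod=vendor -o manager ./cmd/main.go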
Author: Eduard Filip
Date: 2024-01-25 14:21:31 +01:00 (committed by GitHub)
Parent: 8fc852a4dd
Commit: f72e5243b0
1356 changed files with 86780 additions and 43671 deletions

vendor/golang.org/x/net/html/doc.go generated vendored

@@ -99,14 +99,20 @@ Care should be taken when parsing and interpreting HTML, whether full documents
or fragments, within the framework of the HTML specification, especially with
regard to untrusted inputs.
This package provides both a tokenizer and a parser. Only the parser constructs
a DOM according to the HTML specification, resolving malformed and misplaced
tags where appropriate. The tokenizer simply tokenizes the HTML presented to it,
and as such does not resolve issues that may exist in the processed HTML,
producing a literal interpretation of the input.
This package provides both a tokenizer and a parser, which implement the
tokenization, and tokenization and tree construction stages of the WHATWG HTML
parsing specification respectively. While the tokenizer parses and normalizes
individual HTML tokens, only the parser constructs the DOM tree from the
tokenized HTML, as described in the tree construction stage of the
specification, dynamically modifying or extending the docuemnt's DOM tree.
If your use case requires semantically well-formed HTML, as defined by the
WHATWG specifiction, the parser should be used rather than the tokenizer.
If your use case requires semantically well-formed HTML documents, as defined by
the WHATWG specification, the parser should be used rather than the tokenizer.
In security contexts, if trust decisions are being made using the tokenized or
parsed content, the input must be re-serialized (for instance by using Render or
Token.String) in order for those trust decisions to hold, as the process of
tokenization or parsing may alter the content.
*/
package html // import "golang.org/x/net/html"

vendor/golang.org/x/net/html/render.go generated vendored

@@ -194,9 +194,8 @@ func render1(w writer, n *Node) error {
}
}
// Render any child nodes.
switch n.Data {
case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "xmp":
// Render any child nodes
if childTextNodesAreLiteral(n) {
for c := n.FirstChild; c != nil; c = c.NextSibling {
if c.Type == TextNode {
if _, err := w.WriteString(c.Data); err != nil {
@@ -213,7 +212,7 @@ func render1(w writer, n *Node) error {
// last element in the file, with no closing tag.
return plaintextAbort
}
default:
} else {
for c := n.FirstChild; c != nil; c = c.NextSibling {
if err := render1(w, c); err != nil {
return err
@@ -231,6 +230,27 @@ func render1(w writer, n *Node) error {
return w.WriteByte('>')
}
func childTextNodesAreLiteral(n *Node) bool {
// Per WHATWG HTML 13.3, if the parent of the current node is a style,
// script, xmp, iframe, noembed, noframes, or plaintext element, and the
// current node is a text node, append the value of the node's data
// literally. The specification is not explicit about it, but we only
// enforce this if we are in the HTML namespace (i.e. when the namespace is
// "").
// NOTE: we also always include noscript elements, although the
// specification states that they should only be rendered as such if
// scripting is enabled for the node (which is not something we track).
if n.Namespace != "" {
return false
}
switch n.Data {
case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "xmp":
return true
default:
return false
}
}
// writeQuoted writes s to w surrounded by quotes. Normally it will use double
// quotes, but if s contains a double quote, it will use single quotes.
// It is used for writing the identifiers in a doctype declaration.

vendor/golang.org/x/net/html/token.go generated vendored

@@ -913,7 +913,14 @@ func (z *Tokenizer) readTagAttrKey() {
case ' ', '\n', '\r', '\t', '\f', '/':
z.pendingAttr[0].end = z.raw.end - 1
return
case '=', '>':
case '=':
if z.pendingAttr[0].start+1 == z.raw.end {
// WHATWG 13.2.5.32, if we see an equals sign before the attribute name
// begins, we treat it as a character in the attribute name and continue.
continue
}
fallthrough
case '>':
z.raw.end--
z.pendingAttr[0].end = z.raw.end
return

vendor/golang.org/x/net/http2/Dockerfile generated vendored (deleted)

@@ -1,51 +0,0 @@
#
# This Dockerfile builds a recent curl with HTTP/2 client support, using
# a recent nghttp2 build.
#
# See the Makefile for how to tag it. If Docker and that image is found, the
# Go tests use this curl binary for integration tests.
#
FROM ubuntu:trusty
RUN apt-get update && \
apt-get upgrade -y && \
apt-get install -y git-core build-essential wget
RUN apt-get install -y --no-install-recommends \
autotools-dev libtool pkg-config zlib1g-dev \
libcunit1-dev libssl-dev libxml2-dev libevent-dev \
automake autoconf
# The list of packages nghttp2 recommends for h2load:
RUN apt-get install -y --no-install-recommends make binutils \
autoconf automake autotools-dev \
libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \
libev-dev libevent-dev libjansson-dev libjemalloc-dev \
cython python3.4-dev python-setuptools
# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached:
ENV NGHTTP2_VER 895da9a
RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git
WORKDIR /root/nghttp2
RUN git reset --hard $NGHTTP2_VER
RUN autoreconf -i
RUN automake
RUN autoconf
RUN ./configure
RUN make
RUN make install
WORKDIR /root
RUN wget https://curl.se/download/curl-7.45.0.tar.gz
RUN tar -zxvf curl-7.45.0.tar.gz
WORKDIR /root/curl-7.45.0
RUN ./configure --with-ssl --with-nghttp2=/usr/local
RUN make
RUN make install
RUN ldconfig
CMD ["-h"]
ENTRYPOINT ["/usr/local/bin/curl"]

vendor/golang.org/x/net/http2/Makefile generated vendored (deleted)

@@ -1,3 +0,0 @@
curlimage:
docker build -t gohttp2/curl .

vendor/golang.org/x/net/http2/pipe.go generated vendored

@@ -88,13 +88,9 @@ func (p *pipe) Write(d []byte) (n int, err error) {
p.c.L = &p.mu
}
defer p.c.Signal()
if p.err != nil {
if p.err != nil || p.breakErr != nil {
return 0, errClosedPipeWrite
}
if p.breakErr != nil {
p.unread += len(d)
return len(d), nil // discard when there is no reader
}
return p.b.Write(d)
}

vendor/golang.org/x/net/http2/server.go generated vendored

@@ -441,7 +441,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
if s.NewWriteScheduler != nil {
sc.writeSched = s.NewWriteScheduler()
} else {
sc.writeSched = NewPriorityWriteScheduler(nil)
sc.writeSched = newRoundRobinWriteScheduler()
}
// These start at the RFC-specified defaults. If there is a higher
@@ -581,9 +581,11 @@ type serverConn struct {
advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client
curClientStreams uint32 // number of open streams initiated by the client
curPushedStreams uint32 // number of open streams initiated by server push
curHandlers uint32 // number of running handler goroutines
maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests
maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes
streams map[uint32]*stream
unstartedHandlers []unstartedHandler
initialStreamSendWindowSize int32
maxFrameSize int32
peerMaxHeaderListSize uint32 // zero means unknown (default)
@@ -981,6 +983,8 @@ func (sc *serverConn) serve() {
return
case gracefulShutdownMsg:
sc.startGracefulShutdownInternal()
case handlerDoneMsg:
sc.handlerDone()
default:
panic("unknown timer")
}
@@ -1012,14 +1016,6 @@ func (sc *serverConn) serve() {
}
}
func (sc *serverConn) awaitGracefulShutdown(sharedCh <-chan struct{}, privateCh chan struct{}) {
select {
case <-sc.doneServing:
case <-sharedCh:
close(privateCh)
}
}
type serverMessage int
// Message values sent to serveMsgCh.
@@ -1028,6 +1024,7 @@ var (
idleTimerMsg = new(serverMessage)
shutdownTimerMsg = new(serverMessage)
gracefulShutdownMsg = new(serverMessage)
handlerDoneMsg = new(serverMessage)
)
func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) }
@@ -1822,15 +1819,18 @@ func (sc *serverConn) processData(f *DataFrame) error {
}
if len(data) > 0 {
st.bodyBytes += int64(len(data))
wrote, err := st.body.Write(data)
if err != nil {
// The handler has closed the request body.
// Return the connection-level flow control for the discarded data,
// but not the stream-level flow control.
sc.sendWindowUpdate(nil, int(f.Length)-wrote)
return sc.countError("body_write_err", streamError(id, ErrCodeStreamClosed))
return nil
}
if wrote != len(data) {
panic("internal error: bad Writer")
}
st.bodyBytes += int64(len(data))
}
// Return any padded flow control now, since we won't
@@ -1897,9 +1897,11 @@ func (st *stream) copyTrailersToHandlerRequest() {
// onReadTimeout is run on its own goroutine (from time.AfterFunc)
// when the stream's ReadTimeout has fired.
func (st *stream) onReadTimeout() {
// Wrap the ErrDeadlineExceeded to avoid callers depending on us
// returning the bare error.
st.body.CloseWithError(fmt.Errorf("%w", os.ErrDeadlineExceeded))
if st.body != nil {
// Wrap the ErrDeadlineExceeded to avoid callers depending on us
// returning the bare error.
st.body.CloseWithError(fmt.Errorf("%w", os.ErrDeadlineExceeded))
}
}
// onWriteTimeout is run on its own goroutine (from time.AfterFunc)
@@ -2017,13 +2019,10 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
// (in Go 1.8), though. That's a more sane option anyway.
if sc.hs.ReadTimeout != 0 {
sc.conn.SetReadDeadline(time.Time{})
if st.body != nil {
st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
}
st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
}
go sc.runHandler(rw, req, handler)
return nil
return sc.scheduleHandler(id, rw, req, handler)
}
func (sc *serverConn) upgradeRequest(req *http.Request) {
@@ -2043,6 +2042,10 @@ func (sc *serverConn) upgradeRequest(req *http.Request) {
sc.conn.SetReadDeadline(time.Time{})
}
// This is the first request on the connection,
// so start the handler directly rather than going
// through scheduleHandler.
sc.curHandlers++
go sc.runHandler(rw, req, sc.handler.ServeHTTP)
}
@@ -2283,8 +2286,62 @@ func (sc *serverConn) newResponseWriter(st *stream, req *http.Request) *response
return &responseWriter{rws: rws}
}
type unstartedHandler struct {
streamID uint32
rw *responseWriter
req *http.Request
handler func(http.ResponseWriter, *http.Request)
}
// scheduleHandler starts a handler goroutine,
// or schedules one to start as soon as an existing handler finishes.
func (sc *serverConn) scheduleHandler(streamID uint32, rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) error {
sc.serveG.check()
maxHandlers := sc.advMaxStreams
if sc.curHandlers < maxHandlers {
sc.curHandlers++
go sc.runHandler(rw, req, handler)
return nil
}
if len(sc.unstartedHandlers) > int(4*sc.advMaxStreams) {
return sc.countError("too_many_early_resets", ConnectionError(ErrCodeEnhanceYourCalm))
}
sc.unstartedHandlers = append(sc.unstartedHandlers, unstartedHandler{
streamID: streamID,
rw: rw,
req: req,
handler: handler,
})
return nil
}
func (sc *serverConn) handlerDone() {
sc.serveG.check()
sc.curHandlers--
i := 0
maxHandlers := sc.advMaxStreams
for ; i < len(sc.unstartedHandlers); i++ {
u := sc.unstartedHandlers[i]
if sc.streams[u.streamID] == nil {
// This stream was reset before its goroutine had a chance to start.
continue
}
if sc.curHandlers >= maxHandlers {
break
}
sc.curHandlers++
go sc.runHandler(u.rw, u.req, u.handler)
sc.unstartedHandlers[i] = unstartedHandler{} // don't retain references
}
sc.unstartedHandlers = sc.unstartedHandlers[i:]
if len(sc.unstartedHandlers) == 0 {
sc.unstartedHandlers = nil
}
}
// Run on its own goroutine.
func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
defer sc.sendServeMsg(handlerDoneMsg)
didPanic := true
defer func() {
rw.rws.stream.cancelCtx()
@@ -2426,7 +2483,7 @@ type requestBody struct {
conn *serverConn
closeOnce sync.Once // for use by Close only
sawEOF bool // for use by Read only
pipe *pipe // non-nil if we have a HTTP entity message body
pipe *pipe // non-nil if we have an HTTP entity message body
needsContinue bool // need to send a 100-continue
}
@@ -2566,7 +2623,8 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
clen = ""
}
}
if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) {
_, hasContentLength := rws.snapHeader["Content-Length"]
if !hasContentLength && clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) {
clen = strconv.Itoa(len(p))
}
_, hasContentType := rws.snapHeader["Content-Type"]
@@ -2771,7 +2829,7 @@ func (w *responseWriter) FlushError() error {
err = rws.bw.Flush()
} else {
// The bufio.Writer won't call chunkWriter.Write
// (writeChunk with zero bytes, so we have to do it
// (writeChunk with zero bytes), so we have to do it
// ourselves to force the HTTP response header and/or
// final DATA frame (with END_STREAM) to be sent.
_, err = chunkWriter{rws}.Write(nil)

vendor/golang.org/x/net/http2/transport.go generated vendored

@@ -19,6 +19,7 @@ import (
"io/fs"
"log"
"math"
"math/bits"
mathrand "math/rand"
"net"
"net/http"
@@ -290,8 +291,7 @@ func (t *Transport) initConnPool() {
// HTTP/2 server.
type ClientConn struct {
t *Transport
tconn net.Conn // usually *tls.Conn, except specialized impls
tconnClosed bool
tconn net.Conn // usually *tls.Conn, except specialized impls
tlsState *tls.ConnectionState // nil only for specialized impls
reused uint32 // whether conn is being reused; atomic
singleUse bool // whether being used for a single http.Request
@@ -518,11 +518,14 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
func authorityAddr(scheme string, authority string) (addr string) {
host, port, err := net.SplitHostPort(authority)
if err != nil { // authority didn't have a port
host = authority
port = ""
}
if port == "" { // authority's port was empty
port = "443"
if scheme == "http" {
port = "80"
}
host = authority
}
if a, err := idna.ToASCII(host); err == nil {
host = a
@@ -560,10 +563,11 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
traceGotConn(req, cc, reused)
res, err := cc.RoundTrip(req)
if err != nil && retry <= 6 {
roundTripErr := err
if req, err = shouldRetryRequest(req, err); err == nil {
// After the first retry, do exponential backoff with 10% jitter.
if retry == 0 {
t.vlogf("RoundTrip retrying after failure: %v", err)
t.vlogf("RoundTrip retrying after failure: %v", roundTripErr)
continue
}
backoff := float64(uint(1) << (uint(retry) - 1))
@@ -572,7 +576,7 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
timer := backoffNewTimer(d)
select {
case <-timer.C:
t.vlogf("RoundTrip retrying after failure: %v", err)
t.vlogf("RoundTrip retrying after failure: %v", roundTripErr)
continue
case <-req.Context().Done():
timer.Stop()
@@ -1265,6 +1269,29 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
return res, nil
}
cancelRequest := func(cs *clientStream, err error) error {
cs.cc.mu.Lock()
bodyClosed := cs.reqBodyClosed
cs.cc.mu.Unlock()
// Wait for the request body to be closed.
//
// If nothing closed the body before now, abortStreamLocked
// will have started a goroutine to close it.
//
// Closing the body before returning avoids a race condition
// with net/http checking its readTrackingBody to see if the
// body was read from or closed. See golang/go#60041.
//
// The body is closed in a separate goroutine without the
// connection mutex held, but dropping the mutex before waiting
// will keep us from holding it indefinitely if the body
// close is slow for some reason.
if bodyClosed != nil {
<-bodyClosed
}
return err
}
for {
select {
case <-cs.respHeaderRecv:
@@ -1284,10 +1311,10 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
case <-ctx.Done():
err := ctx.Err()
cs.abortStream(err)
return nil, err
return nil, cancelRequest(cs, err)
case <-cs.reqCancel:
cs.abortStream(errRequestCanceled)
return nil, errRequestCanceled
return nil, cancelRequest(cs, errRequestCanceled)
}
}
}
@@ -1653,7 +1680,27 @@ func (cs *clientStream) frameScratchBufferLen(maxFrameSize int) int {
return int(n) // doesn't truncate; max is 512K
}
var bufPool sync.Pool // of *[]byte
// Seven bufPools manage different frame sizes. This helps to avoid scenarios where long-running
// streaming requests using small frame sizes occupy large buffers initially allocated for prior
// requests needing big buffers. The size ranges are as follows:
// {0 KB, 16 KB], {16 KB, 32 KB], {32 KB, 64 KB], {64 KB, 128 KB], {128 KB, 256 KB],
// {256 KB, 512 KB], {512 KB, infinity}
// In practice, the maximum scratch buffer size should not exceed 512 KB due to
// frameScratchBufferLen(maxFrameSize), thus the "infinity pool" should never be used.
// It exists mainly as a safety measure, for potential future increases in max buffer size.
var bufPools [7]sync.Pool // of *[]byte
func bufPoolIndex(size int) int {
if size <= 16384 {
return 0
}
size -= 1
bits := bits.Len(uint(size))
index := bits - 14
if index >= len(bufPools) {
return len(bufPools) - 1
}
return index
}
func (cs *clientStream) writeRequestBody(req *http.Request) (err error) {
cc := cs.cc
@@ -1671,12 +1718,13 @@ func (cs *clientStream) writeRequestBody(req *http.Request) (err error) {
// Scratch buffer for reading into & writing from.
scratchLen := cs.frameScratchBufferLen(maxFrameSize)
var buf []byte
if bp, ok := bufPool.Get().(*[]byte); ok && len(*bp) >= scratchLen {
defer bufPool.Put(bp)
index := bufPoolIndex(scratchLen)
if bp, ok := bufPools[index].Get().(*[]byte); ok && len(*bp) >= scratchLen {
defer bufPools[index].Put(bp)
buf = *bp
} else {
buf = make([]byte, scratchLen)
defer bufPool.Put(&buf)
defer bufPools[index].Put(&buf)
}
var sawEOF bool
@@ -1844,6 +1892,9 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail
if err != nil {
return nil, err
}
if !httpguts.ValidHostHeader(host) {
return nil, errors.New("http2: invalid Host header")
}
var path string
if req.Method != "CONNECT" {
@@ -1880,7 +1931,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail
// 8.1.2.3 Request Pseudo-Header Fields
// The :path pseudo-header field includes the path and query parts of the
// target URI (the path-absolute production and optionally a '?' character
// followed by the query production (see Sections 3.3 and 3.4 of
// followed by the query production, see Sections 3.3 and 3.4 of
// [RFC3986]).
f(":authority", host)
m := req.Method
@@ -2555,6 +2606,9 @@ func (b transportResponseBody) Close() error {
cs := b.cs
cc := cs.cc
cs.bufPipe.BreakWithError(errClosedResponseBody)
cs.abortStream(errClosedResponseBody)
unread := cs.bufPipe.Len()
if unread > 0 {
cc.mu.Lock()
@@ -2573,9 +2627,6 @@ func (b transportResponseBody) Close() error {
cc.wmu.Unlock()
}
cs.bufPipe.BreakWithError(errClosedResponseBody)
cs.abortStream(errClosedResponseBody)
select {
case <-cs.donec:
case <-cs.ctx.Done():

vendor/golang.org/x/net/http2/writesched.go generated vendored

@@ -184,7 +184,8 @@ func (wr *FrameWriteRequest) replyToWriter(err error) {
// writeQueue is used by implementations of WriteScheduler.
type writeQueue struct {
s []FrameWriteRequest
s []FrameWriteRequest
prev, next *writeQueue
}
func (q *writeQueue) empty() bool { return len(q.s) == 0 }

vendor/golang.org/x/net/http2/writesched_roundrobin.go generated vendored (new file)

@@ -0,0 +1,119 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
import (
"fmt"
"math"
)
type roundRobinWriteScheduler struct {
// control contains control frames (SETTINGS, PING, etc.).
control writeQueue
// streams maps stream ID to a queue.
streams map[uint32]*writeQueue
// stream queues are stored in a circular linked list.
// head is the next stream to write, or nil if there are no streams open.
head *writeQueue
// pool of empty queues for reuse.
queuePool writeQueuePool
}
// newRoundRobinWriteScheduler constructs a new write scheduler.
// The round robin scheduler priorizes control frames
// like SETTINGS and PING over DATA frames.
// When there are no control frames to send, it performs a round-robin
// selection from the ready streams.
func newRoundRobinWriteScheduler() WriteScheduler {
ws := &roundRobinWriteScheduler{
streams: make(map[uint32]*writeQueue),
}
return ws
}
func (ws *roundRobinWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) {
if ws.streams[streamID] != nil {
panic(fmt.Errorf("stream %d already opened", streamID))
}
q := ws.queuePool.get()
ws.streams[streamID] = q
if ws.head == nil {
ws.head = q
q.next = q
q.prev = q
} else {
// Queues are stored in a ring.
// Insert the new stream before ws.head, putting it at the end of the list.
q.prev = ws.head.prev
q.next = ws.head
q.prev.next = q
q.next.prev = q
}
}
func (ws *roundRobinWriteScheduler) CloseStream(streamID uint32) {
q := ws.streams[streamID]
if q == nil {
return
}
if q.next == q {
// This was the only open stream.
ws.head = nil
} else {
q.prev.next = q.next
q.next.prev = q.prev
if ws.head == q {
ws.head = q.next
}
}
delete(ws.streams, streamID)
ws.queuePool.put(q)
}
func (ws *roundRobinWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {}
func (ws *roundRobinWriteScheduler) Push(wr FrameWriteRequest) {
if wr.isControl() {
ws.control.push(wr)
return
}
q := ws.streams[wr.StreamID()]
if q == nil {
// This is a closed stream.
// wr should not be a HEADERS or DATA frame.
// We push the request onto the control queue.
if wr.DataSize() > 0 {
panic("add DATA on non-open stream")
}
ws.control.push(wr)
return
}
q.push(wr)
}
func (ws *roundRobinWriteScheduler) Pop() (FrameWriteRequest, bool) {
// Control and RST_STREAM frames first.
if !ws.control.empty() {
return ws.control.shift(), true
}
if ws.head == nil {
return FrameWriteRequest{}, false
}
q := ws.head
for {
if wr, ok := q.consume(math.MaxInt32); ok {
ws.head = q.next
return wr, true
}
q = q.next
if q == ws.head {
break
}
}
return FrameWriteRequest{}, false
}


@@ -121,7 +121,7 @@ func CheckJoiners(enable bool) Option {
}
}
// StrictDomainName limits the set of permissable ASCII characters to those
// StrictDomainName limits the set of permissible ASCII characters to those
// allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the
// hyphen). This is set by default for MapForLookup and ValidateForRegistration,
// but is only useful if ValidateLabels is set.

File diff suppressed because it is too large.

vendor/golang.org/x/net/idna/tables15.0.0.go generated vendored (new file; diff suppressed because it is too large)

vendor/golang.org/x/net/idna/trie.go generated vendored

@@ -6,27 +6,6 @@
package idna
// appendMapping appends the mapping for the respective rune. isMapped must be
// true. A mapping is a categorization of a rune as defined in UTS #46.
func (c info) appendMapping(b []byte, s string) []byte {
index := int(c >> indexShift)
if c&xorBit == 0 {
s := mappings[index:]
return append(b, s[1:s[0]+1]...)
}
b = append(b, s...)
if c&inlineXOR == inlineXOR {
// TODO: support and handle two-byte inline masks
b[len(b)-1] ^= byte(index)
} else {
for p := len(b) - int(xorData[index]); p < len(b); p++ {
index++
b[p] ^= xorData[index]
}
}
return b
}
// Sparse block handling code.
type valueRange struct {

vendor/golang.org/x/net/idna/trie12.0.0.go generated vendored (new file)

@@ -0,0 +1,31 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.16
// +build !go1.16
package idna
// appendMapping appends the mapping for the respective rune. isMapped must be
// true. A mapping is a categorization of a rune as defined in UTS #46.
func (c info) appendMapping(b []byte, s string) []byte {
index := int(c >> indexShift)
if c&xorBit == 0 {
s := mappings[index:]
return append(b, s[1:s[0]+1]...)
}
b = append(b, s...)
if c&inlineXOR == inlineXOR {
// TODO: support and handle two-byte inline masks
b[len(b)-1] ^= byte(index)
} else {
for p := len(b) - int(xorData[index]); p < len(b); p++ {
index++
b[p] ^= xorData[index]
}
}
return b
}

vendor/golang.org/x/net/idna/trie13.0.0.go generated vendored (new file)

@@ -0,0 +1,31 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.16
// +build go1.16
package idna
// appendMapping appends the mapping for the respective rune. isMapped must be
// true. A mapping is a categorization of a rune as defined in UTS #46.
func (c info) appendMapping(b []byte, s string) []byte {
index := int(c >> indexShift)
if c&xorBit == 0 {
p := index
return append(b, mappings[mappingIndex[p]:mappingIndex[p+1]]...)
}
b = append(b, s...)
if c&inlineXOR == inlineXOR {
// TODO: support and handle two-byte inline masks
b[len(b)-1] ^= byte(index)
} else {
for p := len(b) - int(xorData[index]); p < len(b); p++ {
index++
b[p] ^= xorData[index]
}
}
return b
}