diff --git a/go.mod b/go.mod
index bff9518..89d3609 100644
--- a/go.mod
+++ b/go.mod
@@ -1,87 +1,91 @@
 module github.com/1Password/onepassword-operator
 
-go 1.18
+go 1.19
 
 require (
 	github.com/1Password/connect-sdk-go v1.5.0
-	github.com/onsi/ginkgo/v2 v2.1.6
-	github.com/onsi/gomega v1.20.2
-	github.com/stretchr/testify v1.8.0
-	k8s.io/api v0.25.3
-	k8s.io/apimachinery v0.25.3
-	k8s.io/client-go v0.25.3
-	k8s.io/kubectl v0.25.0
-	sigs.k8s.io/controller-runtime v0.13.0
+	github.com/onsi/ginkgo/v2 v2.9.2
+	github.com/onsi/gomega v1.27.5
+	github.com/stretchr/testify v1.8.2
+	k8s.io/api v0.26.3
+	k8s.io/apimachinery v0.26.3
+	k8s.io/client-go v0.26.3
+	k8s.io/kubectl v0.26.3
+	sigs.k8s.io/controller-runtime v0.14.5
 )
 
 require (
-	cloud.google.com/go/compute v1.10.0 // indirect
+	cloud.google.com/go/compute v1.14.0 // indirect
+	cloud.google.com/go/compute/metadata v0.2.3 // indirect
 	github.com/Azure/go-autorest v14.2.0+incompatible // indirect
 	github.com/Azure/go-autorest/autorest v0.11.28 // indirect
-	github.com/Azure/go-autorest/autorest/adal v0.9.21 // indirect
+	github.com/Azure/go-autorest/autorest/adal v0.9.23 // indirect
 	github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
 	github.com/Azure/go-autorest/logger v0.2.1 // indirect
 	github.com/Azure/go-autorest/tracing v0.6.0 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
-	github.com/cespare/xxhash/v2 v2.1.2 // indirect
+	github.com/cespare/xxhash/v2 v2.2.0 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/emicklei/go-restful/v3 v3.9.0 // indirect
+	github.com/emicklei/go-restful/v3 v3.10.2 // indirect
 	github.com/evanphx/json-patch v5.6.0+incompatible // indirect
 	github.com/evanphx/json-patch/v5 v5.6.0 // indirect
 	github.com/fsnotify/fsnotify v1.6.0 // indirect
 	github.com/go-logr/logr v1.2.3 // indirect
 	github.com/go-logr/zapr v1.2.3 // indirect
-	github.com/go-openapi/jsonpointer v0.19.5 // indirect
-	github.com/go-openapi/jsonreference v0.20.0 // indirect
+	github.com/go-openapi/jsonpointer v0.19.6 // indirect
+	github.com/go-openapi/jsonreference v0.20.2 // indirect
 	github.com/go-openapi/swag v0.22.3 // indirect
+	github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
-	github.com/golang-jwt/jwt/v4 v4.4.2 // indirect
+	github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
-	github.com/golang/protobuf v1.5.2 // indirect
+	github.com/golang/protobuf v1.5.3 // indirect
 	github.com/google/gnostic v0.6.9 // indirect
 	github.com/google/go-cmp v0.5.9 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
+	github.com/google/pprof v0.0.0-20230323073829-e72429f035bd // indirect
 	github.com/google/uuid v1.3.0 // indirect
-	github.com/imdario/mergo v0.3.13 // indirect
+	github.com/imdario/mergo v0.3.15 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
-	github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect
+	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/opentracing/opentracing-go v1.2.0 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
-	github.com/prometheus/client_golang v1.13.0 // indirect
+	github.com/prometheus/client_golang v1.14.0 // indirect
 	github.com/prometheus/client_model v0.3.0 // indirect
-	github.com/prometheus/common v0.37.0 // indirect
-	github.com/prometheus/procfs v0.8.0 // indirect
+	github.com/prometheus/common v0.42.0 // indirect
+	github.com/prometheus/procfs v0.9.0 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
 	github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect
 	github.com/uber/jaeger-lib v2.4.1+incompatible // indirect
 	go.uber.org/atomic v1.10.0 // indirect
-	go.uber.org/multierr v1.8.0 // indirect
-	go.uber.org/zap v1.23.0 // indirect
-	golang.org/x/crypto v0.1.0 // indirect
-	golang.org/x/net v0.1.0 // indirect
-	golang.org/x/oauth2 v0.1.0 // indirect
-	golang.org/x/sys v0.1.0 // indirect
-	golang.org/x/term v0.1.0 // indirect
-	golang.org/x/text v0.4.0 // indirect
-	golang.org/x/time v0.1.0 // indirect
+	go.uber.org/multierr v1.10.0 // indirect
+	go.uber.org/zap v1.24.0 // indirect
+	golang.org/x/crypto v0.7.0 // indirect
+	golang.org/x/net v0.8.0 // indirect
+	golang.org/x/oauth2 v0.6.0 // indirect
+	golang.org/x/sys v0.6.0 // indirect
+	golang.org/x/term v0.6.0 // indirect
+	golang.org/x/text v0.8.0 // indirect
+	golang.org/x/time v0.3.0 // indirect
+	golang.org/x/tools v0.7.0 // indirect
 	gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/protobuf v1.28.1 // indirect
+	google.golang.org/protobuf v1.30.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
-	k8s.io/apiextensions-apiserver v0.25.3 // indirect
-	k8s.io/component-base v0.25.3 // indirect
-	k8s.io/klog/v2 v2.80.1 // indirect
-	k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect
-	k8s.io/utils v0.0.0-20221012122500-cfd413dd9e85 // indirect
-	sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
+	k8s.io/apiextensions-apiserver v0.26.3 // indirect
+	k8s.io/component-base v0.26.3 // indirect
+	k8s.io/klog/v2 v2.90.1 // indirect
+	k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a // indirect
+	k8s.io/utils v0.0.0-20230313181309-38a27ef9d749 // indirect
+	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
 	sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
 	sigs.k8s.io/yaml v1.3.0 // indirect
 )
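Aside (editor's sketch, not part of this change): the go.mod hunk above moves the module from Go 1.18 to 1.19 and bumps the Kubernetes libraries from the 0.25.x line to 0.26.3, controller-runtime from 0.13.0 to 0.14.5, and Ginkgo/Gomega/testify to current releases. One way to confirm which versions actually get linked into a built operator binary is `runtime/debug.ReadBuildInfo`:

```go
// Minimal sketch: print the module versions the Go linker resolved,
// which should line up with the pins above after `go mod tidy && go mod vendor`.
package main

import (
	"fmt"
	"runtime/debug"
)

func main() {
	info, ok := debug.ReadBuildInfo()
	if !ok {
		fmt.Println("binary was built without module support")
		return
	}
	for _, dep := range info.Deps {
		// e.g. "sigs.k8s.io/controller-runtime v0.14.5"
		fmt.Println(dep.Path, dep.Version)
	}
}
```

This only reports what was linked; `go mod verify` is the complementary check that the downloaded modules still match the `h1:` hashes recorded in the go.sum hunks that follow.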
diff --git a/go.sum b/go.sum
index e3b7b1d..ba96de0 100644
--- a/go.sum
+++ b/go.sum
@@ -21,6 +21,10 @@ cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4g
 cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
 cloud.google.com/go/compute v1.10.0 h1:aoLIYaA1fX3ywihqpBk2APQKOo20nXsp1GEZQbx5Jk4=
 cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU=
+cloud.google.com/go/compute v1.14.0 h1:hfm2+FfxVmnRlh6LpB7cg1ZNU+5edAHmW679JePztk0=
+cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo=
+cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
+cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
 cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
 cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
 cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
@@ -42,6 +46,8 @@ github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5ne
github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= github.com/Azure/go-autorest/autorest/adal v0.9.21 h1:jjQnVFXPfekaqb8vIsv2G1lxshoW+oGv4MDlhRtnYZk= github.com/Azure/go-autorest/autorest/adal v0.9.21/go.mod h1:zua7mBUaCc5YnSLKYgGJR/w5ePdMDA6H56upLsHzA9U= +github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8= +github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= @@ -71,10 +77,13 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= @@ -89,6 +98,8 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.10.2 h1:hIovbnmBTLjHXkqEBUz3HGpXZdM7ZrE9fJIZIqlJLqE= +github.com/emicklei/go-restful/v3 v3.10.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -125,12 +136,18 @@ github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= 
+github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= @@ -138,6 +155,8 @@ github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzw github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs= github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -169,6 +188,8 @@ github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= @@ -196,6 +217,8 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20230323073829-e72429f035bd 
h1:r8yyd+DJDmsUhGrRBxH5Pj7KeFK5l+Y3FsgT8keqKtk= +github.com/google/pprof v0.0.0-20230323073829-e72429f035bd/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= @@ -208,6 +231,8 @@ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= +github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM= +github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= @@ -230,6 +255,7 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -241,6 +267,8 @@ github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJ github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2 h1:hAHbPm5IJGijwng3PWk09JkG9WeqChjprR5s9bBZ+OM= github.com/matttproud/golang_protobuf_extensions v1.0.2/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -257,8 +285,12 @@ github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo/v2 v2.1.6 h1:Fx2POJZfKRQcM1pH49qSZiYeu319wji004qX+GDovrU= github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk= +github.com/onsi/ginkgo/v2 v2.9.2 h1:BA2GMJOtfGAfagzYtrAlufIP0lq6QERkFmHLMLPwFSU= +github.com/onsi/ginkgo/v2 v2.9.2/go.mod h1:WHcJJG2dIlcCqVfBAwUCrJxSPFb6v4azBwgxeMeDuts= github.com/onsi/gomega v1.20.2 h1:8uQq0zMgLEfa0vRrrBgaJF2gyW9Da9BmfGV+OyUzfkY= github.com/onsi/gomega v1.20.2/go.mod 
h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc= +github.com/onsi/gomega v1.27.5 h1:T/X6I0RNFw/kTqgfkZPcQ5KU6vCnWNBGdtrIx2dpGeQ= +github.com/onsi/gomega v1.27.5/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -274,6 +306,8 @@ github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqr github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU= github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= +github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= +github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -286,6 +320,8 @@ github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9 github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= +github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= +github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= @@ -293,6 +329,8 @@ github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1 github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= +github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= +github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -306,14 +344,19 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod 
h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o= github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg= @@ -325,6 +368,7 @@ github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -340,9 +384,13 @@ go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ= +go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.23.0 h1:OjGQ5KQDEUawVHxNwQgPpiypGHOxo2mNZsOqTak4fFY= go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY= +go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= +go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -353,6 +401,9 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= 
golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A= +golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -387,6 +438,7 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -423,8 +475,12 @@ golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0 h1:hZ/3BUoy5aId7sCpA/Tc5lt8DkFgdVS2onTpJsZ/fl0= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -434,6 +490,8 @@ golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.1.0 h1:isLCZuhj4v+tYv7eskaN4v/TM+A1begWWgyVJDdl1+Y= golang.org/x/oauth2 v0.1.0/go.mod h1:G9FE4dLTsbXUu90h/Pf85g4w1D+SSAgR+q46nJZ8M4A= +golang.org/x/oauth2 v0.6.0 h1:Lh8GPgSKBfWSwFvtuWOfeI3aAAnbXTSutYxJiOJFgIw= +golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -444,6 +502,7 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -485,13 +544,21 @@ golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0 h1:g6Z6vPFA9dYBAF7DWcH6sCcOntplXsDKcliusYijMlw= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -502,11 +569,16 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= 
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.1.0 h1:xYY+Bajn2a7VBmTM5GikTmnK8ZuX8YgnQCqZpbBNtmA= golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -552,6 +624,9 @@ golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= +golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -647,12 +722,15 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= @@ -681,30 +759,52 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod 
h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.25.3 h1:Q1v5UFfYe87vi5H7NU0p4RXC26PPMT8KOpr1TLQbCMQ= k8s.io/api v0.25.3/go.mod h1:o42gKscFrEVjHdQnyRenACrMtbuJsVdP+WVjqejfzmI= +k8s.io/api v0.26.3 h1:emf74GIQMTik01Aum9dPP0gAypL8JTLl/lHa4V9RFSU= +k8s.io/api v0.26.3/go.mod h1:PXsqwPMXBSBcL1lJ9CYDKy7kIReUydukS5JiRlxC3qE= k8s.io/apiextensions-apiserver v0.25.3 h1:bfI4KS31w2f9WM1KLGwnwuVlW3RSRPuIsfNF/3HzR0k= k8s.io/apiextensions-apiserver v0.25.3/go.mod h1:ZJqwpCkxIx9itilmZek7JgfUAM0dnTsA48I4krPqRmo= +k8s.io/apiextensions-apiserver v0.26.3 h1:5PGMm3oEzdB1W/FTMgGIDmm100vn7IaUP5er36dB+YE= +k8s.io/apiextensions-apiserver v0.26.3/go.mod h1:jdA5MdjNWGP+njw1EKMZc64xAT5fIhN6VJrElV3sfpQ= k8s.io/apimachinery v0.25.3 h1:7o9ium4uyUOM76t6aunP0nZuex7gDf8VGwkR5RcJnQc= k8s.io/apimachinery v0.25.3/go.mod h1:jaF9C/iPNM1FuLl7Zuy5b9v+n35HGSh6AQ4HYRkCqwo= +k8s.io/apimachinery v0.26.3 h1:dQx6PNETJ7nODU3XPtrwkfuubs6w7sX0M8n61zHIV/k= +k8s.io/apimachinery v0.26.3/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5I= k8s.io/client-go v0.25.3 h1:oB4Dyl8d6UbfDHD8Bv8evKylzs3BXzzufLiO27xuPs0= k8s.io/client-go v0.25.3/go.mod h1:t39LPczAIMwycjcXkVc+CB+PZV69jQuNx4um5ORDjQA= +k8s.io/client-go v0.26.3 h1:k1UY+KXfkxV2ScEL3gilKcF7761xkYsSD6BC9szIu8s= +k8s.io/client-go v0.26.3/go.mod h1:ZPNu9lm8/dbRIPAgteN30RSXea6vrCpFvq+MateTUuQ= k8s.io/component-base v0.25.3 h1:UrsxciGdrCY03ULT1h/S/gXFCOPnLhUVwSyx+hM/zq4= k8s.io/component-base v0.25.3/go.mod h1:WYoS8L+IlTZgU7rhAl5Ctpw0WdMxDfCC5dkxcEFa/TI= +k8s.io/component-base v0.26.3 h1:oC0WMK/ggcbGDTkdcqefI4wIZRYdK3JySx9/HADpV0g= +k8s.io/component-base v0.26.3/go.mod h1:5kj1kZYwSC6ZstHJN7oHBqcJC6yyn41eR+Sqa/mQc8E= k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw= +k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E= k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= +k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a h1:gmovKNur38vgoWfGtP5QOGNOA7ki4n6qNYoFAgMlNvg= +k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a/go.mod h1:y5VtZWM9sHHc2ZodIH/6SHzXj+TPU5USoA8lcIeKEKY= k8s.io/kubectl v0.25.0 h1:/Wn1cFqo8ik3iee1EvpxYre3bkWsGLXzLQI6uCCAkQc= k8s.io/kubectl v0.25.0/go.mod h1:n16ULWsOl2jmQpzt2o7Dud1t4o0+Y186ICb4O+GwKAU= +k8s.io/kubectl v0.26.3 h1:bZ5SgFyeEXw6XTc1Qji0iNdtqAC76lmeIIQULg2wNXM= +k8s.io/kubectl v0.26.3/go.mod h1:02+gv7Qn4dupzN3fi/9OvqqdW+uG/4Zi56vc4Zmsp1g= k8s.io/utils v0.0.0-20221012122500-cfd413dd9e85 h1:cTdVh7LYu82xeClmfzGtgyspNh6UxpwLWGi8R4sspNo= k8s.io/utils v0.0.0-20221012122500-cfd413dd9e85/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20230313181309-38a27ef9d749 h1:xMMXJlJbsU8w3V5N2FLDQ8YgU8s1EoULdbQBcAeNJkY= +k8s.io/utils v0.0.0-20230313181309-38a27ef9d749/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/controller-runtime v0.13.0 
h1:iqa5RNciy7ADWnIc8QxCbOX5FEKVR3uxVxKHRMc2WIQ= sigs.k8s.io/controller-runtime v0.13.0/go.mod h1:Zbz+el8Yg31jubvAEyglRZGdLAjplZl+PgtYNI6WNTI= +sigs.k8s.io/controller-runtime v0.14.5 h1:6xaWFqzT5KuAQ9ufgUaj1G/+C4Y1GRkhrxl+BJ9i+5s= +sigs.k8s.io/controller-runtime v0.14.5/go.mod h1:WqIdsAY6JBsjfc/CqO0CORmNtoCtE4S6qbPc9s68h+0= sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go deleted file mode 100644 index 50538b1..0000000 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ /dev/null @@ -1,542 +0,0 @@ -// Copyright 2014 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package metadata provides access to Google Compute Engine (GCE) -// metadata and API service accounts. -// -// This package is a wrapper around the GCE metadata service, -// as documented at https://cloud.google.com/compute/docs/metadata/overview. -package metadata // import "cloud.google.com/go/compute/metadata" - -import ( - "context" - "encoding/json" - "fmt" - "io/ioutil" - "net" - "net/http" - "net/url" - "os" - "runtime" - "strings" - "sync" - "time" -) - -const ( - // metadataIP is the documented metadata server IP address. - metadataIP = "169.254.169.254" - - // metadataHostEnv is the environment variable specifying the - // GCE metadata hostname. If empty, the default value of - // metadataIP ("169.254.169.254") is used instead. - // This is variable name is not defined by any spec, as far as - // I know; it was made up for the Go package. - metadataHostEnv = "GCE_METADATA_HOST" - - userAgent = "gcloud-golang/0.1" -) - -type cachedValue struct { - k string - trim bool - mu sync.Mutex - v string -} - -var ( - projID = &cachedValue{k: "project/project-id", trim: true} - projNum = &cachedValue{k: "project/numeric-project-id", trim: true} - instID = &cachedValue{k: "instance/id", trim: true} -) - -var defaultClient = &Client{hc: newDefaultHTTPClient()} - -func newDefaultHTTPClient() *http.Client { - return &http.Client{ - Transport: &http.Transport{ - Dial: (&net.Dialer{ - Timeout: 2 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - }, - Timeout: 5 * time.Second, - } -} - -// NotDefinedError is returned when requested metadata is not defined. -// -// The underlying string is the suffix after "/computeMetadata/v1/". 
-// -// This error is not returned if the value is defined to be the empty -// string. -type NotDefinedError string - -func (suffix NotDefinedError) Error() string { - return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix)) -} - -func (c *cachedValue) get(cl *Client) (v string, err error) { - defer c.mu.Unlock() - c.mu.Lock() - if c.v != "" { - return c.v, nil - } - if c.trim { - v, err = cl.getTrimmed(c.k) - } else { - v, err = cl.Get(c.k) - } - if err == nil { - c.v = v - } - return -} - -var ( - onGCEOnce sync.Once - onGCE bool -) - -// OnGCE reports whether this process is running on Google Compute Engine. -func OnGCE() bool { - onGCEOnce.Do(initOnGCE) - return onGCE -} - -func initOnGCE() { - onGCE = testOnGCE() -} - -func testOnGCE() bool { - // The user explicitly said they're on GCE, so trust them. - if os.Getenv(metadataHostEnv) != "" { - return true - } - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - resc := make(chan bool, 2) - - // Try two strategies in parallel. - // See https://github.com/googleapis/google-cloud-go/issues/194 - go func() { - req, _ := http.NewRequest("GET", "http://"+metadataIP, nil) - req.Header.Set("User-Agent", userAgent) - res, err := newDefaultHTTPClient().Do(req.WithContext(ctx)) - if err != nil { - resc <- false - return - } - defer res.Body.Close() - resc <- res.Header.Get("Metadata-Flavor") == "Google" - }() - - go func() { - resolver := &net.Resolver{} - addrs, err := resolver.LookupHost(ctx, "metadata.google.internal") - if err != nil || len(addrs) == 0 { - resc <- false - return - } - resc <- strsContains(addrs, metadataIP) - }() - - tryHarder := systemInfoSuggestsGCE() - if tryHarder { - res := <-resc - if res { - // The first strategy succeeded, so let's use it. - return true - } - // Wait for either the DNS or metadata server probe to - // contradict the other one and say we are running on - // GCE. Give it a lot of time to do so, since the system - // info already suggests we're running on a GCE BIOS. - timer := time.NewTimer(5 * time.Second) - defer timer.Stop() - select { - case res = <-resc: - return res - case <-timer.C: - // Too slow. Who knows what this system is. - return false - } - } - - // There's no hint from the system info that we're running on - // GCE, so use the first probe's result as truth, whether it's - // true or false. The goal here is to optimize for speed for - // users who are NOT running on GCE. We can't assume that - // either a DNS lookup or an HTTP request to a blackholed IP - // address is fast. Worst case this should return when the - // metaClient's Transport.ResponseHeaderTimeout or - // Transport.Dial.Timeout fires (in two seconds). - return <-resc -} - -// systemInfoSuggestsGCE reports whether the local system (without -// doing network requests) suggests that we're running on GCE. If this -// returns true, testOnGCE tries a bit harder to reach its metadata -// server. -func systemInfoSuggestsGCE() bool { - if runtime.GOOS != "linux" { - // We don't have any non-Linux clues available, at least yet. - return false - } - slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name") - name := strings.TrimSpace(string(slurp)) - return name == "Google" || name == "Google Compute Engine" -} - -// Subscribe calls Client.Subscribe on the default client. -func Subscribe(suffix string, fn func(v string, ok bool) error) error { - return defaultClient.Subscribe(suffix, fn) -} - -// Get calls Client.Get on the default client. 
-func Get(suffix string) (string, error) { return defaultClient.Get(suffix) } - -// ProjectID returns the current instance's project ID string. -func ProjectID() (string, error) { return defaultClient.ProjectID() } - -// NumericProjectID returns the current instance's numeric project ID. -func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() } - -// InternalIP returns the instance's primary internal IP address. -func InternalIP() (string, error) { return defaultClient.InternalIP() } - -// ExternalIP returns the instance's primary external (public) IP address. -func ExternalIP() (string, error) { return defaultClient.ExternalIP() } - -// Email calls Client.Email on the default client. -func Email(serviceAccount string) (string, error) { return defaultClient.Email(serviceAccount) } - -// Hostname returns the instance's hostname. This will be of the form -// ".c..internal". -func Hostname() (string, error) { return defaultClient.Hostname() } - -// InstanceTags returns the list of user-defined instance tags, -// assigned when initially creating a GCE instance. -func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() } - -// InstanceID returns the current VM's numeric instance ID. -func InstanceID() (string, error) { return defaultClient.InstanceID() } - -// InstanceName returns the current VM's instance ID string. -func InstanceName() (string, error) { return defaultClient.InstanceName() } - -// Zone returns the current VM's zone, such as "us-central1-b". -func Zone() (string, error) { return defaultClient.Zone() } - -// InstanceAttributes calls Client.InstanceAttributes on the default client. -func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() } - -// ProjectAttributes calls Client.ProjectAttributes on the default client. -func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() } - -// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client. -func InstanceAttributeValue(attr string) (string, error) { - return defaultClient.InstanceAttributeValue(attr) -} - -// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client. -func ProjectAttributeValue(attr string) (string, error) { - return defaultClient.ProjectAttributeValue(attr) -} - -// Scopes calls Client.Scopes on the default client. -func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) } - -func strsContains(ss []string, s string) bool { - for _, v := range ss { - if v == s { - return true - } - } - return false -} - -// A Client provides metadata. -type Client struct { - hc *http.Client -} - -// NewClient returns a Client that can be used to fetch metadata. -// Returns the client that uses the specified http.Client for HTTP requests. -// If nil is specified, returns the default client. -func NewClient(c *http.Client) *Client { - if c == nil { - return defaultClient - } - - return &Client{hc: c} -} - -// getETag returns a value from the metadata service as well as the associated ETag. -// This func is otherwise equivalent to Get. -func (c *Client) getETag(suffix string) (value, etag string, err error) { - ctx := context.TODO() - // Using a fixed IP makes it very difficult to spoof the metadata service in - // a container, which is an important use-case for local testing of cloud - // deployments. 
To enable spoofing of the metadata service, the environment - // variable GCE_METADATA_HOST is first inspected to decide where metadata - // requests shall go. - host := os.Getenv(metadataHostEnv) - if host == "" { - // Using 169.254.169.254 instead of "metadata" here because Go - // binaries built with the "netgo" tag and without cgo won't - // know the search suffix for "metadata" is - // ".google.internal", and this IP address is documented as - // being stable anyway. - host = metadataIP - } - suffix = strings.TrimLeft(suffix, "/") - u := "http://" + host + "/computeMetadata/v1/" + suffix - req, err := http.NewRequest("GET", u, nil) - if err != nil { - return "", "", err - } - req.Header.Set("Metadata-Flavor", "Google") - req.Header.Set("User-Agent", userAgent) - var res *http.Response - var reqErr error - retryer := newRetryer() - for { - res, reqErr = c.hc.Do(req) - var code int - if res != nil { - code = res.StatusCode - } - if delay, shouldRetry := retryer.Retry(code, reqErr); shouldRetry { - if err := sleep(ctx, delay); err != nil { - return "", "", err - } - continue - } - break - } - if reqErr != nil { - return "", "", reqErr - } - defer res.Body.Close() - if res.StatusCode == http.StatusNotFound { - return "", "", NotDefinedError(suffix) - } - all, err := ioutil.ReadAll(res.Body) - if err != nil { - return "", "", err - } - if res.StatusCode != 200 { - return "", "", &Error{Code: res.StatusCode, Message: string(all)} - } - return string(all), res.Header.Get("Etag"), nil -} - -// Get returns a value from the metadata service. -// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". -// -// If the GCE_METADATA_HOST environment variable is not defined, a default of -// 169.254.169.254 will be used instead. -// -// If the requested metadata is not defined, the returned error will -// be of type NotDefinedError. -func (c *Client) Get(suffix string) (string, error) { - val, _, err := c.getETag(suffix) - return val, err -} - -func (c *Client) getTrimmed(suffix string) (s string, err error) { - s, err = c.Get(suffix) - s = strings.TrimSpace(s) - return -} - -func (c *Client) lines(suffix string) ([]string, error) { - j, err := c.Get(suffix) - if err != nil { - return nil, err - } - s := strings.Split(strings.TrimSpace(j), "\n") - for i := range s { - s[i] = strings.TrimSpace(s[i]) - } - return s, nil -} - -// ProjectID returns the current instance's project ID string. -func (c *Client) ProjectID() (string, error) { return projID.get(c) } - -// NumericProjectID returns the current instance's numeric project ID. -func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) } - -// InstanceID returns the current VM's numeric instance ID. -func (c *Client) InstanceID() (string, error) { return instID.get(c) } - -// InternalIP returns the instance's primary internal IP address. -func (c *Client) InternalIP() (string, error) { - return c.getTrimmed("instance/network-interfaces/0/ip") -} - -// Email returns the email address associated with the service account. -// The account may be empty or the string "default" to use the instance's -// main account. -func (c *Client) Email(serviceAccount string) (string, error) { - if serviceAccount == "" { - serviceAccount = "default" - } - return c.getTrimmed("instance/service-accounts/" + serviceAccount + "/email") -} - -// ExternalIP returns the instance's primary external (public) IP address. 
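Aside (editor's test-only sketch, not part of this change): the getETag comment above describes the `GCE_METADATA_HOST` escape hatch for spoofing the metadata server locally. A sketch of that technique, assuming Go 1.17+ for `t.Setenv` and that the module version of the package resolves the host the same way as the vendored code above; the payload and suffix are illustrative:

```go
package metadata_test

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"
	"testing"

	"cloud.google.com/go/compute/metadata"
)

func TestSpoofedMetadataHost(t *testing.T) {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// getETag sends this header on every request; a fake server
		// only has to answer the path it is asked for.
		if r.Header.Get("Metadata-Flavor") != "Google" {
			http.Error(w, "missing Metadata-Flavor header", http.StatusForbidden)
			return
		}
		fmt.Fprint(w, "fake-zone")
	}))
	defer srv.Close()

	// getETag builds "http://" + host + "/computeMetadata/v1/" + suffix,
	// so the scheme must be stripped from the httptest URL.
	t.Setenv("GCE_METADATA_HOST", strings.TrimPrefix(srv.URL, "http://"))

	got, err := metadata.Get("instance/zone")
	if err != nil || got != "fake-zone" {
		t.Fatalf("Get(instance/zone) = %q, %v", got, err)
	}
}
```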
-func (c *Client) ExternalIP() (string, error) { - return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip") -} - -// Hostname returns the instance's hostname. This will be of the form -// ".c..internal". -func (c *Client) Hostname() (string, error) { - return c.getTrimmed("instance/hostname") -} - -// InstanceTags returns the list of user-defined instance tags, -// assigned when initially creating a GCE instance. -func (c *Client) InstanceTags() ([]string, error) { - var s []string - j, err := c.Get("instance/tags") - if err != nil { - return nil, err - } - if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil { - return nil, err - } - return s, nil -} - -// InstanceName returns the current VM's instance ID string. -func (c *Client) InstanceName() (string, error) { - return c.getTrimmed("instance/name") -} - -// Zone returns the current VM's zone, such as "us-central1-b". -func (c *Client) Zone() (string, error) { - zone, err := c.getTrimmed("instance/zone") - // zone is of the form "projects//zones/". - if err != nil { - return "", err - } - return zone[strings.LastIndex(zone, "/")+1:], nil -} - -// InstanceAttributes returns the list of user-defined attributes, -// assigned when initially creating a GCE VM instance. The value of an -// attribute can be obtained with InstanceAttributeValue. -func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") } - -// ProjectAttributes returns the list of user-defined attributes -// applying to the project as a whole, not just this VM. The value of -// an attribute can be obtained with ProjectAttributeValue. -func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") } - -// InstanceAttributeValue returns the value of the provided VM -// instance attribute. -// -// If the requested attribute is not defined, the returned error will -// be of type NotDefinedError. -// -// InstanceAttributeValue may return ("", nil) if the attribute was -// defined to be the empty string. -func (c *Client) InstanceAttributeValue(attr string) (string, error) { - return c.Get("instance/attributes/" + attr) -} - -// ProjectAttributeValue returns the value of the provided -// project attribute. -// -// If the requested attribute is not defined, the returned error will -// be of type NotDefinedError. -// -// ProjectAttributeValue may return ("", nil) if the attribute was -// defined to be the empty string. -func (c *Client) ProjectAttributeValue(attr string) (string, error) { - return c.Get("project/attributes/" + attr) -} - -// Scopes returns the service account scopes for the given account. -// The account may be empty or the string "default" to use the instance's -// main account. -func (c *Client) Scopes(serviceAccount string) ([]string, error) { - if serviceAccount == "" { - serviceAccount = "default" - } - return c.lines("instance/service-accounts/" + serviceAccount + "/scopes") -} - -// Subscribe subscribes to a value from the metadata service. -// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". -// The suffix may contain query parameters. -// -// Subscribe calls fn with the latest metadata value indicated by the provided -// suffix. If the metadata value is deleted, fn is called with the empty string -// and ok false. Subscribe blocks until fn returns a non-nil error or the value -// is deleted. Subscribe returns the error value returned from the last call to -// fn, which may be nil when ok == false. 
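Aside (editor's sketch, not part of this change): the Subscribe contract just documented — implemented directly below — long-polls the metadata server with `wait_for_change=true` and the last ETag. A hedged caller sketch; the attribute suffix "instance/attributes/my-flag" and the `errDone` sentinel are hypothetical:

```go
package main

import (
	"errors"
	"log"

	"cloud.google.com/go/compute/metadata"
)

var errDone = errors.New("done")

func main() {
	err := metadata.Subscribe("instance/attributes/my-flag", func(v string, ok bool) error {
		if !ok {
			// Value was deleted; Subscribe returns whatever we return
			// here, which may be nil.
			return nil
		}
		if v == "ready" {
			return errDone // any non-nil error ends the subscription
		}
		return nil // keep waiting; the next poll sends last_etag
	})
	if err != nil && !errors.Is(err, errDone) {
		log.Fatal(err)
	}
}
```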
-func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error { - const failedSubscribeSleep = time.Second * 5 - - // First check to see if the metadata value exists at all. - val, lastETag, err := c.getETag(suffix) - if err != nil { - return err - } - - if err := fn(val, true); err != nil { - return err - } - - ok := true - if strings.ContainsRune(suffix, '?') { - suffix += "&wait_for_change=true&last_etag=" - } else { - suffix += "?wait_for_change=true&last_etag=" - } - for { - val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag)) - if err != nil { - if _, deleted := err.(NotDefinedError); !deleted { - time.Sleep(failedSubscribeSleep) - continue // Retry on other errors. - } - ok = false - } - lastETag = etag - - if err := fn(val, ok); err != nil || !ok { - return err - } - } -} - -// Error contains an error response from the server. -type Error struct { - // Code is the HTTP response status code. - Code int - // Message is the server response message. - Message string -} - -func (e *Error) Error() string { - return fmt.Sprintf("compute: Received %d `%s`", e.Code, e.Message) -} diff --git a/vendor/cloud.google.com/go/compute/metadata/retry.go b/vendor/cloud.google.com/go/compute/metadata/retry.go deleted file mode 100644 index 0f18f3c..0000000 --- a/vendor/cloud.google.com/go/compute/metadata/retry.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metadata - -import ( - "context" - "io" - "math/rand" - "net/http" - "time" -) - -const ( - maxRetryAttempts = 5 -) - -var ( - syscallRetryable = func(err error) bool { return false } -) - -// defaultBackoff is basically equivalent to gax.Backoff without the need for -// the dependency. -type defaultBackoff struct { - max time.Duration - mul float64 - cur time.Duration -} - -func (b *defaultBackoff) Pause() time.Duration { - d := time.Duration(1 + rand.Int63n(int64(b.cur))) - b.cur = time.Duration(float64(b.cur) * b.mul) - if b.cur > b.max { - b.cur = b.max - } - return d -} - -// sleep is the equivalent of gax.Sleep without the need for the dependency. 
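Aside (editor's sketch, not part of this change): `defaultBackoff` above implements jittered exponential backoff — each `Pause` draws a uniform delay in [1ns, cur], then doubles `cur` (mul = 2) up to the 30s cap; `newRetryer` seeds `cur` at 100ms, and `metadataRetryer` gives up after five attempts. A standalone sketch that reproduces the deleted type verbatim to show the delay schedule in isolation:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

type defaultBackoff struct {
	max time.Duration
	mul float64
	cur time.Duration
}

func (b *defaultBackoff) Pause() time.Duration {
	d := time.Duration(1 + rand.Int63n(int64(b.cur)))
	b.cur = time.Duration(float64(b.cur) * b.mul)
	if b.cur > b.max {
		b.cur = b.max
	}
	return d
}

func main() {
	bo := &defaultBackoff{cur: 100 * time.Millisecond, max: 30 * time.Second, mul: 2}
	for attempt := 1; attempt <= 5; attempt++ { // metadataRetryer also caps at 5 attempts
		limit := bo.cur // Pause draws uniformly from [1ns, limit]
		fmt.Printf("attempt %d: limit %v, slept %v\n", attempt, limit, bo.Pause())
	}
}
```

The successive limits are 100ms, 200ms, 400ms, 800ms, 1.6s — so with the five-attempt cap the total worst-case wait stays around three seconds, well under the 30s ceiling that only matters for longer retry loops.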
-func sleep(ctx context.Context, d time.Duration) error { - t := time.NewTimer(d) - select { - case <-ctx.Done(): - t.Stop() - return ctx.Err() - case <-t.C: - return nil - } -} - -func newRetryer() *metadataRetryer { - return &metadataRetryer{bo: &defaultBackoff{ - cur: 100 * time.Millisecond, - max: 30 * time.Second, - mul: 2, - }} -} - -type backoff interface { - Pause() time.Duration -} - -type metadataRetryer struct { - bo backoff - attempts int -} - -func (r *metadataRetryer) Retry(status int, err error) (time.Duration, bool) { - if status == http.StatusOK { - return 0, false - } - retryOk := shouldRetry(status, err) - if !retryOk { - return 0, false - } - if r.attempts == maxRetryAttempts { - return 0, false - } - r.attempts++ - return r.bo.Pause(), true -} - -func shouldRetry(status int, err error) bool { - if 500 <= status && status <= 599 { - return true - } - if err == io.ErrUnexpectedEOF { - return true - } - // Transient network errors should be retried. - if syscallRetryable(err) { - return true - } - if err, ok := err.(interface{ Temporary() bool }); ok { - if err.Temporary() { - return true - } - } - if err, ok := err.(interface{ Unwrap() error }); ok { - return shouldRetry(status, err.Unwrap()) - } - return false -} diff --git a/vendor/github.com/Azure/go-autorest/.gitignore b/vendor/github.com/Azure/go-autorest/.gitignore deleted file mode 100644 index 3350aaf..0000000 --- a/vendor/github.com/Azure/go-autorest/.gitignore +++ /dev/null @@ -1,32 +0,0 @@ -# The standard Go .gitignore file follows. (Sourced from: github.com/github/gitignore/master/Go.gitignore) -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test -.DS_Store -.idea/ -.vscode/ - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof - -# go-autorest specific -vendor/ -autorest/azure/example/example diff --git a/vendor/github.com/Azure/go-autorest/CHANGELOG.md b/vendor/github.com/Azure/go-autorest/CHANGELOG.md deleted file mode 100644 index d1f596b..0000000 --- a/vendor/github.com/Azure/go-autorest/CHANGELOG.md +++ /dev/null @@ -1,1004 +0,0 @@ -# CHANGELOG - -## v14.2.0 - -- Added package comment to make `github.com/Azure/go-autorest` importable. - -## v14.1.1 - -### Bug Fixes - -- Change `x-ms-authorization-auxiliary` header value separator to comma. - -## v14.1.0 - -### New Features - -- Added `azure.SetEnvironment()` that will update the global environments map with the specified values. - -## v14.0.1 - -### Bug Fixes - -- Fix race condition when refreshing token. -- Fixed some tests to work with Go 1.14. - -## v14.0.0 - -## Breaking Changes - -- By default, the `DoRetryForStatusCodes` functions will no longer infinitely retry a request when the response returns an HTTP status code of 429 (StatusTooManyRequests). To opt in to the old behavior set `autorest.Count429AsRetry` to `false`. - -## New Features - -- Variable `autorest.Max429Delay` can be used to control the maximum delay between retries when a 429 is received with no `Retry-After` header. The default is zero which means there is no cap. - -## v13.4.0 - -## New Features - -- Added field `SendDecorators` to the `Client` type. This can be used to specify a custom chain of SendDecorators per client. -- Added method `Client.Send()` which includes logic for selecting the preferred chain of SendDecorators. 
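As a minimal sketch of the v13.4.0 feature above (this example is not part of the upstream changelog): `autorest.NewClientWithUserAgent`, `autorest.SendDecorator`, and `autorest.DoRetryForAttempts` are APIs referenced elsewhere in this document, while the exact variadic shape of `Client.Send()` is an assumption here.

```go
package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	client := autorest.NewClientWithUserAgent("example-app/1.0")

	// Per v13.4.0, a client may carry its own chain of SendDecorators,
	// which Client.Send() prefers over the package-level defaults.
	client.SendDecorators = []autorest.SendDecorator{
		autorest.DoRetryForAttempts(3, 500*time.Millisecond),
	}

	req, err := http.NewRequest(http.MethodGet, "https://example.com/", nil)
	if err != nil {
		fmt.Println("building request:", err)
		return
	}

	resp, err := client.Send(req)
	if err != nil {
		fmt.Println("sending request:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```

Attaching the chain to the client, rather than passing decorators on every call, keeps retry policy in one place for all requests the client sends.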
- -## v13.3.3 - -### Bug Fixes - -- Fixed connection leak when retrying requests. -- Enabled exponential back-off with a 2-minute cap when retrying on 429. -- Fixed some cases where errors were inadvertently dropped. - -## v13.3.2 - -### Bug Fixes - -- Updated `autorest.AsStringSlice()` to convert slice elements to their string representation. - -## v13.3.1 - -- Updated external dependencies. - -### Bug Fixes - -## v13.3.0 - -### New Features - -- Added support for shared key and shared access signature token authorization. - - `autorest.NewSharedKeyAuthorizer()` and dependent types. - - `autorest.NewSASTokenAuthorizer()` and dependent types. -- Added `ServicePrincipalToken.SetCustomRefresh()` so a custom refresh function can be invoked when a token has expired. - -### Bug Fixes - -- Fixed `cli.AccessTokensPath()` to respect `AZURE_CONFIG_DIR` when set. -- Support parsing error messages in XML responses. - -## v13.2.0 - -### New Features - -- Added the following functions to replace their versions that don't take a context. - - `adal.InitiateDeviceAuthWithContext()` - - `adal.CheckForUserCompletionWithContext()` - - `adal.WaitForUserCompletionWithContext()` - -## v13.1.0 - -### New Features - -- Added support for MSI authentication on Azure App Service and Azure Functions. - -## v13.0.2 - -### Bug Fixes - -- Always retry a request even if the sender returns a non-nil error. - -## v13.0.1 - -## Bug Fixes - -- Fixed `autorest.WithQueryParameters()` so that it properly encodes multi-value query parameters. - -## v13.0.0 - -## Breaking Changes - -The `tracing` package has been rewritten to provide a common interface for consumers to wire in the tracing package of their choice. -What this means is that by default no tracing provider will be compiled into your program and setting the `AZURE_SDK_TRACING_ENABLED` -environment variable will have no effect. To enable this previous behavior you must now add the following import to your source file. -```go - import _ "github.com/Azure/go-autorest/tracing/opencensus" -``` -The APIs required by autorest-generated code have remained but some APIs have been removed and new ones added. -The following APIs and variables have been removed (the majority of them were moved to the `opencensus` package). -- tracing.Transport -- tracing.Enable() -- tracing.EnableWithAIForwarding() -- tracing.Disable() - -The following APIs and types have been added -- tracing.Tracer -- tracing.Register() - -To hook up a tracer simply call `tracing.Register()` passing in a type that satisfies the `tracing.Tracer` interface. - -## v12.4.3 - -### Bug Fixes - -- `autorest.MultiTenantServicePrincipalTokenAuthorizer` will now properly add its auxiliary bearer tokens. - -## v12.4.2 - -### Bug Fixes - -- Improvements to the fixes made in v12.4.1. - - Remove `override` stanza from Gopkg.toml and `replace` directive from go.mod as they don't apply when being consumed as a dependency. - - Switched to latest version of `ocagent` that still depends on protobuf v1.2. - - Add indirect dependencies to the `required` clause with matching `constraint` stanzas so that `dep` dependencies match go.sum. - -## v12.4.1 - -### Bug Fixes - -- Updated OpenCensus and OCAgent versions to versions that don't depend on v1.3+ of protobuf as it was breaking kubernetes. -- Pinned opencensus-proto to a version that's compatible with our versions of OpenCensus and OCAgent. 
- -## v12.4.0 - -### New Features - -- Added `autorest.WithPrepareDecorators` and `autorest.GetPrepareDecorators` for adding and retrieving a custom chain of PrepareDecorators to the provided context. - -## v12.3.0 - -### New Features - -- Support for multi-tenant via x-ms-authorization-auxiliary header has been added for client credentials with - secret scenario; this basically bundles multiple OAuthConfig and ServicePrincipalToken types into corresponding - MultiTenant* types along with a new authorizer that adds the primary and auxiliary token headers to the request. - The authentication helpers have been updated to support this scenario; if environment var AZURE_AUXILIARY_TENANT_IDS - is set with a semicolon-delimited list of tenants, the multi-tenant codepath will kick in to create the appropriate authorizer. - See `adal.NewMultiTenantOAuthConfig`, `adal.NewMultiTenantServicePrincipalToken` and `autorest.NewMultiTenantServicePrincipalTokenAuthorizer` - along with their supporting types and methods. -- Added `autorest.WithSendDecorators` and `autorest.GetSendDecorators` for adding and retrieving a custom chain of SendDecorators to the provided context. -- Added `autorest.DoRetryForStatusCodesWithCap` and `autorest.DelayForBackoffWithCap` to enforce an upper bound on the duration between retries. - -## v12.2.0 - -### New Features - -- Added `autorest.WithXML`, `autorest.AsMerge`, `autorest.WithBytes` preparer decorators. -- Added `autorest.ByUnmarshallingBytes` response decorator. -- Added `Response.IsHTTPStatus` and `Response.HasHTTPStatus` helper methods for inspecting HTTP status code in `autorest.Response` types. - -### Bug Fixes - -- `autorest.DelayWithRetryAfter` now supports HTTP-Dates in the `Retry-After` header and is not limited to just 429 status codes. - -## v12.1.0 - -### New Features - -- Added `to.ByteSlicePtr()`. -- Added blob/queue storage resource ID to `azure.ResourceIdentifier`. - -## v12.0.0 - -### Breaking Changes - -In preparation for modules the following deprecated content has been removed. - - - async.NewFuture() - - async.Future.Done() - - async.Future.WaitForCompletion() - - async.DoPollForAsynchronous() - - The `utils` package - - validation.NewErrorWithValidationError() - - The `version` package - -## v11.9.0 - -### New Features - -- Add `ResourceIdentifiers` field to `azure.Environment` containing resource IDs for public and sovereign clouds. - -## v11.8.0 - -### New Features - -- Added `autorest.NewClientWithOptions()` to support endpoints that require free renegotiation. - -## v11.7.1 - -### Bug Fixes - -- Fix missing support for http(s) proxy when using the default sender. - -## v11.7.0 - -### New Features - -- Added methods to obtain a ServicePrincipalToken on the various credential configuration types in the `auth` package. - -## v11.6.1 - -### Bug Fixes - -- Fix ACR DNS endpoint for government clouds. -- Add Cosmos DB DNS endpoints. -- Update dependencies to resolve build breaks in OpenCensus. - -## v11.6.0 - -### New Features - -- Added type `autorest.BasicAuthorizer` to support Basic authentication. - -## v11.5.2 - -### Bug Fixes - -- Fixed `GetTokenFromCLI` not working with zsh. - -## v11.5.1 - -### Bug Fixes - -- In `Client.sender()` set the minimum TLS version on HTTP clients to 1.2. - -## v11.5.0 - -### New Features - -- The `auth` package has been refactored so that the environment and file settings are now available. -- The methods used in `auth.NewAuthorizerFromEnvironment()` are now exported so that custom authorization chains can be created.
-- Added support for certificate authorization for file-based config. - -## v11.4.0 - -### New Features - -- Added `adal.AddToUserAgent()` so callers can append custom data to the user-agent header used for ADAL requests. -- Exported `adal.UserAgent()` for parity with `autorest.Client`. - -## v11.3.2 - -### Bug Fixes - -- In `Future.WaitForCompletionRef()` if the provided context has a deadline don't add the default deadline. - -## v11.3.1 - -### Bug Fixes - -- For an LRO PUT operation the final GET URL was incorrectly set to the Location polling header in some cases. - -## v11.3.0 - -### New Features - -- Added method `ServicePrincipalToken()` to `DeviceFlowConfig` type. - -## v11.2.8 - -### Bug Fixes - -- Deprecate content in the `version` package. The functionality has been superseded by content in the `autorest` package. - -## v11.2.7 - -### Bug Fixes - -- Fix environment variable name for enabling tracing from `AZURE_SDK_TRACING_ENABELD` to `AZURE_SDK_TRACING_ENABLED`. - Note that for backward compatibility reasons, both will work until the next major version release of the package. - -## v11.2.6 - -### Bug Fixes - -- If zero bytes are read from a polling response body don't attempt to unmarshal them. - -## v11.2.5 - -### Bug Fixes - -- Removed race condition in `autorest.DoRetryForStatusCodes`. - -## v11.2.4 - -### Bug Fixes - -- Function `cli.ProfilePath` now respects environment `AZURE_CONFIG_DIR` if available. - -## v11.2.1 - -NOTE: Versions of Go prior to 1.10 have been removed from CI as they no -longer work with golint. - -### Bug Fixes - -- Method `MSIConfig.Authorizer` now supports user-assigned identities. -- The adal package now reports its own user-agent string. - -## v11.2.0 - -### New Features - -- Added `tracing` package that enables instrumentation of HTTP and API calls. - Setting the env variable `AZURE_SDK_TRACING_ENABLED` or calling `tracing.Enable` - will start instrumenting the code for metrics and traces. - Additionally, setting the env variable `OCAGENT_TRACE_EXPORTER_ENDPOINT` or - calling `tracing.EnableWithAIForwarding` will start the instrumentation and connect to an - App Insights Local Forwarder that needs to be running. Note that if the - AI Local Forwarder is not running, tracing will still be enabled. - By default, instrumentation is disabled. Once enabled, instrumentation can also - be programmatically disabled by calling `Disable`. -- Added `DoneWithContext` call for checking LRO status. `Done` has been deprecated. - -### Bug Fixes - -- Don't use the initial request's context for LRO polling. -- Don't override the `refreshLock` and the `http.Client` when unmarshalling `ServicePrincipalToken` if - it is already set. - -## v11.1.1 - -### Bug Fixes - -- When creating a future always include the polling tracker even if there's a failure; this allows the underlying response to be obtained by the caller. - -## v11.1.0 - -### New Features - -- Added `auth.NewAuthorizerFromCLI` to create an authorizer configured from the Azure 2.0 CLI. -- Added `adal.NewOAuthConfigWithAPIVersion` to create an OAuthConfig with the specified API version. - -## v11.0.1 - -### New Features - -- Added `x5c` header to client assertion for certificate Issuer+Subject Name authentication.
- -## v11.0.0 - -### Breaking Changes - -- To handle differences between ADFS and AAD the following fields have had their types changed from `string` to `json.Number` - - ExpiresIn - - ExpiresOn - - NotBefore - -### New Features - -- Added `auth.NewAuthorizerFromFileWithResource` to create an authorizer from the config file with the specified resource. -- Setting a client's `PollingDuration` to zero will use the provided context to control an LRO's polling duration. - -## v10.15.5 - -### Bug Fixes - -- In `DoRetryForStatusCodes`, if a request's context is cancelled return the last response. - -## v10.15.4 - -### Bug Fixes - -- If a polling operation returns a failure status code return the associated error. - -## v10.15.3 - -### Bug Fixes - -- Initialize the polling URL and method for an LRO tracker on each iteration, favoring the Azure-AsyncOperation header. - -## v10.15.2 - -### Bug Fixes - -- Use fmt.Fprint when printing request/response so that any escape sequences aren't treated as format specifiers. - -## v10.15.1 - -### Bug Fixes - -- If an LRO API returns a `Failed` provisioning state in the initial response return an error at that point so the caller doesn't have to poll. -- For failed LROs without an OData v4 error include the response body in the error's `AdditionalInfo` field to aid in diagnosing the failure. - -## v10.15.0 - -### New Features - -- Add initial support for request/response logging via setting environment variables. - Setting `AZURE_GO_SDK_LOG_LEVEL` to `LogInfo` will log request/response - without their bodies. To include the bodies set the log level to `LogDebug`. - By default the logger writes to stderr; however, it can also write to stdout or a file - if specified in `AZURE_GO_SDK_LOG_FILE`. Note that if the specified file - already exists it will be truncated. - IMPORTANT: by default the logger will redact the Authorization and Ocp-Apim-Subscription-Key - headers. Any other secrets will _not_ be redacted. - -## v10.14.0 - -### New Features - -- Added package version that contains version constants and user-agent data. - -### Bug Fixes - -- Add the user-agent to token requests. - -## v10.13.0 - -- Added support for additionalInfo in ServiceError type. - -## v10.12.0 - -### New Features - -- Added field ServicePrincipalToken.MaxMSIRefreshAttempts to configure the maximum number of attempts to refresh an MSI token. - -## v10.11.4 - -### Bug Fixes - -- If an LRO returns http.StatusOK on the initial response with no async headers return the response body from Future.GetResult(). -- If there is no "final GET URL" return an error from Future.GetResult(). - -## v10.11.3 - -### Bug Fixes - -- In IMDS retry logic, if we don't receive a response don't retry. - - Renamed the retry function so it's clear it's meant for IMDS only. -- For error response bodies that aren't OData-v4 compliant stick the raw JSON in the ServiceError.Details field so the information isn't lost. - - Also add the raw HTTP response to the DetailedResponse. -- Removed superfluous wrapping of response error in azure.DoRetryWithRegistration(). - -## v10.11.2 - -### Bug Fixes - -- Validation for integers handles int and int64 types. - -## v10.11.1 - -### Bug Fixes - -- Adding User information to authorization config as parsed from CLI cache.
- -## v10.11.0 - -### New Features - -- Added NewServicePrincipalTokenFromManualTokenSecret for creating a new SPT using a manual token and secret -- Added method ServicePrincipalToken.MarshalTokenJSON() to marshal the inner Token - -## v10.10.0 - -### New Features - -- Most ServicePrincipalTokens can now be marshalled/unmarshalled to/from JSON (ServicePrincipalCertificateSecret and ServicePrincipalMSISecret are not supported). -- Added method ServicePrincipalToken.SetRefreshCallbacks(). - -## v10.9.2 - -### Bug Fixes - -- Refreshing a refresh token obtained from a web app authorization code now works. - -## v10.9.1 - -### Bug Fixes - -- The retry logic for MSI token requests now uses exponential backoff per the guidelines. -- IsTemporaryNetworkError() will return true for errors that don't implement the net.Error interface. - -## v10.9.0 - -### Deprecated Methods - -| Old Method | New Method | -| -------------------------: | :---------------------------: | -| azure.NewFuture() | azure.NewFutureFromResponse() | -| Future.WaitForCompletion() | Future.WaitForCompletionRef() | - -### New Features - -- Added azure.NewFutureFromResponse() for creating a Future from the initial response from an async operation. -- Added Future.GetResult() for making the final GET call to retrieve the result from an async operation. - -### Bug Fixes - -- Some futures failed to return their results; this should now be fixed. - -## v10.8.2 - -### Bug Fixes - -- Add nil-guard to token retry logic. - -## v10.8.1 - -### Bug Fixes - -- Return a TokenRefreshError if the sender fails on the initial request. -- Don't retry on non-temporary network errors. - -## v10.8.0 - -- Added NewAuthorizerFromEnvironmentWithResource() helper function. - -## v10.7.0 - -### New Features - -- Added \*WithContext() methods to ADAL token refresh operations. - -## v10.6.2 - -- Fixed a bug on device authentication. - -## v10.6.1 - -- Added retries to MSI token get request. - -## v10.6.0 - -- Changed MSI token implementation. Now, the token endpoint is the IMDS endpoint. - -## v10.5.1 - -### Bug Fixes - -- `DeviceFlowConfig.Authorizer()` now prints the device code message when running `go test`. `-v` flag is required. - -## v10.5.0 - -### New Features - -- Added NewPollingRequestWithContext() for use with polling asynchronous operations. - -### Bug Fixes - -- Make retry logic use the request's context instead of the deprecated Cancel object. - -## v10.4.0 - -### New Features - -- Added helper for parsing Azure Resource IDs. -- Added deprecation message to utils.GetEnvVarOrExit(). - -## v10.3.0 - -### New Features - -- Added EnvironmentFromURL method to load an Environment from a given URL. This function is particularly useful in the private and hybrid Cloud model, where one may define their own endpoints. -- Added TokenAudience endpoint to Environment structure. This is useful in private and hybrid cloud models where TokenAudience endpoint can be different from ResourceManagerEndpoint. - -## v10.2.0 - -### New Features - -- Added endpoints for batch management. - -## v10.1.3 - -### Bug Fixes - -- In Client.Do() invoke WithInspection() last so that it will inspect WithAuthorization(). -- Fixed authorization methods to invoke p.Prepare() first, aligning them with the other preparers. - -## v10.1.2 - -- Corrected comment for auth.NewAuthorizerFromFile() function. - -## v10.1.1 - -- Updated version number to match current release. - -## v10.1.0 - -### New Features - -- Expose the polling URL for futures.
- -### Bug Fixes - -- Add validation.NewErrorWithValidationError back to prevent breaking changes (it is deprecated). - -## v10.0.0 - -### New Features - -- Added target and innererror fields to ServiceError to comply with OData v4 spec. -- The Done() method on futures will now return a ServiceError object when available (it used to return a partial value of such errors). -- Added helper methods for obtaining authorizers. -- Expose the polling URL for futures. - -### Bug Fixes - -- Switched from glide to dep for dependency management. -- Fixed unmarshaling of ServiceError for JSON bodies that don't conform to the OData spec. -- Fixed a race condition in token refresh. - -### Breaking Changes - -- The ServiceError.Details field type has been changed to match the OData v4 spec. -- Go v1.7 has been dropped from CI. -- API parameter validation failures will now return a unique error type validation.Error. -- The adal.Token type has been decomposed from adal.ServicePrincipalToken (this was necessary in order to fix the token refresh race). - -## v9.10.0 - -- Fix the Service Bus suffix in Azure public env. -- Add Service Bus Endpoint (AAD ResourceURI) for use in [Azure Service Bus RBAC Preview](https://docs.microsoft.com/en-us/azure/service-bus-messaging/service-bus-role-based-access-control). - -## v9.9.0 - -### New Features - -- Added EventGridKeyAuthorizer for key authorization with event grid topics. - -### Bug Fixes - -- Fixed race condition when auto-refreshing service principal tokens. - -## v9.8.1 - -### Bug Fixes - -- Added http.StatusNoContent (204) to the list of expected status codes for long-running operations. -- Updated runtime version info so it's current. - -## v9.8.0 - -### New Features - -- Added type azure.AsyncOpIncompleteError to be returned from a future's Result() method when the operation has not completed. - -## v9.7.1 - -### Bug Fixes - -- Use correct AAD and Graph endpoints for US Gov environment. - -## v9.7.0 - -### New Features - -- Added support for application/octet-stream MIME types. - -## v9.6.1 - -### Bug Fixes - -- Ensure Authorization header is added to request when polling for registration status. - -## v9.6.0 - -### New Features - -- Added support for acquiring tokens via MSI with a user-assigned identity. - -## v9.5.3 - -### Bug Fixes - -- Don't remove encoding of existing URL Query parameters when calling autorest.WithQueryParameters. -- Set correct Content Type when using autorest.WithFormData. - -## v9.5.2 - -### Bug Fixes - -- Check for nil \*http.Response before dereferencing it. - -## v9.5.1 - -### Bug Fixes - -- Don't count http.StatusTooManyRequests (429) against the retry cap. -- Use retry logic when SkipResourceProviderRegistration is set to true. - -## v9.5.0 - -### New Features - -- Added support for username + password, API key, authorization code and cognitive services authentication. -- Added field SkipResourceProviderRegistration to clients to provide a way to skip auto-registration of RPs. -- Added utility function AsStringSlice() to convert its parameters to a string slice. - -### Bug Fixes - -- When checking for authentication failures look at the error type not the status code as it could vary. - -## v9.4.2 - -### Bug Fixes - -- Validate parameters when creating credentials. -- Don't retry requests if the returned status is a 401 (http.StatusUnauthorized) as it will never succeed. - -## v9.4.1 - -### Bug Fixes - -- Update the AccessTokensPath() to read access tokens path through AZURE_ACCESS_TOKEN_FILE.
If this - environment variable is not set, it will fall back to the default path set by Azure CLI. -- Use case-insensitive string comparison for polling states. - -## v9.4.0 - -### New Features - -- Added WaitForCompletion() to Future as a default polling implementation. - -### Bug Fixes - -- Method Future.Done() shouldn't update polling status for unexpected HTTP status codes. - -## v9.3.1 - -### Bug Fixes - -- DoRetryForStatusCodes will retry if sender.Do returns a non-nil error. - -## v9.3.0 - -### New Features - -- Added PollingMethod() to Future so callers know what kind of polling mechanism is used. -- Added azure.ChangeToGet() which transforms an http.Request into a GET (to be used with LROs). - -## v9.2.0 - -### New Features - -- Added support for custom Azure Stack endpoints. -- Added type azure.Future used to track the status of long-running operations. - -### Bug Fixes - -- Preserve the original error in DoRetryWithRegistration when registration fails. - -## v9.1.1 - -- Fixes a bug regarding the cookie jar on `autorest.Client.Sender`. - -## v9.1.0 - -### New Features - -- In cases where there is a non-empty error from the service, attempt to unmarshal it instead of uniformly calling it an "Unknown" error. -- Support for loading Azure CLI Authentication files. -- Automatically register your subscription with the Azure Resource Provider if it hadn't been previously. - -### Bug Fixes - -- RetriableRequest can now tolerate a ReadSeekable body being read but not reset. -- Adding missing Apache headers. - -## v9.0.0 - -> **IMPORTANT:** This release was initially labeled incorrectly as `v8.4.0`. From the time it was released, it should have been marked `v9.0.0` because it contains breaking changes to the MSI packages. We apologize for any inconvenience this causes. - -Adding MSI Endpoint Support and CLI token rehydration. - -## v8.3.1 - -Pick up bug fix in adal for MSI support. - -## v8.3.0 - -Updates to Error string formats for clarity. Also, adding a copy of the http.Response to errors for an improved debugging experience. - -## v8.2.0 - -### New Features - -- Add support for bearer authentication callbacks -- Support 429 response codes that include "Retry-After" header -- Support validation constraint "Pattern" for map keys - -### Bug Fixes - -- Make RetriableRequest work with multiple versions of Go - -## v8.1.1 - -Updates the RetriableRequest to take advantage of GetBody() added in Go 1.8. - -## v8.1.0 - -Adds RetriableRequest type for more efficient handling of retrying HTTP requests. - -## v8.0.0 - -ADAL refactored into its own package. -Support for UNIX time. - -## v7.3.1 - -- Version Testing now removed from production bits that are shipped with the library. - -## v7.3.0 - -- Exposing new `RespondDecorator`, `ByDiscardingBody`. This allows operations - to acknowledge that they do not need either the entire or a trailing portion - of a response body. In doing so, Go's http library can reuse HTTP - connections more readily. -- Adding `PrepareDecorator` to target custom BaseURLs. -- Adding ACR suffix to public cloud environment. -- Updating Glide dependencies. - -## v7.2.5 - -- Fixed the Active Directory endpoint for the China cloud. -- Removes UTF-8 BOM if present in response payload. -- Added telemetry. - -## v7.2.3 - -- Fixing bug in calls to `DelayForBackoff` that caused doubling of delay - duration. - -## v7.2.2 - -- autorest/azure: added ASM and ARM VM DNS suffixes. - -## v7.2.1 - -- fixed parsing of UTC times that are not RFC3339 conformant.
- -## v7.2.0 - -- autorest/validation: Reformat validation error for better error message. - -## v7.1.0 - -- preparer: Added support for multipart formdata - WithMultiPartFormdata() -- preparer: Added support for sending file in request body - WithFile -- client: Added RetryDuration parameter. -- autorest/validation: new package for validation code for Azure Go SDK. - -## v7.0.7 - -- Add trailing / to endpoint -- azure: add EnvironmentFromName - -## v7.0.6 - -- Add retry logic for 408, 500, 502, 503 and 504 status codes. -- Change url path and query encoding logic. -- Fix DelayForBackoff for proper exponential delay. -- Add CookieJar in Client. - -## v7.0.5 - -- Add check to start polling only when status is in [200,201,202]. -- Refactoring for unchecked errors. -- azure/persist changes. -- Fix 'file in use' issue in renewing token in deviceflow. -- Store header RetryAfter for subsequent requests in polling. -- Add attribute details in service error. - -## v7.0.4 - -- Better error messages for long running operation failures - -## v7.0.3 - -- Corrected DoPollForAsynchronous to properly handle the initial response - -## v7.0.2 - -- Corrected DoPollForAsynchronous to continue using the polling method first discovered - -## v7.0.1 - -- Fixed empty JSON input error in ByUnmarshallingJSON -- Fixed polling support for GET calls -- Changed format name from TimeRfc1123 to TimeRFC1123 - -## v7.0.0 - -- Added ByCopying responder with supporting TeeReadCloser -- Rewrote Azure asynchronous handling -- Reverted to only unmarshalling JSON -- Corrected handling of RFC3339 time strings and added support for Rfc1123 time format - -The `json.Decoder` does not catch bad data as thoroughly as `json.Unmarshal`. Since -`encoding/json` successfully deserializes all core types, and extended types normally provide -their custom JSON serialization handlers, the code has been reverted back to using -`json.Unmarshal`. The original change to use `json.Decode` was made to reduce duplicate -code; there is no loss of function, and there is a gain in accuracy, by reverting. - -Additionally, Azure services indicate requests to be polled by multiple means. The existing code -only checked for one of those (that is, the presence of the `Azure-AsyncOperation` header). -The new code correctly covers all cases and aligns with the other Azure SDKs. - -## v6.1.0 - -- Introduced `date.ByUnmarshallingJSONDate` and `date.ByUnmarshallingJSONTime` to enable JSON encoded values. - -## v6.0.0 - -- Completely reworked the handling of polled and asynchronous requests -- Removed unnecessary routines -- Reworked `mocks.Sender` to replay a series of `http.Response` objects -- Added `PrepareDecorators` for primitive types (e.g., bool, int32) - -Handling polled and asynchronous requests is no longer part of `Client#Send`. Instead new -`SendDecorators` implement different styles of polled behavior. See `autorest.DoPollForStatusCodes` -and `azure.DoPollForAsynchronous` for examples. - -## v5.0.0 - -- Added new RespondDecorators unmarshalling primitive types -- Corrected application of inspection and authorization PrependDecorators - -## v4.0.0 - -- Added support for Azure long-running operations. -- Added cancellation support to all decorators and functions that may delay. -- Breaking: `DelayForBackoff` now accepts a channel, which may be nil. - -## v3.1.0 - -- Add support for OAuth Device Flow authorization. -- Add support for ServicePrincipalTokens that are backed by an existing token, rather than other secret material.
-- Add helpers for persisting and restoring Tokens. -- Increased code coverage in the github.com/Azure/autorest/azure package - -## v3.0.0 - -- Breaking: `NewErrorWithError` no longer takes `statusCode int`. -- Breaking: `NewErrorWithStatusCode` is replaced with `NewErrorWithResponse`. -- Breaking: `Client#Send()` no longer takes `codes ...int` argument. -- Add: XML unmarshaling support with `ByUnmarshallingXML()` -- Stopped vending dependencies locally and switched to [Glide](https://github.com/Masterminds/glide). - Applications using this library should either use Glide or vendor dependencies locally some other way. -- Add: `azure.WithErrorUnlessStatusCode()` decorator to handle Azure errors. -- Fix: use `net/http.DefaultClient` as base client. -- Fix: Missing inspection for polling responses added. -- Add: CopyAndDecode helpers. -- Improved `./autorest/to` with `[]string` helpers. -- Removed golint suppressions in .travis.yml. - -## v2.1.0 - -- Added `StatusCode` to `Error` for more easily obtaining the HTTP Response StatusCode (if any) - -## v2.0.0 - -- Changed `to.StringMapPtr` method signature to return a pointer -- Changed `ServicePrincipalCertificateSecret` and `NewServicePrincipalTokenFromCertificate` to support generic certificate and private keys - -## v1.0.0 - -- Added Logging inspectors to trace http.Request / Response -- Added support for User-Agent header -- Changed WithHeader PrepareDecorator to use set vs. add -- Added JSON to error when unmarshalling fails -- Added Client#Send method -- Corrected case of "Azure" in package paths -- Added "to" helpers, Azure helpers, and improved ease-of-use -- Corrected golint issues - -## v1.0.1 - -- Added CHANGELOG.md - -## v1.1.0 - -- Added mechanism to retrieve a ServicePrincipalToken using a certificate-signed JWT -- Added an example of creating a certificate-based ServicePrincipal and retrieving an OAuth token using the certificate - -## v1.1.1 - -- Introduce godeps and vendor dependencies introduced in v1.1.1 diff --git a/vendor/github.com/Azure/go-autorest/GNUmakefile b/vendor/github.com/Azure/go-autorest/GNUmakefile deleted file mode 100644 index a434e73..0000000 --- a/vendor/github.com/Azure/go-autorest/GNUmakefile +++ /dev/null @@ -1,23 +0,0 @@ -DIR?=./autorest/ - -default: build - -build: fmt - go install $(DIR) - -test: - go test $(DIR) || exit 1 - -vet: - @echo "go vet ." - @go vet $(DIR)... ; if [ $$? -eq 1 ]; then \ - echo ""; \ - echo "Vet found suspicious constructs. Please check the reported constructs"; \ - echo "and fix them if necessary before submitting the code for review."; \ - exit 1; \ - fi - -fmt: - gofmt -w $(DIR) - -.PHONY: build test vet fmt diff --git a/vendor/github.com/Azure/go-autorest/Gopkg.lock b/vendor/github.com/Azure/go-autorest/Gopkg.lock deleted file mode 100644 index dc6e3e6..0000000 --- a/vendor/github.com/Azure/go-autorest/Gopkg.lock +++ /dev/null @@ -1,324 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
- - -[[projects]] - digest = "1:892e39e5c083d0943f1e80ab8351690f183c6a5ab24e1d280adcad424c26255e" - name = "contrib.go.opencensus.io/exporter/ocagent" - packages = ["."] - pruneopts = "UT" - revision = "a8a6f458bbc1d5042322ad1f9b65eeb0b69be9ea" - version = "v0.6.0" - -[[projects]] - digest = "1:8f5acd4d4462b5136af644d25101f0968a7a94ee90fcb2059cec5b7cc42e0b20" - name = "github.com/census-instrumentation/opencensus-proto" - packages = [ - "gen-go/agent/common/v1", - "gen-go/agent/metrics/v1", - "gen-go/agent/trace/v1", - "gen-go/metrics/v1", - "gen-go/resource/v1", - "gen-go/trace/v1", - ] - pruneopts = "UT" - revision = "d89fa54de508111353cb0b06403c00569be780d8" - version = "v0.2.1" - -[[projects]] - digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" - name = "github.com/davecgh/go-spew" - packages = ["spew"] - pruneopts = "UT" - revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" - version = "v1.1.1" - -[[projects]] - digest = "1:76dc72490af7174349349838f2fe118996381b31ea83243812a97e5a0fd5ed55" - name = "github.com/dgrijalva/jwt-go" - packages = ["."] - pruneopts = "UT" - revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e" - version = "v3.2.0" - -[[projects]] - digest = "1:cf0d2e435fd4ce45b789e93ef24b5f08e86be0e9807a16beb3694e2d8c9af965" - name = "github.com/dimchansky/utfbom" - packages = ["."] - pruneopts = "UT" - revision = "d2133a1ce379ef6fa992b0514a77146c60db9d1c" - version = "v1.1.0" - -[[projects]] - branch = "master" - digest = "1:b7cb6054d3dff43b38ad2e92492f220f57ae6087ee797dca298139776749ace8" - name = "github.com/golang/groupcache" - packages = ["lru"] - pruneopts = "UT" - revision = "611e8accdfc92c4187d399e95ce826046d4c8d73" - -[[projects]] - digest = "1:e3839df32927e8d3403cd5aa7253d966e8ff80fc8f10e2e35d146461cd83fcfa" - name = "github.com/golang/protobuf" - packages = [ - "descriptor", - "jsonpb", - "proto", - "protoc-gen-go/descriptor", - "ptypes", - "ptypes/any", - "ptypes/duration", - "ptypes/struct", - "ptypes/timestamp", - "ptypes/wrappers", - ] - pruneopts = "UT" - revision = "6c65a5562fc06764971b7c5d05c76c75e84bdbf7" - version = "v1.3.2" - -[[projects]] - digest = "1:c560cd79300fac84f124b96225181a637a70b60155919a3c36db50b7cca6b806" - name = "github.com/grpc-ecosystem/grpc-gateway" - packages = [ - "internal", - "runtime", - "utilities", - ] - pruneopts = "UT" - revision = "f7120437bb4f6c71f7f5076ad65a45310de2c009" - version = "v1.12.1" - -[[projects]] - digest = "1:5d231480e1c64a726869bc4142d270184c419749d34f167646baa21008eb0a79" - name = "github.com/mitchellh/go-homedir" - packages = ["."] - pruneopts = "UT" - revision = "af06845cf3004701891bf4fdb884bfe4920b3727" - version = "v1.1.0" - -[[projects]] - digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe" - name = "github.com/pmezard/go-difflib" - packages = ["difflib"] - pruneopts = "UT" - revision = "792786c7400a136282c1664665ae0a8db921c6c2" - version = "v1.0.0" - -[[projects]] - digest = "1:99d32780e5238c2621fff621123997c3e3cca96db8be13179013aea77dfab551" - name = "github.com/stretchr/testify" - packages = [ - "assert", - "require", - ] - pruneopts = "UT" - revision = "221dbe5ed46703ee255b1da0dec05086f5035f62" - version = "v1.4.0" - -[[projects]] - digest = "1:7c5e00383399fe13de0b4b65c9fdde16275407ce8ac02d867eafeaa916edcc71" - name = "go.opencensus.io" - packages = [ - ".", - "internal", - "internal/tagencoding", - "metric/metricdata", - "metric/metricproducer", - "plugin/ocgrpc", - "plugin/ochttp", - "plugin/ochttp/propagation/b3", - 
"plugin/ochttp/propagation/tracecontext", - "resource", - "stats", - "stats/internal", - "stats/view", - "tag", - "trace", - "trace/internal", - "trace/propagation", - "trace/tracestate", - ] - pruneopts = "UT" - revision = "aad2c527c5defcf89b5afab7f37274304195a6b2" - version = "v0.22.2" - -[[projects]] - branch = "master" - digest = "1:f604f5e2ee721b6757d962dfe7bab4f28aae50c456e39cfb2f3819762a44a6ae" - name = "golang.org/x/crypto" - packages = [ - "pkcs12", - "pkcs12/internal/rc2", - ] - pruneopts = "UT" - revision = "e9b2fee46413994441b28dfca259d911d963dfed" - -[[projects]] - branch = "master" - digest = "1:334b27eac455cb6567ea28cd424230b07b1a64334a2f861a8075ac26ce10af43" - name = "golang.org/x/lint" - packages = [ - ".", - "golint", - ] - pruneopts = "UT" - revision = "fdd1cda4f05fd1fd86124f0ef9ce31a0b72c8448" - -[[projects]] - branch = "master" - digest = "1:257a75d024975428ab9192bfc334c3490882f8cb21322ea5784ca8eca000a910" - name = "golang.org/x/net" - packages = [ - "http/httpguts", - "http2", - "http2/hpack", - "idna", - "internal/timeseries", - "trace", - ] - pruneopts = "UT" - revision = "1ddd1de85cb0337b623b740a609d35817d516a8d" - -[[projects]] - branch = "master" - digest = "1:382bb5a7fb4034db3b6a2d19e5a4a6bcf52f4750530603c01ca18a172fa3089b" - name = "golang.org/x/sync" - packages = ["semaphore"] - pruneopts = "UT" - revision = "cd5d95a43a6e21273425c7ae415d3df9ea832eeb" - -[[projects]] - branch = "master" - digest = "1:4da420ceda5f68e8d748aa2169d0ed44ffadb1bbd6537cf778a49563104189b8" - name = "golang.org/x/sys" - packages = ["unix"] - pruneopts = "UT" - revision = "ce4227a45e2eb77e5c847278dcc6a626742e2945" - -[[projects]] - digest = "1:8d8faad6b12a3a4c819a3f9618cb6ee1fa1cfc33253abeeea8b55336721e3405" - name = "golang.org/x/text" - packages = [ - "collate", - "collate/build", - "internal/colltab", - "internal/gen", - "internal/language", - "internal/language/compact", - "internal/tag", - "internal/triegen", - "internal/ucd", - "language", - "secure/bidirule", - "transform", - "unicode/bidi", - "unicode/cldr", - "unicode/norm", - "unicode/rangetable", - ] - pruneopts = "UT" - revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475" - version = "v0.3.2" - -[[projects]] - branch = "master" - digest = "1:4eb5ea8395fb60212dd58b92c9db80bab59d5e99c7435f9a6a0a528c373b60e7" - name = "golang.org/x/tools" - packages = [ - "go/ast/astutil", - "go/gcexportdata", - "go/internal/gcimporter", - "go/types/typeutil", - ] - pruneopts = "UT" - revision = "259af5ff87bdcd4abf2ecda8edc3f13f04f26a42" - -[[projects]] - digest = "1:964bb30febc27fabfbec4759fa530c6ec35e77a7c85fed90b9317ea39a054877" - name = "google.golang.org/api" - packages = ["support/bundler"] - pruneopts = "UT" - revision = "8a410c21381766a810817fd6200fce8838ecb277" - version = "v0.14.0" - -[[projects]] - branch = "master" - digest = "1:a8d5c2c6e746b3485e36908ab2a9e3d77b86b81f8156d88403c7d2b462431dfd" - name = "google.golang.org/genproto" - packages = [ - "googleapis/api/httpbody", - "googleapis/rpc/status", - "protobuf/field_mask", - ] - pruneopts = "UT" - revision = "51378566eb590fa106d1025ea12835a4416dda84" - -[[projects]] - digest = "1:b59ce3ddb11daeeccccc9cb3183b58ebf8e9a779f1c853308cd91612e817a301" - name = "google.golang.org/grpc" - packages = [ - ".", - "backoff", - "balancer", - "balancer/base", - "balancer/roundrobin", - "binarylog/grpc_binarylog_v1", - "codes", - "connectivity", - "credentials", - "credentials/internal", - "encoding", - "encoding/proto", - "grpclog", - "internal", - "internal/backoff", - "internal/balancerload", 
- "internal/binarylog", - "internal/buffer", - "internal/channelz", - "internal/envconfig", - "internal/grpcrand", - "internal/grpcsync", - "internal/resolver/dns", - "internal/resolver/passthrough", - "internal/syscall", - "internal/transport", - "keepalive", - "metadata", - "naming", - "peer", - "resolver", - "serviceconfig", - "stats", - "status", - "tap", - ] - pruneopts = "UT" - revision = "1a3960e4bd028ac0cec0a2afd27d7d8e67c11514" - version = "v1.25.1" - -[[projects]] - digest = "1:b75b3deb2bce8bc079e16bb2aecfe01eb80098f5650f9e93e5643ca8b7b73737" - name = "gopkg.in/yaml.v2" - packages = ["."] - pruneopts = "UT" - revision = "1f64d6156d11335c3f22d9330b0ad14fc1e789ce" - version = "v2.2.7" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - input-imports = [ - "contrib.go.opencensus.io/exporter/ocagent", - "github.com/dgrijalva/jwt-go", - "github.com/dimchansky/utfbom", - "github.com/mitchellh/go-homedir", - "github.com/stretchr/testify/require", - "go.opencensus.io/plugin/ochttp", - "go.opencensus.io/plugin/ochttp/propagation/tracecontext", - "go.opencensus.io/stats/view", - "go.opencensus.io/trace", - "golang.org/x/crypto/pkcs12", - "golang.org/x/lint/golint", - ] - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/vendor/github.com/Azure/go-autorest/Gopkg.toml b/vendor/github.com/Azure/go-autorest/Gopkg.toml deleted file mode 100644 index 1fc2865..0000000 --- a/vendor/github.com/Azure/go-autorest/Gopkg.toml +++ /dev/null @@ -1,59 +0,0 @@ -# Gopkg.toml example -# -# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html -# for detailed Gopkg.toml documentation. -# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" -# -# [prune] -# non-go = false -# go-tests = true -# unused-packages = true - -required = ["golang.org/x/lint/golint"] - -[prune] - go-tests = true - unused-packages = true - -[[constraint]] - name = "contrib.go.opencensus.io/exporter/ocagent" - version = "0.6.0" - -[[constraint]] - name = "github.com/dgrijalva/jwt-go" - version = "3.2.0" - -[[constraint]] - name = "github.com/dimchansky/utfbom" - version = "1.1.0" - -[[constraint]] - name = "github.com/mitchellh/go-homedir" - version = "1.1.0" - -[[constraint]] - name = "github.com/stretchr/testify" - version = "1.3.0" - -[[constraint]] - name = "go.opencensus.io" - version = "0.22.0" - -[[constraint]] - branch = "master" - name = "golang.org/x/crypto" diff --git a/vendor/github.com/Azure/go-autorest/LICENSE b/vendor/github.com/Azure/go-autorest/LICENSE deleted file mode 100644 index b9d6a27..0000000 --- a/vendor/github.com/Azure/go-autorest/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Microsoft Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/Azure/go-autorest/README.md b/vendor/github.com/Azure/go-autorest/README.md deleted file mode 100644 index de1e19a..0000000 --- a/vendor/github.com/Azure/go-autorest/README.md +++ /dev/null @@ -1,165 +0,0 @@ -# go-autorest - -[![GoDoc](https://godoc.org/github.com/Azure/go-autorest/autorest?status.png)](https://godoc.org/github.com/Azure/go-autorest/autorest) -[![Build Status](https://dev.azure.com/azure-sdk/public/_apis/build/status/go/Azure.go-autorest?branchName=master)](https://dev.azure.com/azure-sdk/public/_build/latest?definitionId=625&branchName=master) -[![Go Report Card](https://goreportcard.com/badge/Azure/go-autorest)](https://goreportcard.com/report/Azure/go-autorest) - -Package go-autorest provides an HTTP request client for use with [Autorest](https://github.com/Azure/autorest.go)-generated API client packages. - -An authentication client tested with Azure Active Directory (AAD) is also -provided in this repo in the package -`github.com/Azure/go-autorest/autorest/adal`. Despite its name, this package -is maintained only as part of the Azure Go SDK and is not related to other -"ADAL" libraries in [github.com/AzureAD](https://github.com/AzureAD). - -## Overview - -Package go-autorest implements an HTTP request pipeline suitable for use across -multiple goroutines and provides the shared routines used by packages generated -by [Autorest](https://github.com/Azure/autorest.go). - -The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending, -and Responding. A typical pattern is: - -```go - req, err := Prepare(&http.Request{}, - token.WithAuthorization()) - - resp, err := Send(req, - WithLogging(logger), - DoErrorIfStatusCode(http.StatusInternalServerError), - DoCloseIfError(), - DoRetryForAttempts(5, time.Second)) - - err = Respond(resp, - ByDiscardingBody(), - ByClosing()) -``` - -Each phase relies on decorators to modify and / or manage processing. Decorators may first modify -and then pass the data along, pass the data first and then modify the result, or wrap themselves -around passing the data (such as a logger might do). Decorators run in the order provided. For -example, the following: - -```go - req, err := Prepare(&http.Request{}, - WithBaseURL("https://microsoft.com/"), - WithPath("a"), - WithPath("b"), - WithPath("c")) -``` - -will set the URL to: - -``` - https://microsoft.com/a/b/c -``` - -Preparers and Responders may be shared and re-used (assuming the underlying decorators support -sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders -shared among multiple go-routines, and a single Sender shared among multiple sending go-routines, -all bound together by means of input / output channels. - -Decorators hold their passed state within a closure (such as the path components in the example -above). Be careful to share Preparers and Responders only in a context where such held state -applies. For example, it may not make sense to share a Preparer that applies a query string from a -fixed set of values. Similarly, sharing a Responder that reads the response body into a passed -struct (e.g., `ByUnmarshallingJSON`) is likely incorrect. - -Errors raised by autorest objects and methods will conform to the `autorest.Error` interface. - -See the included examples for more detail. For details on the suggested use of this package by -generated clients, see the Client described below. (A short sketch of a custom `PrepareDecorator` follows this overview.)
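To make the closure-held-state point above concrete, here is a hedged sketch of a custom decorator. `withHeaderIfMissing` is invented for illustration; `autorest.Prepare`, `autorest.Preparer`, `autorest.PreparerFunc`, `autorest.WithBaseURL`, and `autorest.WithPath` are the building blocks this README describes.

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

// withHeaderIfMissing is a hypothetical custom PrepareDecorator. The header
// name and value live in the closure, which is exactly the held-state caveat
// described above: share this decorator only where that state applies.
func withHeaderIfMissing(name, value string) autorest.PrepareDecorator {
	return func(p autorest.Preparer) autorest.Preparer {
		return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) {
			// Run the earlier decorators first, then modify the result.
			r, err := p.Prepare(r)
			if err != nil {
				return r, err
			}
			if r.Header == nil {
				r.Header = make(http.Header)
			}
			if r.Header.Get(name) == "" {
				r.Header.Set(name, value)
			}
			return r, nil
		})
	}
}

func main() {
	req, err := autorest.Prepare(&http.Request{},
		autorest.WithBaseURL("https://microsoft.com/"),
		autorest.WithPath("a"),
		withHeaderIfMissing("Accept", "application/json"))
	if err != nil {
		fmt.Println("prepare failed:", err)
		return
	}
	fmt.Println(req.URL, req.Header.Get("Accept")) // https://microsoft.com/a application/json
}
```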
- -## Helpers - -### Handling Swagger Dates - -The Swagger specification (https://swagger.io) that drives AutoRest -(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The -github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure correct -parsing and formatting. - -### Handling Empty Values - -In JSON, missing values have different semantics than empty values. This is especially true for -services using the HTTP PATCH verb. The JSON submitted with a PATCH request generally contains -only those values to modify. Missing values are to be left unchanged. Developers, then, require a -means to both specify an empty value and to leave the value out of the submitted JSON. - -The Go JSON package (`encoding/json`) supports the `omitempty` tag. When specified, it omits -empty values from the rendered JSON. Since Go defines default values for all base types (such as "" -for string and 0 for int) and provides no means to mark a value as actually empty, the JSON package -treats default values as meaning empty, omitting them from the rendered JSON. This means that, using -the Go base types encoded through the default JSON package, it is not possible to create JSON to -clear a value at the server. - -The workaround within the Go community is to use pointers to base types in lieu of base types within -structures that map to JSON. For example, instead of a value of type `string`, the workaround uses -`*string`. While this enables distinguishing empty values from those to be unchanged, creating -pointers to a base type (notably constant, in-line values) requires additional variables. This, for -example, - -```go - s := struct { - S *string - }{ S: &"foo" } -``` -fails, while this - -```go - v := "foo" - s := struct { - S *string - }{ S: &v } -``` -succeeds. - -To ease using pointers, the subpackage `to` contains helpers that convert to and from pointers for -Go base types which have Swagger analogs. It also provides a helper that converts between -`map[string]string` and `map[string]*string`, enabling the JSON to specify that the value -associated with a key should be cleared. With the helpers, the previous example becomes - -```go - s := struct { - S *string - }{ S: to.StringPtr("foo") } -``` - -## Install - -```bash -go get github.com/Azure/go-autorest/autorest -go get github.com/Azure/go-autorest/autorest/azure -go get github.com/Azure/go-autorest/autorest/date -go get github.com/Azure/go-autorest/autorest/to -``` - -### Using with Go Modules -In [v12.0.1](https://github.com/Azure/go-autorest/pull/386), this repository introduced the following modules. - -- autorest/adal -- autorest/azure/auth -- autorest/azure/cli -- autorest/date -- autorest/mocks -- autorest/to -- autorest/validation -- autorest -- logger -- tracing - -Tagging cumulative SDK releases as a whole (e.g. `v12.3.0`) is still enabled to support consumers of this repo that have not yet migrated to modules. - -## License - -See LICENSE file. - ------ - -This project has adopted the [Microsoft Open Source Code of -Conduct](https://opensource.microsoft.com/codeofconduct/). For more information -see the [Code of Conduct -FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact -[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional -questions or comments.
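A minimal, standard-library-only sketch of the empty-values point above: with a pointer field plus `omitempty`, a nil pointer means "leave the value unchanged" (the key is omitted), while a pointer to the empty string means "clear the value at the server" (the key is rendered with an explicit `""`). The `patch` type here is illustrative.

```go
package main

import (
	"encoding/json"
	"fmt"
)

type patch struct {
	// nil pointer: key omitted entirely ("leave unchanged").
	// pointer to "": key rendered with an empty string ("clear at the server").
	Name *string `json:"name,omitempty"`
}

func main() {
	unchanged, _ := json.Marshal(patch{}) // {}
	empty := ""
	cleared, _ := json.Marshal(patch{Name: &empty}) // {"name":""}
	fmt.Println(string(unchanged), string(cleared))
}
```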
diff --git a/vendor/github.com/Azure/go-autorest/autorest/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/LICENSE deleted file mode 100644 index b9d6a27..0000000 --- a/vendor/github.com/Azure/go-autorest/autorest/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Microsoft Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE deleted file mode 100644 index b9d6a27..0000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. 
Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Microsoft Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md deleted file mode 100644 index b11eb07..0000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md +++ /dev/null @@ -1,294 +0,0 @@ -# NOTE: This module will go out of support by March 31, 2023. For authenticating with Azure AD, use module [azidentity](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity) instead. For help migrating from `adal` to `azidentity` please consult the [migration guide](https://aka.ms/azsdk/go/identity/migration). General information about the retirement of this and other legacy modules can be found [here](https://azure.microsoft.com/updates/support-for-azure-sdk-libraries-that-do-not-conform-to-our-current-azure-sdk-guidelines-will-be-retired-as-of-31-march-2023/). - -# Azure Active Directory authentication for Go - -This is a standalone package for authenticating with Azure Active -Directory from other Go libraries and applications, in particular the [Azure SDK -for Go](https://github.com/Azure/azure-sdk-for-go). - -Note: Despite the package's name it is not related to other "ADAL" libraries -maintained in the [github.com/AzureAD](https://github.com/AzureAD) org. Issues -should be opened in [this repo's](https://github.com/Azure/go-autorest/issues) -or [the SDK's](https://github.com/Azure/azure-sdk-for-go/issues) issue -trackers. - -## Install - -```bash -go get -u github.com/Azure/go-autorest/autorest/adal -``` - -## Usage - -An Active Directory application is required in order to use this library. An application can be registered in the [Azure Portal](https://portal.azure.com/) by following these [guidelines](https://docs.microsoft.com/azure/active-directory/develop/active-directory-integrating-applications) or using the [Azure CLI](https://github.com/Azure/azure-cli). - -### Register an Azure AD Application with secret - - -1. Register a new application with a `secret` credential - - ``` - az ad app create \ - --display-name example-app \ - --homepage https://example-app/home \ - --identifier-uris https://example-app/app \ - --password secret - ``` - -2. Create a service principal using the `Application ID` from the previous step - - ``` - az ad sp create --id "Application ID" - ``` - - * Replace `Application ID` with `appId` from step 1. - -### Register an Azure AD Application with certificate - -1. Create a private key - - ``` - openssl genrsa -out "example-app.key" 2048 - ``` - -2. Create the certificate - - ``` - openssl req -new -key "example-app.key" -subj "/CN=example-app" -out "example-app.csr" - openssl x509 -req -in "example-app.csr" -signkey "example-app.key" -out "example-app.crt" -days 10000 - ``` - -3. Create the PKCS12 version of the certificate that also contains the private key - - ``` - openssl pkcs12 -export -out "example-app.pfx" -inkey "example-app.key" -in "example-app.crt" -passout pass: - - ``` - -4. Register a new application with the certificate content from `example-app.crt` - - ``` - certificateContents="$(tail -n+2 "example-app.crt" | head -n-1)" - - az ad app create \ - --display-name example-app \ - --homepage https://example-app/home \ - --identifier-uris https://example-app/app \ - --key-usage Verify --end-date 2018-01-01 \ - --key-value "${certificateContents}" - ``` - -5. Create a service principal using the `Application ID` from the previous step - - ``` - az ad sp create --id "APPLICATION_ID" - ``` - - * Replace `APPLICATION_ID` with `appId` from step 4.
- - -### Grant the necessary permissions - -Azure relies on a Role-Based Access Control (RBAC) model to manage the access to resources at a fine-grained -level. There is a set of [pre-defined roles](https://docs.microsoft.com/azure/active-directory/role-based-access-built-in-roles) -which can be assigned to a service principal of an Azure AD application depending on your needs. - -``` -az role assignment create --assignee "SERVICE_PRINCIPAL_ID" --role "ROLE_NAME" -``` - -* Replace the `SERVICE_PRINCIPAL_ID` with the `appId` from the previous step. -* Replace the `ROLE_NAME` with a role name of your choice. - -It is also possible to define custom role definitions. - -``` -az role definition create --role-definition role-definition.json -``` - -* Check [custom roles](https://docs.microsoft.com/azure/active-directory/role-based-access-control-custom-roles) for more details regarding the content of the `role-definition.json` file. - - -### Acquire Access Token - -The common configuration used by all flows: - -```Go -const activeDirectoryEndpoint = "https://login.microsoftonline.com/" -tenantID := "TENANT_ID" -oauthConfig, err := adal.NewOAuthConfig(activeDirectoryEndpoint, tenantID) - -applicationID := "APPLICATION_ID" - -callback := func(token adal.Token) error { - // This is called after the token is acquired - return nil -} - -// The resource for which the token is acquired -resource := "https://management.core.windows.net/" -``` - -* Replace the `TENANT_ID` with your tenant ID. -* Replace the `APPLICATION_ID` with the value from the previous section. - -#### Client Credentials - -```Go -applicationSecret := "APPLICATION_SECRET" - -spt, err := adal.NewServicePrincipalToken( - *oauthConfig, - applicationID, - applicationSecret, - resource, - callbacks...) -if err != nil { - return nil, err -} - -// Acquire a new access token -err = spt.Refresh() -if err == nil { - token := spt.Token -} -``` - -* Replace the `APPLICATION_SECRET` with the `password` value from the previous section. - -#### Client Certificate - -```Go -certificatePath := "./example-app.pfx" - -certData, err := ioutil.ReadFile(certificatePath) -if err != nil { - return nil, fmt.Errorf("failed to read the certificate file (%s): %v", certificatePath, err) -} - -// Get the certificate and private key from pfx file -certificate, rsaPrivateKey, err := decodePkcs12(certData, "") -if err != nil { - return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err) -} - -spt, err := adal.NewServicePrincipalTokenFromCertificate( - *oauthConfig, - applicationID, - certificate, - rsaPrivateKey, - resource, - callbacks...) - -// Acquire a new access token -err = spt.Refresh() -if err == nil { - token := spt.Token -} -``` - -* Update the certificate path to point to the example-app.pfx file which was created in the previous section. - - -#### Device Code - -```Go -oauthClient := &http.Client{} - -// Acquire the device code -deviceCode, err := adal.InitiateDeviceAuth( - oauthClient, - *oauthConfig, - applicationID, - resource) -if err != nil { - return nil, fmt.Errorf("Failed to start device auth flow: %s", err) -} - -// Display the authentication message -fmt.Println(*deviceCode.Message) - -// Wait here until the user is authenticated -token, err := adal.WaitForUserCompletion(oauthClient, deviceCode) -if err != nil { - return nil, fmt.Errorf("Failed to finish device auth flow: %s", err) -} - -spt, err := adal.NewServicePrincipalTokenFromManualToken( - *oauthConfig, - applicationID, - resource, - *token, - callbacks...)
- -if err == nil { - token := spt.Token -} -``` - -#### Username and password authentication - -```Go -spt, err := adal.NewServicePrincipalTokenFromUsernamePassword( - *oauthConfig, - applicationID, - username, - password, - resource, - callbacks...) - -if err == nil { - token := spt.Token -} -``` - -#### Authorization code authentication - -```Go -spt, err := adal.NewServicePrincipalTokenFromAuthorizationCode( - *oauthConfig, - applicationID, - clientSecret, - authorizationCode, - redirectURI, - resource, - callbacks...) - -err = spt.Refresh() -if err == nil { - token := spt.Token -} -``` - -### Command Line Tool - -A command line tool is available in `cmd/adal.go` that can acquire a token for a given resource. It supports all flows mentioned above. - -``` -adal -h - -Usage of ./adal: - -applicationId string - application id - -certificatePath string - path to PKCS#12/PFX application certificate - -mode string - authentication mode (device, secret, cert, refresh) (default "device") - -resource string - resource for which the token is requested - -secret string - application secret - -tenantId string - tenant id - -tokenCachePath string - location of OAuth token cache (default "/home/cgc/.adal/accessToken.json") -``` - -Example: acquire a token for `https://management.core.windows.net/` using the device code flow: - -``` -adal -mode device \ - -applicationId "APPLICATION_ID" \ - -tenantId "TENANT_ID" \ - -resource https://management.core.windows.net/ - -``` diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go deleted file mode 100644 index fa59647..0000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go +++ /dev/null @@ -1,151 +0,0 @@ -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "errors" - "fmt" - "net/url" -) - -const ( - activeDirectoryEndpointTemplate = "%s/oauth2/%s%s" -) - -// OAuthConfig represents the endpoints needed -// in OAuth operations -type OAuthConfig struct { - AuthorityEndpoint url.URL `json:"authorityEndpoint"` - AuthorizeEndpoint url.URL `json:"authorizeEndpoint"` - TokenEndpoint url.URL `json:"tokenEndpoint"` - DeviceCodeEndpoint url.URL `json:"deviceCodeEndpoint"` -} - -// IsZero returns true if the OAuthConfig object is zero-initialized. -func (oac OAuthConfig) IsZero() bool { - return oac == OAuthConfig{} -} - -func validateStringParam(param, name string) error { - if len(param) == 0 { - return fmt.Errorf("parameter '%s' cannot be empty", name) - } - return nil -} - -// NewOAuthConfig returns an OAuthConfig with tenant-specific URLs -func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) { - apiVer := "1.0" - return NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID, &apiVer) -} - -// NewOAuthConfigWithAPIVersion returns an OAuthConfig with tenant-specific URLs.
-// If apiVersion is not nil the "api-version" query parameter will be appended to the endpoint URLs with the specified value. -func NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID string, apiVersion *string) (*OAuthConfig, error) { - if err := validateStringParam(activeDirectoryEndpoint, "activeDirectoryEndpoint"); err != nil { - return nil, err - } - api := "" - // it's legal for tenantID to be empty so don't validate it - if apiVersion != nil { - if err := validateStringParam(*apiVersion, "apiVersion"); err != nil { - return nil, err - } - api = fmt.Sprintf("?api-version=%s", *apiVersion) - } - u, err := url.Parse(activeDirectoryEndpoint) - if err != nil { - return nil, err - } - authorityURL, err := u.Parse(tenantID) - if err != nil { - return nil, err - } - authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", api)) - if err != nil { - return nil, err - } - tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", api)) - if err != nil { - return nil, err - } - deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", api)) - if err != nil { - return nil, err - } - - return &OAuthConfig{ - AuthorityEndpoint: *authorityURL, - AuthorizeEndpoint: *authorizeURL, - TokenEndpoint: *tokenURL, - DeviceCodeEndpoint: *deviceCodeURL, - }, nil -} - -// MultiTenantOAuthConfig provides endpoints for primary and auxiliary tenant IDs. -type MultiTenantOAuthConfig interface { - PrimaryTenant() *OAuthConfig - AuxiliaryTenants() []*OAuthConfig -} - -// OAuthOptions contains optional OAuthConfig creation arguments. -type OAuthOptions struct { - APIVersion string -} - -// apiVersion returns the bare version string, defaulting to "1.0"; the -// callers add the "?api-version=" query prefix themselves via -// NewOAuthConfigWithAPIVersion, so returning a pre-formatted query string -// here would double the prefix. -func (c OAuthOptions) apiVersion() string { - if c.APIVersion != "" { - return c.APIVersion - } - return "1.0" -} - -// NewMultiTenantOAuthConfig creates an object that supports multitenant OAuth configuration. -// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/authenticate-multi-tenant for more information.
-func NewMultiTenantOAuthConfig(activeDirectoryEndpoint, primaryTenantID string, auxiliaryTenantIDs []string, options OAuthOptions) (MultiTenantOAuthConfig, error) { - if len(auxiliaryTenantIDs) == 0 || len(auxiliaryTenantIDs) > 3 { - return nil, errors.New("must specify one to three auxiliary tenants") - } - mtCfg := multiTenantOAuthConfig{ - cfgs: make([]*OAuthConfig, len(auxiliaryTenantIDs)+1), - } - apiVer := options.apiVersion() - pri, err := NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, primaryTenantID, &apiVer) - if err != nil { - return nil, fmt.Errorf("failed to create OAuthConfig for primary tenant: %v", err) - } - mtCfg.cfgs[0] = pri - for i := range auxiliaryTenantIDs { - aux, err := NewOAuthConfig(activeDirectoryEndpoint, auxiliaryTenantIDs[i]) - if err != nil { - return nil, fmt.Errorf("failed to create OAuthConfig for tenant '%s': %v", auxiliaryTenantIDs[i], err) - } - mtCfg.cfgs[i+1] = aux - } - return mtCfg, nil -} - -type multiTenantOAuthConfig struct { - // first config in the slice is the primary tenant - cfgs []*OAuthConfig -} - -func (m multiTenantOAuthConfig) PrimaryTenant() *OAuthConfig { - return m.cfgs[0] -} - -func (m multiTenantOAuthConfig) AuxiliaryTenants() []*OAuthConfig { - return m.cfgs[1:] -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go deleted file mode 100644 index 9daa4b5..0000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go +++ /dev/null @@ -1,273 +0,0 @@ -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
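Before the device-flow code, a quick sketch of how the config.go constructors deleted above fit together: NewOAuthConfig derives the tenant-specific authorize, token, and device-code URLs from the AAD endpoint and a tenant ID, appending `?api-version=1.0` by default. The tenant value below is a placeholder.

```go
package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	// "contoso.onmicrosoft.com" is a placeholder tenant; any tenant ID works.
	cfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com/", "contoso.onmicrosoft.com")
	if err != nil {
		panic(err)
	}
	// Prints .../contoso.onmicrosoft.com/oauth2/{authorize,token,devicecode}?api-version=1.0
	fmt.Println(cfg.AuthorizeEndpoint.String())
	fmt.Println(cfg.TokenEndpoint.String())
	fmt.Println(cfg.DeviceCodeEndpoint.String())
}
```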
- -/* - This file is largely based on rjw57/oauth2device's code, with the following differences: - * scope -> resource, and only allow a single one - * receive "Message" in the DeviceCode struct and show it to users as the prompt - * azure-xplat-cli has the following behavior that this emulates: - - does not send client_secret during the token exchange - - sends resource again in the token exchange request -*/ - -import ( - "context" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "strings" - "time" -) - -const ( - logPrefix = "autorest/adal/devicetoken:" -) - -var ( - // ErrDeviceGeneric represents an unknown error from the token endpoint when using device flow - ErrDeviceGeneric = fmt.Errorf("%s Error while retrieving OAuth token: Unknown Error", logPrefix) - - // ErrDeviceAccessDenied represents an access denied error from the token endpoint when using device flow - ErrDeviceAccessDenied = fmt.Errorf("%s Error while retrieving OAuth token: Access Denied", logPrefix) - - // ErrDeviceAuthorizationPending represents the server waiting on the user to complete the device flow - ErrDeviceAuthorizationPending = fmt.Errorf("%s Error while retrieving OAuth token: Authorization Pending", logPrefix) - - // ErrDeviceCodeExpired represents the server timing out and expiring the code during device flow - ErrDeviceCodeExpired = fmt.Errorf("%s Error while retrieving OAuth token: Code Expired", logPrefix) - - // ErrDeviceSlowDown represents the service telling us we're polling too often during device flow - ErrDeviceSlowDown = fmt.Errorf("%s Error while retrieving OAuth token: Slow Down", logPrefix) - - // ErrDeviceCodeEmpty represents an empty device code from the device endpoint while using device flow - ErrDeviceCodeEmpty = fmt.Errorf("%s Error while retrieving device code: Device Code Empty", logPrefix) - - // ErrOAuthTokenEmpty represents an empty OAuth token from the token endpoint when using device flow - ErrOAuthTokenEmpty = fmt.Errorf("%s Error while retrieving OAuth token: Token Empty", logPrefix) - - errCodeSendingFails = "Error occurred while sending request for Device Authorization Code" - errCodeHandlingFails = "Error occurred while handling response from the Device Endpoint" - errTokenSendingFails = "Error occurred while sending request with device code for a token" - errTokenHandlingFails = "Error occurred while handling response from the Token Endpoint (during device flow)" - errStatusNotOK = "Error HTTP status != 200" -) - -// DeviceCode is the object returned by the device auth endpoint -// It contains information to instruct the user to complete the auth flow -type DeviceCode struct { - DeviceCode *string `json:"device_code,omitempty"` - UserCode *string `json:"user_code,omitempty"` - VerificationURL *string `json:"verification_url,omitempty"` - ExpiresIn *int64 `json:"expires_in,string,omitempty"` - Interval *int64 `json:"interval,string,omitempty"` - - Message *string `json:"message"` // Azure specific - Resource string // this and the fields below are stored when initiating the flow and used when exchanging - OAuthConfig OAuthConfig - ClientID string -} - -// TokenError is the object returned by the token exchange endpoint -// when something is amiss -type TokenError struct { - Error *string `json:"error,omitempty"` - ErrorCodes []int `json:"error_codes,omitempty"` - ErrorDescription *string `json:"error_description,omitempty"` - Timestamp *string `json:"timestamp,omitempty"` - TraceID *string `json:"trace_id,omitempty"` -} - -// DeviceToken is the object returned by the token exchange
endpoint -// It can either look like a Token or a TokenError, so put both here -// and check for presence of "Error" to know if we are in an error state -type deviceToken struct { - Token - TokenError -} - -// InitiateDeviceAuth initiates a device auth flow. It returns a DeviceCode -// that can be used with CheckForUserCompletion or WaitForUserCompletion. -// Deprecated: use InitiateDeviceAuthWithContext() instead. -func InitiateDeviceAuth(sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) { - return InitiateDeviceAuthWithContext(context.Background(), sender, oauthConfig, clientID, resource) -} - -// InitiateDeviceAuthWithContext initiates a device auth flow. It returns a DeviceCode -// that can be used with CheckForUserCompletion or WaitForUserCompletion. -func InitiateDeviceAuthWithContext(ctx context.Context, sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) { - v := url.Values{ - "client_id": []string{clientID}, - "resource": []string{resource}, - } - - s := v.Encode() - body := ioutil.NopCloser(strings.NewReader(s)) - - req, err := http.NewRequest(http.MethodPost, oauthConfig.DeviceCodeEndpoint.String(), body) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error()) - } - - req.ContentLength = int64(len(s)) - req.Header.Set(contentType, mimeTypeFormPost) - resp, err := sender.Do(req.WithContext(ctx)) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error()) - } - defer resp.Body.Close() - - rb, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error()) - } - - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, errStatusNotOK) - } - - if len(strings.Trim(string(rb), " ")) == 0 { - return nil, ErrDeviceCodeEmpty - } - - var code DeviceCode - err = json.Unmarshal(rb, &code) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error()) - } - - code.ClientID = clientID - code.Resource = resource - code.OAuthConfig = oauthConfig - - return &code, nil -} - -// CheckForUserCompletion takes a DeviceCode and checks with the Azure AD OAuth endpoint -// to see if the device flow has been completed, timed out, or otherwise failed -// Deprecated: use CheckForUserCompletionWithContext() instead.
-func CheckForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { - return CheckForUserCompletionWithContext(context.Background(), sender, code) -} - -// CheckForUserCompletionWithContext takes a DeviceCode and checks with the Azure AD OAuth endpoint -// to see if the device flow has been completed, timed out, or otherwise failed -func CheckForUserCompletionWithContext(ctx context.Context, sender Sender, code *DeviceCode) (*Token, error) { - v := url.Values{ - "client_id": []string{code.ClientID}, - "code": []string{*code.DeviceCode}, - "grant_type": []string{OAuthGrantTypeDeviceCode}, - "resource": []string{code.Resource}, - } - - s := v.Encode() - body := ioutil.NopCloser(strings.NewReader(s)) - - req, err := http.NewRequest(http.MethodPost, code.OAuthConfig.TokenEndpoint.String(), body) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error()) - } - - req.ContentLength = int64(len(s)) - req.Header.Set(contentType, mimeTypeFormPost) - resp, err := sender.Do(req.WithContext(ctx)) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error()) - } - defer resp.Body.Close() - - rb, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) - } - - if resp.StatusCode != http.StatusOK && len(strings.Trim(string(rb), " ")) == 0 { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, errStatusNotOK) - } - if len(strings.Trim(string(rb), " ")) == 0 { - return nil, ErrOAuthTokenEmpty - } - - var token deviceToken - err = json.Unmarshal(rb, &token) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) - } - - if token.Error == nil { - return &token.Token, nil - } - - switch *token.Error { - case "authorization_pending": - return nil, ErrDeviceAuthorizationPending - case "slow_down": - return nil, ErrDeviceSlowDown - case "access_denied": - return nil, ErrDeviceAccessDenied - case "code_expired": - return nil, ErrDeviceCodeExpired - default: - // return a more meaningful error message if available - if token.ErrorDescription != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, *token.Error, *token.ErrorDescription) - } - return nil, ErrDeviceGeneric - } -} - -// WaitForUserCompletion calls CheckForUserCompletion repeatedly until a token is granted or an error state occurs. -// This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'. -// Deprecated: use WaitForUserCompletionWithContext() instead. -func WaitForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { - return WaitForUserCompletionWithContext(context.Background(), sender, code) -} - -// WaitForUserCompletionWithContext calls CheckForUserCompletion repeatedly until a token is granted or an error -// state occurs. This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'.
-func WaitForUserCompletionWithContext(ctx context.Context, sender Sender, code *DeviceCode) (*Token, error) { - intervalDuration := time.Duration(*code.Interval) * time.Second - waitDuration := intervalDuration - - for { - token, err := CheckForUserCompletionWithContext(ctx, sender, code) - - if err == nil { - return token, nil - } - - switch err { - case ErrDeviceSlowDown: - waitDuration += waitDuration - case ErrDeviceAuthorizationPending: - // noop - default: // everything else is "fatal" to us - return nil, err - } - - if waitDuration > (intervalDuration * 3) { - return nil, fmt.Errorf("%s Error waiting for user to complete device flow. Server told us to slow_down too much", logPrefix) - } - - select { - case <-time.After(waitDuration): - // noop - case <-ctx.Done(): - return nil, ctx.Err() - } - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go deleted file mode 100644 index 647a61b..0000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go +++ /dev/null @@ -1,25 +0,0 @@ -//go:build modhack -// +build modhack - -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// This file, and the github.com/Azure/go-autorest import, won't actually become part of -// the resultant binary. - -// Necessary for safely adding multi-module repo. -// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository -import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go deleted file mode 100644 index 2a974a3..0000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go +++ /dev/null @@ -1,135 +0,0 @@ -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "crypto/rsa" - "crypto/x509" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - - "golang.org/x/crypto/pkcs12" -) - -var ( - // ErrMissingCertificate is returned when no local certificate is found in the provided PFX data. - ErrMissingCertificate = errors.New("adal: certificate missing") - - // ErrMissingPrivateKey is returned when no private key is found in the provided PFX data. 
- ErrMissingPrivateKey = errors.New("adal: private key missing") -) - -// LoadToken restores a Token object from a file located at 'path'. -func LoadToken(path string) (*Token, error) { - file, err := os.Open(path) - if err != nil { - return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err) - } - defer file.Close() - - var token Token - - dec := json.NewDecoder(file) - if err = dec.Decode(&token); err != nil { - return nil, fmt.Errorf("failed to decode contents of file (%s) into Token representation: %v", path, err) - } - return &token, nil -} - -// SaveToken persists an OAuth token at the given location on disk. -// It moves the new file into place so it can safely be used to replace an existing file -// that may be accessed by multiple processes. -func SaveToken(path string, mode os.FileMode, token Token) error { - dir := filepath.Dir(path) - err := os.MkdirAll(dir, os.ModePerm) - if err != nil { - return fmt.Errorf("failed to create directory (%s) to store token in: %v", dir, err) - } - - newFile, err := ioutil.TempFile(dir, "token") - if err != nil { - return fmt.Errorf("failed to create the temp file to write the token: %v", err) - } - tempPath := newFile.Name() - - if err := json.NewEncoder(newFile).Encode(token); err != nil { - return fmt.Errorf("failed to encode token to file (%s) while saving token: %v", tempPath, err) - } - if err := newFile.Close(); err != nil { - return fmt.Errorf("failed to close temp file %s: %v", tempPath, err) - } - - // Atomic replace to avoid multi-writer file corruptions - if err := os.Rename(tempPath, path); err != nil { - return fmt.Errorf("failed to move temporary token to desired output location. src=%s dst=%s: %v", tempPath, path, err) - } - if err := os.Chmod(path, mode); err != nil { - return fmt.Errorf("failed to chmod the token file %s: %v", path, err) - } - return nil -} - -// DecodePfxCertificateData extracts the x509 certificate and RSA private key from the provided PFX data. -// The PFX data must contain a private key along with a certificate whose public key matches that of the -// private key or an error is returned. -// If the private key is not password protected, pass the empty string for password.
-func DecodePfxCertificateData(pfxData []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) { - blocks, err := pkcs12.ToPEM(pfxData, password) - if err != nil { - return nil, nil, err - } - // first extract the private key - var priv *rsa.PrivateKey - for _, block := range blocks { - if block.Type == "PRIVATE KEY" { - priv, err = x509.ParsePKCS1PrivateKey(block.Bytes) - if err != nil { - return nil, nil, err - } - break - } - } - if priv == nil { - return nil, nil, ErrMissingPrivateKey - } - // now find the certificate with the matching public key of our private key - var cert *x509.Certificate - for _, block := range blocks { - if block.Type == "CERTIFICATE" { - pcert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - return nil, nil, err - } - certKey, ok := pcert.PublicKey.(*rsa.PublicKey) - if !ok { - // keep looking - continue - } - if priv.E == certKey.E && priv.N.Cmp(certKey.N) == 0 { - // found a match - cert = pcert - break - } - } - } - if cert == nil { - return nil, nil, ErrMissingCertificate - } - return cert, priv, nil -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go deleted file mode 100644 index eb649bc..0000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go +++ /dev/null @@ -1,101 +0,0 @@ -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "crypto/tls" - "net" - "net/http" - "net/http/cookiejar" - "sync" - "time" - - "github.com/Azure/go-autorest/tracing" -) - -const ( - contentType = "Content-Type" - mimeTypeFormPost = "application/x-www-form-urlencoded" -) - -// DO NOT ACCESS THIS DIRECTLY. Go through sender(). -var defaultSender Sender -var defaultSenderInit = &sync.Once{} - -// Sender is the interface that wraps the Do method to send HTTP requests. -// -// The standard http.Client conforms to this interface. -type Sender interface { - Do(*http.Request) (*http.Response, error) -} - -// SenderFunc is a method that implements the Sender interface. -type SenderFunc func(*http.Request) (*http.Response, error) - -// Do implements the Sender interface on SenderFunc. -func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) { - return sf(r) -} - -// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the -// http.Request and pass it along or, first, pass the http.Request along then react to the -// http.Response result. -type SendDecorator func(Sender) Sender - -// CreateSender creates, decorates, and returns, as a Sender, the default http.Client. -func CreateSender(decorators ...SendDecorator) Sender { - return DecorateSender(sender(), decorators...) -} - -// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which it applies to - the Sender.
Decorators are applied in the order received, but their effect upon the request -depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a -post-decorator (pass the http.Request along and react to the results in http.Response). -func DecorateSender(s Sender, decorators ...SendDecorator) Sender { - for _, decorate := range decorators { - s = decorate(s) - } - return s -} - -func sender() Sender { - // note that we can't init defaultSender in init() since it will - // execute before calling code has had a chance to enable tracing - defaultSenderInit.Do(func() { - // copied from http.DefaultTransport with a TLS minimum version. - transport := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).DialContext, - ForceAttemptHTTP2: true, - MaxIdleConns: 100, - IdleConnTimeout: 90 * time.Second, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - TLSClientConfig: &tls.Config{ - MinVersion: tls.VersionTLS12, - }, - } - var roundTripper http.RoundTripper = transport - if tracing.IsEnabled() { - roundTripper = tracing.NewTransport(transport) - } - j, _ := cookiejar.New(nil) - defaultSender = &http.Client{Jar: j, Transport: roundTripper} - }) - return defaultSender -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go deleted file mode 100644 index 1a9c8ab..0000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go +++ /dev/null @@ -1,1396 +0,0 @@ -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License.
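Looking back at persist.go above: SaveToken writes to a temp file and renames it into place, so a token cache shared by several processes is never observed half-written, and LoadToken reads it back. A minimal round-trip sketch; the cache path and file mode below are illustrative.

```go
package main

import (
	"fmt"
	"path/filepath"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	// Illustrative cache location; any writable path works.
	path := filepath.Join("/tmp", "adal-example", "accessToken.json")
	tok := adal.Token{AccessToken: "example-access-token"}

	// Written via temp file + rename, as shown in SaveToken above.
	if err := adal.SaveToken(path, 0600, tok); err != nil {
		panic(err)
	}
	loaded, err := adal.LoadToken(path)
	if err != nil {
		panic(err)
	}
	fmt.Println(loaded.AccessToken == tok.AccessToken) // true
}
```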
- -import ( - "context" - "crypto/rand" - "crypto/rsa" - "crypto/sha1" - "crypto/x509" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "math" - "net/http" - "net/url" - "os" - "strconv" - "strings" - "sync" - "time" - - "github.com/Azure/go-autorest/autorest/date" - "github.com/Azure/go-autorest/logger" - "github.com/golang-jwt/jwt/v4" -) - -const ( - defaultRefresh = 5 * time.Minute - - // OAuthGrantTypeDeviceCode is the "grant_type" identifier used in device flow - OAuthGrantTypeDeviceCode = "device_code" - - // OAuthGrantTypeClientCredentials is the "grant_type" identifier used in credential flows - OAuthGrantTypeClientCredentials = "client_credentials" - - // OAuthGrantTypeUserPass is the "grant_type" identifier used in username and password auth flows - OAuthGrantTypeUserPass = "password" - - // OAuthGrantTypeRefreshToken is the "grant_type" identifier used in refresh token flows - OAuthGrantTypeRefreshToken = "refresh_token" - - // OAuthGrantTypeAuthorizationCode is the "grant_type" identifier used in authorization code flows - OAuthGrantTypeAuthorizationCode = "authorization_code" - - // metadataHeader is the header required by MSI extension - metadataHeader = "Metadata" - - // msiEndpoint is the well known endpoint for getting MSI authentications tokens - msiEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token" - - // the API version to use for the MSI endpoint - msiAPIVersion = "2018-02-01" - - // the default number of attempts to refresh an MSI authentication token - defaultMaxMSIRefreshAttempts = 5 - - // asMSIEndpointEnv is the environment variable used to store the endpoint on App Service and Functions - msiEndpointEnv = "MSI_ENDPOINT" - - // asMSISecretEnv is the environment variable used to store the request secret on App Service and Functions - msiSecretEnv = "MSI_SECRET" - - // the API version to use for the legacy App Service MSI endpoint - appServiceAPIVersion2017 = "2017-09-01" - - // secret header used when authenticating against app service MSI endpoint - secretHeader = "Secret" - - // the format for expires_on in UTC with AM/PM - expiresOnDateFormatPM = "1/2/2006 15:04:05 PM +00:00" - - // the format for expires_on in UTC without AM/PM - expiresOnDateFormat = "1/2/2006 15:04:05 +00:00" -) - -// OAuthTokenProvider is an interface which should be implemented by an access token retriever -type OAuthTokenProvider interface { - OAuthToken() string -} - -// MultitenantOAuthTokenProvider provides tokens used for multi-tenant authorization. -type MultitenantOAuthTokenProvider interface { - PrimaryOAuthToken() string - AuxiliaryOAuthTokens() []string -} - -// TokenRefreshError is an interface used by errors returned during token refresh. 
-type TokenRefreshError interface { - error - Response() *http.Response -} - -// Refresher is an interface for token refresh functionality -type Refresher interface { - Refresh() error - RefreshExchange(resource string) error - EnsureFresh() error -} - -// RefresherWithContext is an interface for token refresh functionality -type RefresherWithContext interface { - RefreshWithContext(ctx context.Context) error - RefreshExchangeWithContext(ctx context.Context, resource string) error - EnsureFreshWithContext(ctx context.Context) error -} - -// TokenRefreshCallback is the type representing callbacks that will be called after -// a successful token refresh -type TokenRefreshCallback func(Token) error - -// TokenRefresh is a type representing a custom callback to refresh a token -type TokenRefresh func(ctx context.Context, resource string) (*Token, error) - -// Token encapsulates the access token used to authorize Azure requests. -// https://docs.microsoft.com/en-us/azure/active-directory/develop/v1-oauth2-client-creds-grant-flow#service-to-service-access-token-response -type Token struct { - AccessToken string `json:"access_token"` - RefreshToken string `json:"refresh_token"` - - ExpiresIn json.Number `json:"expires_in"` - ExpiresOn json.Number `json:"expires_on"` - NotBefore json.Number `json:"not_before"` - - Resource string `json:"resource"` - Type string `json:"token_type"` -} - -func newToken() Token { - return Token{ - ExpiresIn: "0", - ExpiresOn: "0", - NotBefore: "0", - } -} - -// IsZero returns true if the token object is zero-initialized. -func (t Token) IsZero() bool { - return t == Token{} -} - -// Expires returns the time.Time when the Token expires. -func (t Token) Expires() time.Time { - s, err := t.ExpiresOn.Float64() - if err != nil { - s = -3600 - } - - expiration := date.NewUnixTimeFromSeconds(s) - - return time.Time(expiration).UTC() -} - -// IsExpired returns true if the Token is expired, false otherwise. -func (t Token) IsExpired() bool { - return t.WillExpireIn(0) -} - -// WillExpireIn returns true if the Token will expire after the passed time.Duration interval -// from now, false otherwise. -func (t Token) WillExpireIn(d time.Duration) bool { - return !t.Expires().After(time.Now().Add(d)) -} - -//OAuthToken return the current access token -func (t *Token) OAuthToken() string { - return t.AccessToken -} - -// ServicePrincipalSecret is an interface that allows various secret mechanism to fill the form -// that is submitted when acquiring an oAuth token. -type ServicePrincipalSecret interface { - SetAuthenticationValues(spt *ServicePrincipalToken, values *url.Values) error -} - -// ServicePrincipalNoSecret represents a secret type that contains no secret -// meaning it is not valid for fetching a fresh token. This is used by Manual -type ServicePrincipalNoSecret struct { -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret -// It only returns an error for the ServicePrincipalNoSecret type -func (noSecret *ServicePrincipalNoSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - return fmt.Errorf("Manually created ServicePrincipalToken does not contain secret material to retrieve a new access token") -} - -// MarshalJSON implements the json.Marshaler interface. 
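The Expires/IsExpired/WillExpireIn helpers above are what drive proactive refresh: a token counts as stale once it is inside the refresh window, not only after the expiry instant, and a malformed expires_on falls back to a time well in the past so the token reads as already expired. A small sketch of the semantics with a fabricated token (values illustrative):

    package main

    import (
        "encoding/json"
        "fmt"
        "strconv"
        "time"

        "github.com/Azure/go-autorest/autorest/adal"
    )

    func main() {
        // Fabricate a token whose expires_on is 60 seconds from now;
        // ExpiresOn holds seconds since the Unix epoch, as AAD returns it.
        var t adal.Token
        t.ExpiresOn = json.Number(strconv.FormatInt(time.Now().Add(time.Minute).Unix(), 10))

        fmt.Println(t.IsExpired())                   // false: the expiry instant has not passed
        fmt.Println(t.WillExpireIn(5 * time.Minute)) // true: inside the default 5m refresh window
    }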
-func (noSecret ServicePrincipalNoSecret) MarshalJSON() ([]byte, error) { - type tokenType struct { - Type string `json:"type"` - } - return json.Marshal(tokenType{ - Type: "ServicePrincipalNoSecret", - }) -} - -// ServicePrincipalTokenSecret implements ServicePrincipalSecret for client_secret type authorization. -type ServicePrincipalTokenSecret struct { - ClientSecret string `json:"value"` -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. -// It will populate the form submitted during oAuth Token Acquisition using the client_secret. -func (tokenSecret *ServicePrincipalTokenSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - v.Set("client_secret", tokenSecret.ClientSecret) - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (tokenSecret ServicePrincipalTokenSecret) MarshalJSON() ([]byte, error) { - type tokenType struct { - Type string `json:"type"` - Value string `json:"value"` - } - return json.Marshal(tokenType{ - Type: "ServicePrincipalTokenSecret", - Value: tokenSecret.ClientSecret, - }) -} - -// ServicePrincipalCertificateSecret implements ServicePrincipalSecret for generic RSA cert auth with signed JWTs. -type ServicePrincipalCertificateSecret struct { - Certificate *x509.Certificate - PrivateKey *rsa.PrivateKey -} - -// SignJwt returns the JWT signed with the certificate's private key. -func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalToken) (string, error) { - hasher := sha1.New() - _, err := hasher.Write(secret.Certificate.Raw) - if err != nil { - return "", err - } - - thumbprint := base64.URLEncoding.EncodeToString(hasher.Sum(nil)) - - // The jti (JWT ID) claim provides a unique identifier for the JWT. - jti := make([]byte, 20) - _, err = rand.Read(jti) - if err != nil { - return "", err - } - - token := jwt.New(jwt.SigningMethodRS256) - token.Header["x5t"] = thumbprint - x5c := []string{base64.StdEncoding.EncodeToString(secret.Certificate.Raw)} - token.Header["x5c"] = x5c - token.Claims = jwt.MapClaims{ - "aud": spt.inner.OauthConfig.TokenEndpoint.String(), - "iss": spt.inner.ClientID, - "sub": spt.inner.ClientID, - "jti": base64.URLEncoding.EncodeToString(jti), - "nbf": time.Now().Unix(), - "exp": time.Now().Add(24 * time.Hour).Unix(), - } - - signedString, err := token.SignedString(secret.PrivateKey) - return signedString, err -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. -// It will populate the form submitted during oAuth Token Acquisition using a JWT signed with a certificate. -func (secret *ServicePrincipalCertificateSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - jwt, err := secret.SignJwt(spt) - if err != nil { - return err - } - - v.Set("client_assertion", jwt) - v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer") - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (secret ServicePrincipalCertificateSecret) MarshalJSON() ([]byte, error) { - return nil, errors.New("marshalling ServicePrincipalCertificateSecret is not supported") -} - -// ServicePrincipalMSISecret implements ServicePrincipalSecret for machines running the MSI Extension. -type ServicePrincipalMSISecret struct { - msiType msiType - clientResourceID string -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. 
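SignJwt above implements the OAuth 2.0 client-assertion flow: instead of posting a shared secret, the client posts a short-lived RS256-signed JWT whose x5t/x5c headers carry the certificate thumbprint and chain, and whose aud/iss/sub/jti/nbf/exp claims bind it to the token endpoint and client ID. The token-request form it produces looks roughly like this (values abbreviated, not from a real exchange):

    client_assertion_type=urn:ietf:params:oauth:client-assertion-type:jwt-bearer
    client_assertion=eyJhbGciOiJSUzI1NiIsIng1dCI6Ij...<signed JWT>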
-func (msiSecret *ServicePrincipalMSISecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (msiSecret ServicePrincipalMSISecret) MarshalJSON() ([]byte, error) { - return nil, errors.New("marshalling ServicePrincipalMSISecret is not supported") -} - -// ServicePrincipalUsernamePasswordSecret implements ServicePrincipalSecret for username and password auth. -type ServicePrincipalUsernamePasswordSecret struct { - Username string `json:"username"` - Password string `json:"password"` -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. -func (secret *ServicePrincipalUsernamePasswordSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - v.Set("username", secret.Username) - v.Set("password", secret.Password) - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (secret ServicePrincipalUsernamePasswordSecret) MarshalJSON() ([]byte, error) { - type tokenType struct { - Type string `json:"type"` - Username string `json:"username"` - Password string `json:"password"` - } - return json.Marshal(tokenType{ - Type: "ServicePrincipalUsernamePasswordSecret", - Username: secret.Username, - Password: secret.Password, - }) -} - -// ServicePrincipalAuthorizationCodeSecret implements ServicePrincipalSecret for authorization code auth. -type ServicePrincipalAuthorizationCodeSecret struct { - ClientSecret string `json:"value"` - AuthorizationCode string `json:"authCode"` - RedirectURI string `json:"redirect"` -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. -func (secret *ServicePrincipalAuthorizationCodeSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - v.Set("code", secret.AuthorizationCode) - v.Set("client_secret", secret.ClientSecret) - v.Set("redirect_uri", secret.RedirectURI) - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (secret ServicePrincipalAuthorizationCodeSecret) MarshalJSON() ([]byte, error) { - type tokenType struct { - Type string `json:"type"` - Value string `json:"value"` - AuthCode string `json:"authCode"` - Redirect string `json:"redirect"` - } - return json.Marshal(tokenType{ - Type: "ServicePrincipalAuthorizationCodeSecret", - Value: secret.ClientSecret, - AuthCode: secret.AuthorizationCode, - Redirect: secret.RedirectURI, - }) -} - -// ServicePrincipalFederatedSecret implements ServicePrincipalSecret for Federated JWTs. -type ServicePrincipalFederatedSecret struct { - jwt string -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. -// It will populate the form submitted during OAuth Token Acquisition using a JWT signed by an OIDC issuer. -func (secret *ServicePrincipalFederatedSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - - v.Set("client_assertion", secret.jwt) - v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer") - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (secret ServicePrincipalFederatedSecret) MarshalJSON() ([]byte, error) { - return nil, errors.New("marshalling ServicePrincipalFederatedSecret is not supported") -} - -// ServicePrincipalToken encapsulates a Token created for a Service Principal. 
-type ServicePrincipalToken struct {
-	inner             servicePrincipalToken
-	refreshLock       *sync.RWMutex
-	sender            Sender
-	customRefreshFunc TokenRefresh
-	refreshCallbacks  []TokenRefreshCallback
-	// MaxMSIRefreshAttempts is the maximum number of attempts to refresh an MSI token.
-	// Setting this to a value less than 1 will use the default value.
-	MaxMSIRefreshAttempts int
-}
-
-// MarshalTokenJSON returns the marshalled inner token.
-func (spt ServicePrincipalToken) MarshalTokenJSON() ([]byte, error) {
-	return json.Marshal(spt.inner.Token)
-}
-
-// SetRefreshCallbacks replaces any existing refresh callbacks with the specified callbacks.
-func (spt *ServicePrincipalToken) SetRefreshCallbacks(callbacks []TokenRefreshCallback) {
-	spt.refreshCallbacks = callbacks
-}
-
-// SetCustomRefreshFunc sets a custom refresh function used to refresh the token.
-func (spt *ServicePrincipalToken) SetCustomRefreshFunc(customRefreshFunc TokenRefresh) {
-	spt.customRefreshFunc = customRefreshFunc
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (spt ServicePrincipalToken) MarshalJSON() ([]byte, error) {
-	return json.Marshal(spt.inner)
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (spt *ServicePrincipalToken) UnmarshalJSON(data []byte) error {
-	// need to determine the token type
-	raw := map[string]interface{}{}
-	err := json.Unmarshal(data, &raw)
-	if err != nil {
-		return err
-	}
-	secret := raw["secret"].(map[string]interface{})
-	switch secret["type"] {
-	case "ServicePrincipalNoSecret":
-		spt.inner.Secret = &ServicePrincipalNoSecret{}
-	case "ServicePrincipalTokenSecret":
-		spt.inner.Secret = &ServicePrincipalTokenSecret{}
-	case "ServicePrincipalCertificateSecret":
-		return errors.New("unmarshalling ServicePrincipalCertificateSecret is not supported")
-	case "ServicePrincipalMSISecret":
-		return errors.New("unmarshalling ServicePrincipalMSISecret is not supported")
-	case "ServicePrincipalUsernamePasswordSecret":
-		spt.inner.Secret = &ServicePrincipalUsernamePasswordSecret{}
-	case "ServicePrincipalAuthorizationCodeSecret":
-		spt.inner.Secret = &ServicePrincipalAuthorizationCodeSecret{}
-	case "ServicePrincipalFederatedSecret":
-		return errors.New("unmarshalling ServicePrincipalFederatedSecret is not supported")
-	default:
-		return fmt.Errorf("unrecognized token type '%s'", secret["type"])
-	}
-	err = json.Unmarshal(data, &spt.inner)
-	if err != nil {
-		return err
-	}
-	// Don't override the refreshLock or the sender if those have been already set.
-	if spt.refreshLock == nil {
-		spt.refreshLock = &sync.RWMutex{}
-	}
-	if spt.sender == nil {
-		spt.sender = sender()
-	}
-	return nil
-}
-
-// internal type used for marshalling/unmarshalling
-type servicePrincipalToken struct {
-	Token         Token                  `json:"token"`
-	Secret        ServicePrincipalSecret `json:"secret"`
-	OauthConfig   OAuthConfig            `json:"oauth"`
-	ClientID      string                 `json:"clientID"`
-	Resource      string                 `json:"resource"`
-	AutoRefresh   bool                   `json:"autoRefresh"`
-	RefreshWithin time.Duration          `json:"refreshWithin"`
-}
-
-func validateOAuthConfig(oac OAuthConfig) error {
-	if oac.IsZero() {
-		return fmt.Errorf("parameter 'oauthConfig' cannot be zero-initialized")
-	}
-	return nil
-}
-
-// NewServicePrincipalTokenWithSecret creates a ServicePrincipalToken using the supplied ServicePrincipalSecret implementation.
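One consequence of the type switch in UnmarshalJSON above: only the secret types whose MarshalJSON emits a "type" discriminator survive a round trip; certificate, MSI and federated secrets refuse to (de)serialize by design. A sketch of persisting and restoring a client-secret SPT, assuming NewOAuthConfig from the package's config.go (not part of this hunk); endpoint and IDs are placeholders:

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/Azure/go-autorest/autorest/adal"
    )

    func main() {
        cfg, _ := adal.NewOAuthConfig("https://login.microsoftonline.com/", "my-tenant-id")
        spt, _ := adal.NewServicePrincipalToken(*cfg, "client-id", "client-secret", "https://management.azure.com/")

        b, _ := json.Marshal(spt) // serializes token, config, and the tagged secret

        var restored adal.ServicePrincipalToken
        if err := json.Unmarshal(b, &restored); err != nil {
            fmt.Println("restore failed:", err)
            return
        }
        // The restored SPT re-acquires its lock and default sender lazily.
        fmt.Println("restored token expires:", restored.Token().Expires())
    }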
-func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, resource string, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(id, "id"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - if secret == nil { - return nil, fmt.Errorf("parameter 'secret' cannot be nil") - } - spt := &ServicePrincipalToken{ - inner: servicePrincipalToken{ - Token: newToken(), - OauthConfig: oauthConfig, - Secret: secret, - ClientID: id, - Resource: resource, - AutoRefresh: true, - RefreshWithin: defaultRefresh, - }, - refreshLock: &sync.RWMutex{}, - sender: sender(), - refreshCallbacks: callbacks, - } - return spt, nil -} - -// NewServicePrincipalTokenFromManualToken creates a ServicePrincipalToken using the supplied token -func NewServicePrincipalTokenFromManualToken(oauthConfig OAuthConfig, clientID string, resource string, token Token, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - if token.IsZero() { - return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized") - } - spt, err := NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - &ServicePrincipalNoSecret{}, - callbacks...) - if err != nil { - return nil, err - } - - spt.inner.Token = token - - return spt, nil -} - -// NewServicePrincipalTokenFromManualTokenSecret creates a ServicePrincipalToken using the supplied token and secret -func NewServicePrincipalTokenFromManualTokenSecret(oauthConfig OAuthConfig, clientID string, resource string, token Token, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - if secret == nil { - return nil, fmt.Errorf("parameter 'secret' cannot be nil") - } - if token.IsZero() { - return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized") - } - spt, err := NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - secret, - callbacks...) - if err != nil { - return nil, err - } - - spt.inner.Token = token - - return spt, nil -} - -// NewServicePrincipalToken creates a ServicePrincipalToken from the supplied Service Principal -// credentials scoped to the named resource. 
-func NewServicePrincipalToken(oauthConfig OAuthConfig, clientID string, secret string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(secret, "secret"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - return NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - &ServicePrincipalTokenSecret{ - ClientSecret: secret, - }, - callbacks..., - ) -} - -// NewServicePrincipalTokenFromCertificate creates a ServicePrincipalToken from the supplied pkcs12 bytes. -func NewServicePrincipalTokenFromCertificate(oauthConfig OAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - if certificate == nil { - return nil, fmt.Errorf("parameter 'certificate' cannot be nil") - } - if privateKey == nil { - return nil, fmt.Errorf("parameter 'privateKey' cannot be nil") - } - return NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - &ServicePrincipalCertificateSecret{ - PrivateKey: privateKey, - Certificate: certificate, - }, - callbacks..., - ) -} - -// NewServicePrincipalTokenFromUsernamePassword creates a ServicePrincipalToken from the username and password. 
-func NewServicePrincipalTokenFromUsernamePassword(oauthConfig OAuthConfig, clientID string, username string, password string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
-	if err := validateOAuthConfig(oauthConfig); err != nil {
-		return nil, err
-	}
-	if err := validateStringParam(clientID, "clientID"); err != nil {
-		return nil, err
-	}
-	if err := validateStringParam(username, "username"); err != nil {
-		return nil, err
-	}
-	if err := validateStringParam(password, "password"); err != nil {
-		return nil, err
-	}
-	if err := validateStringParam(resource, "resource"); err != nil {
-		return nil, err
-	}
-	return NewServicePrincipalTokenWithSecret(
-		oauthConfig,
-		clientID,
-		resource,
-		&ServicePrincipalUsernamePasswordSecret{
-			Username: username,
-			Password: password,
-		},
-		callbacks...,
-	)
-}
-
-// NewServicePrincipalTokenFromAuthorizationCode creates a ServicePrincipalToken from the supplied authorization code.
-func NewServicePrincipalTokenFromAuthorizationCode(oauthConfig OAuthConfig, clientID string, clientSecret string, authorizationCode string, redirectURI string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
-
-	if err := validateOAuthConfig(oauthConfig); err != nil {
-		return nil, err
-	}
-	if err := validateStringParam(clientID, "clientID"); err != nil {
-		return nil, err
-	}
-	if err := validateStringParam(clientSecret, "clientSecret"); err != nil {
-		return nil, err
-	}
-	if err := validateStringParam(authorizationCode, "authorizationCode"); err != nil {
-		return nil, err
-	}
-	if err := validateStringParam(redirectURI, "redirectURI"); err != nil {
-		return nil, err
-	}
-	if err := validateStringParam(resource, "resource"); err != nil {
-		return nil, err
-	}
-
-	return NewServicePrincipalTokenWithSecret(
-		oauthConfig,
-		clientID,
-		resource,
-		&ServicePrincipalAuthorizationCodeSecret{
-			ClientSecret:      clientSecret,
-			AuthorizationCode: authorizationCode,
-			RedirectURI:       redirectURI,
-		},
-		callbacks...,
-	)
-}
-
-// NewServicePrincipalTokenFromFederatedToken creates a ServicePrincipalToken from the supplied federated OIDC JWT.
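All of these constructors funnel into NewServicePrincipalTokenWithSecret; they differ only in which ServicePrincipalSecret they build. A sketch of the common lifecycle with the client-secret variant, again assuming NewOAuthConfig from config.go (not in this hunk); every credential below is a placeholder:

    package main

    import (
        "fmt"

        "github.com/Azure/go-autorest/autorest/adal"
    )

    func main() {
        cfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com/", "my-tenant-id")
        if err != nil {
            panic(err)
        }

        // Fires after every successful refresh; useful for persisting tokens.
        onRefresh := func(t adal.Token) error {
            fmt.Println("token refreshed, expires:", t.Expires())
            return nil
        }

        spt, err := adal.NewServicePrincipalToken(*cfg, "client-id", "client-secret",
            "https://management.azure.com/", onRefresh)
        if err != nil {
            panic(err)
        }

        // EnsureFresh is a no-op while the cached token is outside the refresh
        // window; otherwise it performs the actual HTTP token request.
        if err := spt.EnsureFresh(); err != nil {
            fmt.Println("refresh failed:", err)
            return
        }
        fmt.Println("bearer token length:", len(spt.OAuthToken()))
    }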
-func NewServicePrincipalTokenFromFederatedToken(oauthConfig OAuthConfig, clientID string, jwt string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - if jwt == "" { - return nil, fmt.Errorf("parameter 'jwt' cannot be empty") - } - return NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - &ServicePrincipalFederatedSecret{ - jwt: jwt, - }, - callbacks..., - ) -} - -type msiType int - -const ( - msiTypeUnavailable msiType = iota - msiTypeAppServiceV20170901 - msiTypeCloudShell - msiTypeIMDS -) - -func (m msiType) String() string { - switch m { - case msiTypeAppServiceV20170901: - return "AppServiceV20170901" - case msiTypeCloudShell: - return "CloudShell" - case msiTypeIMDS: - return "IMDS" - default: - return fmt.Sprintf("unhandled MSI type %d", m) - } -} - -// returns the MSI type and endpoint, or an error -func getMSIType() (msiType, string, error) { - if endpointEnvVar := os.Getenv(msiEndpointEnv); endpointEnvVar != "" { - // if the env var MSI_ENDPOINT is set - if secretEnvVar := os.Getenv(msiSecretEnv); secretEnvVar != "" { - // if BOTH the env vars MSI_ENDPOINT and MSI_SECRET are set the msiType is AppService - return msiTypeAppServiceV20170901, endpointEnvVar, nil - } - // if ONLY the env var MSI_ENDPOINT is set the msiType is CloudShell - return msiTypeCloudShell, endpointEnvVar, nil - } - // if MSI_ENDPOINT is NOT set assume the msiType is IMDS - return msiTypeIMDS, msiEndpoint, nil -} - -// GetMSIVMEndpoint gets the MSI endpoint on Virtual Machines. -// NOTE: this always returns the IMDS endpoint, it does not work for app services or cloud shell. -// Deprecated: NewServicePrincipalTokenFromMSI() and variants will automatically detect the endpoint. -func GetMSIVMEndpoint() (string, error) { - return msiEndpoint, nil -} - -// GetMSIAppServiceEndpoint get the MSI endpoint for App Service and Functions. -// It will return an error when not running in an app service/functions environment. -// Deprecated: NewServicePrincipalTokenFromMSI() and variants will automatically detect the endpoint. -func GetMSIAppServiceEndpoint() (string, error) { - msiType, endpoint, err := getMSIType() - if err != nil { - return "", err - } - switch msiType { - case msiTypeAppServiceV20170901: - return endpoint, nil - default: - return "", fmt.Errorf("%s is not app service environment", msiType) - } -} - -// GetMSIEndpoint get the appropriate MSI endpoint depending on the runtime environment -// Deprecated: NewServicePrincipalTokenFromMSI() and variants will automatically detect the endpoint. -func GetMSIEndpoint() (string, error) { - _, endpoint, err := getMSIType() - return endpoint, err -} - -// NewServicePrincipalTokenFromMSI creates a ServicePrincipalToken via the MSI VM Extension. -// It will use the system assigned identity when creating the token. -// msiEndpoint - empty string, or pass a non-empty string to override the default value. -// Deprecated: use NewServicePrincipalTokenFromManagedIdentity() instead. -func NewServicePrincipalTokenFromMSI(msiEndpoint, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - return newServicePrincipalTokenFromMSI(msiEndpoint, resource, "", "", callbacks...) 
-} - -// NewServicePrincipalTokenFromMSIWithUserAssignedID creates a ServicePrincipalToken via the MSI VM Extension. -// It will use the clientID of specified user assigned identity when creating the token. -// msiEndpoint - empty string, or pass a non-empty string to override the default value. -// Deprecated: use NewServicePrincipalTokenFromManagedIdentity() instead. -func NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, resource string, userAssignedID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateStringParam(userAssignedID, "userAssignedID"); err != nil { - return nil, err - } - return newServicePrincipalTokenFromMSI(msiEndpoint, resource, userAssignedID, "", callbacks...) -} - -// NewServicePrincipalTokenFromMSIWithIdentityResourceID creates a ServicePrincipalToken via the MSI VM Extension. -// It will use the azure resource id of user assigned identity when creating the token. -// msiEndpoint - empty string, or pass a non-empty string to override the default value. -// Deprecated: use NewServicePrincipalTokenFromManagedIdentity() instead. -func NewServicePrincipalTokenFromMSIWithIdentityResourceID(msiEndpoint, resource string, identityResourceID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateStringParam(identityResourceID, "identityResourceID"); err != nil { - return nil, err - } - return newServicePrincipalTokenFromMSI(msiEndpoint, resource, "", identityResourceID, callbacks...) -} - -// ManagedIdentityOptions contains optional values for configuring managed identity authentication. -type ManagedIdentityOptions struct { - // ClientID is the user-assigned identity to use during authentication. - // It is mutually exclusive with IdentityResourceID. - ClientID string - - // IdentityResourceID is the resource ID of the user-assigned identity to use during authentication. - // It is mutually exclusive with ClientID. - IdentityResourceID string -} - -// NewServicePrincipalTokenFromManagedIdentity creates a ServicePrincipalToken using a managed identity. -// It supports the following managed identity environments. -// - App Service Environment (API version 2017-09-01 only) -// - Cloud shell -// - IMDS with a system or user assigned identity -func NewServicePrincipalTokenFromManagedIdentity(resource string, options *ManagedIdentityOptions, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if options == nil { - options = &ManagedIdentityOptions{} - } - return newServicePrincipalTokenFromMSI("", resource, options.ClientID, options.IdentityResourceID, callbacks...) 
-} - -func newServicePrincipalTokenFromMSI(msiEndpoint, resource, userAssignedID, identityResourceID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - if userAssignedID != "" && identityResourceID != "" { - return nil, errors.New("cannot specify userAssignedID and identityResourceID") - } - msiType, endpoint, err := getMSIType() - if err != nil { - logger.Instance.Writef(logger.LogError, "Error determining managed identity environment: %v\n", err) - return nil, err - } - logger.Instance.Writef(logger.LogInfo, "Managed identity environment is %s, endpoint is %s\n", msiType, endpoint) - if msiEndpoint != "" { - endpoint = msiEndpoint - logger.Instance.Writef(logger.LogInfo, "Managed identity custom endpoint is %s\n", endpoint) - } - msiEndpointURL, err := url.Parse(endpoint) - if err != nil { - return nil, err - } - // cloud shell sends its data in the request body - if msiType != msiTypeCloudShell { - v := url.Values{} - v.Set("resource", resource) - clientIDParam := "client_id" - switch msiType { - case msiTypeAppServiceV20170901: - clientIDParam = "clientid" - v.Set("api-version", appServiceAPIVersion2017) - break - case msiTypeIMDS: - v.Set("api-version", msiAPIVersion) - } - if userAssignedID != "" { - v.Set(clientIDParam, userAssignedID) - } else if identityResourceID != "" { - v.Set("mi_res_id", identityResourceID) - } - msiEndpointURL.RawQuery = v.Encode() - } - - spt := &ServicePrincipalToken{ - inner: servicePrincipalToken{ - Token: newToken(), - OauthConfig: OAuthConfig{ - TokenEndpoint: *msiEndpointURL, - }, - Secret: &ServicePrincipalMSISecret{ - msiType: msiType, - clientResourceID: identityResourceID, - }, - Resource: resource, - AutoRefresh: true, - RefreshWithin: defaultRefresh, - ClientID: userAssignedID, - }, - refreshLock: &sync.RWMutex{}, - sender: sender(), - refreshCallbacks: callbacks, - MaxMSIRefreshAttempts: defaultMaxMSIRefreshAttempts, - } - - return spt, nil -} - -// internal type that implements TokenRefreshError -type tokenRefreshError struct { - message string - resp *http.Response -} - -// Error implements the error interface which is part of the TokenRefreshError interface. -func (tre tokenRefreshError) Error() string { - return tre.message -} - -// Response implements the TokenRefreshError interface, it returns the raw HTTP response from the refresh operation. -func (tre tokenRefreshError) Response() *http.Response { - return tre.resp -} - -func newTokenRefreshError(message string, resp *http.Response) TokenRefreshError { - return tokenRefreshError{message: message, resp: resp} -} - -// EnsureFresh will refresh the token if it will expire within the refresh window (as set by -// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. -func (spt *ServicePrincipalToken) EnsureFresh() error { - return spt.EnsureFreshWithContext(context.Background()) -} - -// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by -// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. 
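Environment detection in getMSIType above is entirely env-var driven: MSI_ENDPOINT plus MSI_SECRET means App Service (the 2017-09-01 API), MSI_ENDPOINT alone means Cloud Shell, and neither means the IMDS endpoint at 169.254.169.254. Callers normally go through the non-deprecated entry point, roughly like this (resource and client ID are placeholders):

    package main

    import (
        "fmt"

        "github.com/Azure/go-autorest/autorest/adal"
    )

    func main() {
        // nil options selects the system-assigned identity; ClientID and
        // IdentityResourceID choose a user-assigned identity (mutually exclusive).
        spt, err := adal.NewServicePrincipalTokenFromManagedIdentity(
            "https://management.azure.com/",
            &adal.ManagedIdentityOptions{ClientID: "user-assigned-client-id"},
        )
        if err != nil {
            panic(err)
        }
        if err := spt.Refresh(); err != nil {
            fmt.Println("MSI token request failed:", err)
        }
    }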
-func (spt *ServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error { - // must take the read lock when initially checking the token's expiration - if spt.inner.AutoRefresh && spt.Token().WillExpireIn(spt.inner.RefreshWithin) { - // take the write lock then check again to see if the token was already refreshed - spt.refreshLock.Lock() - defer spt.refreshLock.Unlock() - if spt.inner.Token.WillExpireIn(spt.inner.RefreshWithin) { - return spt.refreshInternal(ctx, spt.inner.Resource) - } - } - return nil -} - -// InvokeRefreshCallbacks calls any TokenRefreshCallbacks that were added to the SPT during initialization -func (spt *ServicePrincipalToken) InvokeRefreshCallbacks(token Token) error { - if spt.refreshCallbacks != nil { - for _, callback := range spt.refreshCallbacks { - err := callback(spt.inner.Token) - if err != nil { - return fmt.Errorf("adal: TokenRefreshCallback handler failed. Error = '%v'", err) - } - } - } - return nil -} - -// Refresh obtains a fresh token for the Service Principal. -// This method is safe for concurrent use. -func (spt *ServicePrincipalToken) Refresh() error { - return spt.RefreshWithContext(context.Background()) -} - -// RefreshWithContext obtains a fresh token for the Service Principal. -// This method is safe for concurrent use. -func (spt *ServicePrincipalToken) RefreshWithContext(ctx context.Context) error { - spt.refreshLock.Lock() - defer spt.refreshLock.Unlock() - return spt.refreshInternal(ctx, spt.inner.Resource) -} - -// RefreshExchange refreshes the token, but for a different resource. -// This method is safe for concurrent use. -func (spt *ServicePrincipalToken) RefreshExchange(resource string) error { - return spt.RefreshExchangeWithContext(context.Background(), resource) -} - -// RefreshExchangeWithContext refreshes the token, but for a different resource. -// This method is safe for concurrent use. -func (spt *ServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error { - spt.refreshLock.Lock() - defer spt.refreshLock.Unlock() - return spt.refreshInternal(ctx, resource) -} - -func (spt *ServicePrincipalToken) getGrantType() string { - switch spt.inner.Secret.(type) { - case *ServicePrincipalUsernamePasswordSecret: - return OAuthGrantTypeUserPass - case *ServicePrincipalAuthorizationCodeSecret: - return OAuthGrantTypeAuthorizationCode - default: - return OAuthGrantTypeClientCredentials - } -} - -func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource string) error { - if spt.customRefreshFunc != nil { - token, err := spt.customRefreshFunc(ctx, resource) - if err != nil { - return err - } - spt.inner.Token = *token - return spt.InvokeRefreshCallbacks(spt.inner.Token) - } - req, err := http.NewRequest(http.MethodPost, spt.inner.OauthConfig.TokenEndpoint.String(), nil) - if err != nil { - return fmt.Errorf("adal: Failed to build the refresh request. 
Error = '%v'", err) - } - req.Header.Add("User-Agent", UserAgent()) - req = req.WithContext(ctx) - var resp *http.Response - authBodyFilter := func(b []byte) []byte { - if logger.Level() != logger.LogAuth { - return []byte("**REDACTED** authentication body") - } - return b - } - if msiSecret, ok := spt.inner.Secret.(*ServicePrincipalMSISecret); ok { - switch msiSecret.msiType { - case msiTypeAppServiceV20170901: - req.Method = http.MethodGet - req.Header.Set("secret", os.Getenv(msiSecretEnv)) - break - case msiTypeCloudShell: - req.Header.Set("Metadata", "true") - data := url.Values{} - data.Set("resource", spt.inner.Resource) - if spt.inner.ClientID != "" { - data.Set("client_id", spt.inner.ClientID) - } else if msiSecret.clientResourceID != "" { - data.Set("msi_res_id", msiSecret.clientResourceID) - } - req.Body = ioutil.NopCloser(strings.NewReader(data.Encode())) - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - break - case msiTypeIMDS: - req.Method = http.MethodGet - req.Header.Set("Metadata", "true") - break - } - logger.Instance.WriteRequest(req, logger.Filter{Body: authBodyFilter}) - resp, err = retryForIMDS(spt.sender, req, spt.MaxMSIRefreshAttempts) - } else { - v := url.Values{} - v.Set("client_id", spt.inner.ClientID) - v.Set("resource", resource) - - if spt.inner.Token.RefreshToken != "" { - v.Set("grant_type", OAuthGrantTypeRefreshToken) - v.Set("refresh_token", spt.inner.Token.RefreshToken) - // web apps must specify client_secret when refreshing tokens - // see https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-protocols-oauth-code#refreshing-the-access-tokens - if spt.getGrantType() == OAuthGrantTypeAuthorizationCode { - err := spt.inner.Secret.SetAuthenticationValues(spt, &v) - if err != nil { - return err - } - } - } else { - v.Set("grant_type", spt.getGrantType()) - err := spt.inner.Secret.SetAuthenticationValues(spt, &v) - if err != nil { - return err - } - } - - s := v.Encode() - body := ioutil.NopCloser(strings.NewReader(s)) - req.ContentLength = int64(len(s)) - req.Header.Set(contentType, mimeTypeFormPost) - req.Body = body - logger.Instance.WriteRequest(req, logger.Filter{Body: authBodyFilter}) - resp, err = spt.sender.Do(req) - } - - // don't return a TokenRefreshError here; this will allow retry logic to apply - if err != nil { - return fmt.Errorf("adal: Failed to execute the refresh request. Error = '%v'", err) - } else if resp == nil { - return fmt.Errorf("adal: received nil response and error") - } - - logger.Instance.WriteResponse(resp, logger.Filter{Body: authBodyFilter}) - defer resp.Body.Close() - rb, err := ioutil.ReadAll(resp.Body) - - if resp.StatusCode != http.StatusOK { - if err != nil { - return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Failed reading response body: %v Endpoint %s", resp.StatusCode, err, req.URL.String()), resp) - } - return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Response body: %s Endpoint %s", resp.StatusCode, string(rb), req.URL.String()), resp) - } - - // for the following error cases don't return a TokenRefreshError. the operation succeeded - // but some transient failure happened during deserialization. by returning a generic error - // the retry logic will kick in (we don't retry on TokenRefreshError). - - if err != nil { - return fmt.Errorf("adal: Failed to read a new service principal token during refresh. 
Error = '%v'", err) - } - if len(strings.Trim(string(rb), " ")) == 0 { - return fmt.Errorf("adal: Empty service principal token received during refresh") - } - token := struct { - AccessToken string `json:"access_token"` - RefreshToken string `json:"refresh_token"` - - // AAD returns expires_in as a string, ADFS returns it as an int - ExpiresIn json.Number `json:"expires_in"` - // expires_on can be in three formats, a UTC time stamp, or the number of seconds as a string *or* int. - ExpiresOn interface{} `json:"expires_on"` - NotBefore json.Number `json:"not_before"` - - Resource string `json:"resource"` - Type string `json:"token_type"` - }{} - // return a TokenRefreshError in the follow error cases as the token is in an unexpected format - err = json.Unmarshal(rb, &token) - if err != nil { - return newTokenRefreshError(fmt.Sprintf("adal: Failed to unmarshal the service principal token during refresh. Error = '%v' JSON = '%s'", err, string(rb)), resp) - } - expiresOn := json.Number("") - // ADFS doesn't include the expires_on field - if token.ExpiresOn != nil { - if expiresOn, err = parseExpiresOn(token.ExpiresOn); err != nil { - return newTokenRefreshError(fmt.Sprintf("adal: failed to parse expires_on: %v value '%s'", err, token.ExpiresOn), resp) - } - } - spt.inner.Token.AccessToken = token.AccessToken - spt.inner.Token.RefreshToken = token.RefreshToken - spt.inner.Token.ExpiresIn = token.ExpiresIn - spt.inner.Token.ExpiresOn = expiresOn - spt.inner.Token.NotBefore = token.NotBefore - spt.inner.Token.Resource = token.Resource - spt.inner.Token.Type = token.Type - - return spt.InvokeRefreshCallbacks(spt.inner.Token) -} - -// converts expires_on to the number of seconds -func parseExpiresOn(s interface{}) (json.Number, error) { - // the JSON unmarshaler treats JSON numbers unmarshaled into an interface{} as float64 - asFloat64, ok := s.(float64) - if ok { - // this is the number of seconds as int case - return json.Number(strconv.FormatInt(int64(asFloat64), 10)), nil - } - asStr, ok := s.(string) - if !ok { - return "", fmt.Errorf("unexpected expires_on type %T", s) - } - // convert the expiration date to the number of seconds from the unix epoch - timeToDuration := func(t time.Time) json.Number { - return json.Number(strconv.FormatInt(t.UTC().Unix(), 10)) - } - if _, err := json.Number(asStr).Int64(); err == nil { - // this is the number of seconds case, no conversion required - return json.Number(asStr), nil - } else if eo, err := time.Parse(expiresOnDateFormatPM, asStr); err == nil { - return timeToDuration(eo), nil - } else if eo, err := time.Parse(expiresOnDateFormat, asStr); err == nil { - return timeToDuration(eo), nil - } else { - // unknown format - return json.Number(""), err - } -} - -// retry logic specific to retrieving a token from the IMDS endpoint -func retryForIMDS(sender Sender, req *http.Request, maxAttempts int) (resp *http.Response, err error) { - // copied from client.go due to circular dependency - retries := []int{ - http.StatusRequestTimeout, // 408 - http.StatusTooManyRequests, // 429 - http.StatusInternalServerError, // 500 - http.StatusBadGateway, // 502 - http.StatusServiceUnavailable, // 503 - http.StatusGatewayTimeout, // 504 - } - // extra retry status codes specific to IMDS - retries = append(retries, - http.StatusNotFound, - http.StatusGone, - // all remaining 5xx - http.StatusNotImplemented, - http.StatusHTTPVersionNotSupported, - http.StatusVariantAlsoNegotiates, - http.StatusInsufficientStorage, - http.StatusLoopDetected, - http.StatusNotExtended, - 
http.StatusNetworkAuthenticationRequired) - - // see https://docs.microsoft.com/en-us/azure/active-directory/managed-service-identity/how-to-use-vm-token#retry-guidance - - const maxDelay time.Duration = 60 * time.Second - - attempt := 0 - delay := time.Duration(0) - - // maxAttempts is user-specified, ensure that its value is greater than zero else no request will be made - if maxAttempts < 1 { - maxAttempts = defaultMaxMSIRefreshAttempts - } - - for attempt < maxAttempts { - if resp != nil && resp.Body != nil { - io.Copy(ioutil.Discard, resp.Body) - resp.Body.Close() - } - resp, err = sender.Do(req) - // we want to retry if err is not nil or the status code is in the list of retry codes - if err == nil && !responseHasStatusCode(resp, retries...) { - return - } - - // perform exponential backoff with a cap. - // must increment attempt before calculating delay. - attempt++ - // the base value of 2 is the "delta backoff" as specified in the guidance doc - delay += (time.Duration(math.Pow(2, float64(attempt))) * time.Second) - if delay > maxDelay { - delay = maxDelay - } - - select { - case <-time.After(delay): - // intentionally left blank - case <-req.Context().Done(): - err = req.Context().Err() - return - } - } - return -} - -func responseHasStatusCode(resp *http.Response, codes ...int) bool { - if resp != nil { - for _, i := range codes { - if i == resp.StatusCode { - return true - } - } - } - return false -} - -// SetAutoRefresh enables or disables automatic refreshing of stale tokens. -func (spt *ServicePrincipalToken) SetAutoRefresh(autoRefresh bool) { - spt.inner.AutoRefresh = autoRefresh -} - -// SetRefreshWithin sets the interval within which if the token will expire, EnsureFresh will -// refresh the token. -func (spt *ServicePrincipalToken) SetRefreshWithin(d time.Duration) { - spt.inner.RefreshWithin = d - return -} - -// SetSender sets the http.Client used when obtaining the Service Principal token. An -// undecorated http.Client is used by default. -func (spt *ServicePrincipalToken) SetSender(s Sender) { spt.sender = s } - -// OAuthToken implements the OAuthTokenProvider interface. It returns the current access token. -func (spt *ServicePrincipalToken) OAuthToken() string { - spt.refreshLock.RLock() - defer spt.refreshLock.RUnlock() - return spt.inner.Token.OAuthToken() -} - -// Token returns a copy of the current token. -func (spt *ServicePrincipalToken) Token() Token { - spt.refreshLock.RLock() - defer spt.refreshLock.RUnlock() - return spt.inner.Token -} - -// MultiTenantServicePrincipalToken contains tokens for multi-tenant authorization. -type MultiTenantServicePrincipalToken struct { - PrimaryToken *ServicePrincipalToken - AuxiliaryTokens []*ServicePrincipalToken -} - -// PrimaryOAuthToken returns the primary authorization token. -func (mt *MultiTenantServicePrincipalToken) PrimaryOAuthToken() string { - return mt.PrimaryToken.OAuthToken() -} - -// AuxiliaryOAuthTokens returns one to three auxiliary authorization tokens. -func (mt *MultiTenantServicePrincipalToken) AuxiliaryOAuthTokens() []string { - tokens := make([]string, len(mt.AuxiliaryTokens)) - for i := range mt.AuxiliaryTokens { - tokens[i] = mt.AuxiliaryTokens[i].OAuthToken() - } - return tokens -} - -// NewMultiTenantServicePrincipalToken creates a new MultiTenantServicePrincipalToken with the specified credentials and resource. 
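The "delta backoff" in retryForIMDS above accumulates 2^attempt seconds per failure and caps each sleep at 60 seconds, so with the default five attempts the waits are 2s, 6s, 14s, 30s, and 60s (30 + 2^5 = 62, capped). A few lines reproduce the schedule:

    package main

    import "fmt"

    func main() {
        delay := 0
        for attempt := 1; attempt <= 5; attempt++ {
            delay += 1 << attempt // += 2^attempt seconds
            if delay > 60 {
                delay = 60 // maxDelay cap
            }
            fmt.Printf("after failed attempt %d: sleep %ds\n", attempt, delay)
        }
    }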
-func NewMultiTenantServicePrincipalToken(multiTenantCfg MultiTenantOAuthConfig, clientID string, secret string, resource string) (*MultiTenantServicePrincipalToken, error) { - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(secret, "secret"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - auxTenants := multiTenantCfg.AuxiliaryTenants() - m := MultiTenantServicePrincipalToken{ - AuxiliaryTokens: make([]*ServicePrincipalToken, len(auxTenants)), - } - primary, err := NewServicePrincipalToken(*multiTenantCfg.PrimaryTenant(), clientID, secret, resource) - if err != nil { - return nil, fmt.Errorf("failed to create SPT for primary tenant: %v", err) - } - m.PrimaryToken = primary - for i := range auxTenants { - aux, err := NewServicePrincipalToken(*auxTenants[i], clientID, secret, resource) - if err != nil { - return nil, fmt.Errorf("failed to create SPT for auxiliary tenant: %v", err) - } - m.AuxiliaryTokens[i] = aux - } - return &m, nil -} - -// NewMultiTenantServicePrincipalTokenFromCertificate creates a new MultiTenantServicePrincipalToken with the specified certificate credentials and resource. -func NewMultiTenantServicePrincipalTokenFromCertificate(multiTenantCfg MultiTenantOAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string) (*MultiTenantServicePrincipalToken, error) { - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - if certificate == nil { - return nil, fmt.Errorf("parameter 'certificate' cannot be nil") - } - if privateKey == nil { - return nil, fmt.Errorf("parameter 'privateKey' cannot be nil") - } - auxTenants := multiTenantCfg.AuxiliaryTenants() - m := MultiTenantServicePrincipalToken{ - AuxiliaryTokens: make([]*ServicePrincipalToken, len(auxTenants)), - } - primary, err := NewServicePrincipalTokenWithSecret( - *multiTenantCfg.PrimaryTenant(), - clientID, - resource, - &ServicePrincipalCertificateSecret{ - PrivateKey: privateKey, - Certificate: certificate, - }, - ) - if err != nil { - return nil, fmt.Errorf("failed to create SPT for primary tenant: %v", err) - } - m.PrimaryToken = primary - for i := range auxTenants { - aux, err := NewServicePrincipalTokenWithSecret( - *auxTenants[i], - clientID, - resource, - &ServicePrincipalCertificateSecret{ - PrivateKey: privateKey, - Certificate: certificate, - }, - ) - if err != nil { - return nil, fmt.Errorf("failed to create SPT for auxiliary tenant: %v", err) - } - m.AuxiliaryTokens[i] = aux - } - return &m, nil -} - -// MSIAvailable returns true if the MSI endpoint is available for authentication. 
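The multi-tenant wrapper above fans the same credentials out across a primary tenant and up to three auxiliary tenants; callers then send the auxiliary tokens alongside the primary one. A sketch, assuming NewMultiTenantOAuthConfig and OAuthOptions from the package's config.go (not shown in this diff); all IDs are placeholders:

    package main

    import (
        "fmt"

        "github.com/Azure/go-autorest/autorest/adal"
    )

    func main() {
        cfg, err := adal.NewMultiTenantOAuthConfig(
            "https://login.microsoftonline.com/",
            "primary-tenant-id",
            []string{"aux-tenant-1", "aux-tenant-2"},
            adal.OAuthOptions{},
        )
        if err != nil {
            panic(err)
        }
        mt, err := adal.NewMultiTenantServicePrincipalToken(cfg, "client-id", "client-secret",
            "https://management.azure.com/")
        if err != nil {
            panic(err)
        }
        fmt.Println("primary token set:", mt.PrimaryOAuthToken() != "")
        fmt.Println("auxiliary tokens:", len(mt.AuxiliaryOAuthTokens()))
    }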
-func MSIAvailable(ctx context.Context, s Sender) bool { - msiType, _, err := getMSIType() - - if err != nil { - return false - } - - if msiType != msiTypeIMDS { - return true - } - - if s == nil { - s = sender() - } - - resp, err := getMSIEndpoint(ctx, s) - - if err == nil { - resp.Body.Close() - } - - return err == nil -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token_1.13.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token_1.13.go deleted file mode 100644 index 89190a4..0000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/token_1.13.go +++ /dev/null @@ -1,76 +0,0 @@ -//go:build go1.13 -// +build go1.13 - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package adal - -import ( - "context" - "fmt" - "net/http" - "time" -) - -func getMSIEndpoint(ctx context.Context, sender Sender) (*http.Response, error) { - tempCtx, cancel := context.WithTimeout(ctx, 2*time.Second) - defer cancel() - // http.NewRequestWithContext() was added in Go 1.13 - req, _ := http.NewRequestWithContext(tempCtx, http.MethodGet, msiEndpoint, nil) - q := req.URL.Query() - q.Add("api-version", msiAPIVersion) - req.URL.RawQuery = q.Encode() - return sender.Do(req) -} - -// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by -// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. -func (mt *MultiTenantServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error { - if err := mt.PrimaryToken.EnsureFreshWithContext(ctx); err != nil { - return fmt.Errorf("failed to refresh primary token: %w", err) - } - for _, aux := range mt.AuxiliaryTokens { - if err := aux.EnsureFreshWithContext(ctx); err != nil { - return fmt.Errorf("failed to refresh auxiliary token: %w", err) - } - } - return nil -} - -// RefreshWithContext obtains a fresh token for the Service Principal. -func (mt *MultiTenantServicePrincipalToken) RefreshWithContext(ctx context.Context) error { - if err := mt.PrimaryToken.RefreshWithContext(ctx); err != nil { - return fmt.Errorf("failed to refresh primary token: %w", err) - } - for _, aux := range mt.AuxiliaryTokens { - if err := aux.RefreshWithContext(ctx); err != nil { - return fmt.Errorf("failed to refresh auxiliary token: %w", err) - } - } - return nil -} - -// RefreshExchangeWithContext refreshes the token, but for a different resource. 
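MSIAvailable above treats the App Service and Cloud Shell environments as trivially available (the env vars prove it) and only probes the network for IMDS; the getMSIEndpoint probe is additionally capped at two seconds. Typical use as a fast capability check:

    package main

    import (
        "context"
        "fmt"

        "github.com/Azure/go-autorest/autorest/adal"
    )

    func main() {
        // A nil Sender falls back to the package's default client; the IMDS
        // probe inside is bounded at 2 seconds on top of any ctx deadline.
        fmt.Println("managed identity available:", adal.MSIAvailable(context.Background(), nil))
    }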
-func (mt *MultiTenantServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error { - if err := mt.PrimaryToken.RefreshExchangeWithContext(ctx, resource); err != nil { - return fmt.Errorf("failed to refresh primary token: %w", err) - } - for _, aux := range mt.AuxiliaryTokens { - if err := aux.RefreshExchangeWithContext(ctx, resource); err != nil { - return fmt.Errorf("failed to refresh auxiliary token: %w", err) - } - } - return nil -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token_legacy.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token_legacy.go deleted file mode 100644 index 27ec4ef..0000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/token_legacy.go +++ /dev/null @@ -1,75 +0,0 @@ -//go:build !go1.13 -// +build !go1.13 - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package adal - -import ( - "context" - "net/http" - "time" -) - -func getMSIEndpoint(ctx context.Context, sender Sender) (*http.Response, error) { - tempCtx, cancel := context.WithTimeout(ctx, 2*time.Second) - defer cancel() - req, _ := http.NewRequest(http.MethodGet, msiEndpoint, nil) - req = req.WithContext(tempCtx) - q := req.URL.Query() - q.Add("api-version", msiAPIVersion) - req.URL.RawQuery = q.Encode() - return sender.Do(req) -} - -// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by -// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. -func (mt *MultiTenantServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error { - if err := mt.PrimaryToken.EnsureFreshWithContext(ctx); err != nil { - return err - } - for _, aux := range mt.AuxiliaryTokens { - if err := aux.EnsureFreshWithContext(ctx); err != nil { - return err - } - } - return nil -} - -// RefreshWithContext obtains a fresh token for the Service Principal. -func (mt *MultiTenantServicePrincipalToken) RefreshWithContext(ctx context.Context) error { - if err := mt.PrimaryToken.RefreshWithContext(ctx); err != nil { - return err - } - for _, aux := range mt.AuxiliaryTokens { - if err := aux.RefreshWithContext(ctx); err != nil { - return err - } - } - return nil -} - -// RefreshExchangeWithContext refreshes the token, but for a different resource. 
-func (mt *MultiTenantServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error { - if err := mt.PrimaryToken.RefreshExchangeWithContext(ctx, resource); err != nil { - return err - } - for _, aux := range mt.AuxiliaryTokens { - if err := aux.RefreshExchangeWithContext(ctx, resource); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/version.go b/vendor/github.com/Azure/go-autorest/autorest/adal/version.go deleted file mode 100644 index c867b34..0000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/version.go +++ /dev/null @@ -1,45 +0,0 @@ -package adal - -import ( - "fmt" - "runtime" -) - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -const number = "v1.0.0" - -var ( - ua = fmt.Sprintf("Go/%s (%s-%s) go-autorest/adal/%s", - runtime.Version(), - runtime.GOARCH, - runtime.GOOS, - number, - ) -) - -// UserAgent returns a string containing the Go version, system architecture and OS, and the adal version. -func UserAgent() string { - return ua -} - -// AddToUserAgent adds an extension to the current user agent -func AddToUserAgent(extension string) error { - if extension != "" { - ua = fmt.Sprintf("%s %s", ua, extension) - return nil - } - return fmt.Errorf("Extension was empty, User Agent remained as '%s'", ua) -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization.go b/vendor/github.com/Azure/go-autorest/autorest/authorization.go deleted file mode 100644 index 1226c41..0000000 --- a/vendor/github.com/Azure/go-autorest/autorest/authorization.go +++ /dev/null @@ -1,353 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "crypto/tls" - "encoding/base64" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/Azure/go-autorest/autorest/adal" -) - -const ( - bearerChallengeHeader = "Www-Authenticate" - bearer = "Bearer" - tenantID = "tenantID" - apiKeyAuthorizerHeader = "Ocp-Apim-Subscription-Key" - bingAPISdkHeader = "X-BingApis-SDK-Client" - golangBingAPISdkHeaderValue = "Go-SDK" - authorization = "Authorization" - basic = "Basic" -) - -// Authorizer is the interface that provides a PrepareDecorator used to supply request -// authorization. Most often, the Authorizer decorator runs last so it has access to the full -// state of the formed HTTP request. 
-type Authorizer interface { - WithAuthorization() PrepareDecorator -} - -// NullAuthorizer implements a default, "do nothing" Authorizer. -type NullAuthorizer struct{} - -// WithAuthorization returns a PrepareDecorator that does nothing. -func (na NullAuthorizer) WithAuthorization() PrepareDecorator { - return WithNothing() -} - -// APIKeyAuthorizer implements API Key authorization. -type APIKeyAuthorizer struct { - headers map[string]interface{} - queryParameters map[string]interface{} -} - -// NewAPIKeyAuthorizerWithHeaders creates an ApiKeyAuthorizer with headers. -func NewAPIKeyAuthorizerWithHeaders(headers map[string]interface{}) *APIKeyAuthorizer { - return NewAPIKeyAuthorizer(headers, nil) -} - -// NewAPIKeyAuthorizerWithQueryParameters creates an ApiKeyAuthorizer with query parameters. -func NewAPIKeyAuthorizerWithQueryParameters(queryParameters map[string]interface{}) *APIKeyAuthorizer { - return NewAPIKeyAuthorizer(nil, queryParameters) -} - -// NewAPIKeyAuthorizer creates an ApiKeyAuthorizer with headers and query parameters. -func NewAPIKeyAuthorizer(headers map[string]interface{}, queryParameters map[string]interface{}) *APIKeyAuthorizer { - return &APIKeyAuthorizer{headers: headers, queryParameters: queryParameters} -} - -// WithAuthorization returns a PrepareDecorator that adds HTTP headers and query parameters. -func (aka *APIKeyAuthorizer) WithAuthorization() PrepareDecorator { - return func(p Preparer) Preparer { - return DecoratePreparer(p, WithHeaders(aka.headers), WithQueryParameters(aka.queryParameters)) - } -} - -// CognitiveServicesAuthorizer implements authorization for Cognitive Services. -type CognitiveServicesAuthorizer struct { - subscriptionKey string -} - -// NewCognitiveServicesAuthorizer creates a CognitiveServicesAuthorizer using the given subscription key. -func NewCognitiveServicesAuthorizer(subscriptionKey string) *CognitiveServicesAuthorizer { - return &CognitiveServicesAuthorizer{subscriptionKey: subscriptionKey} -} - -// WithAuthorization returns a PrepareDecorator that adds the Cognitive Services subscription key and Bing SDK headers. -func (csa *CognitiveServicesAuthorizer) WithAuthorization() PrepareDecorator { - headers := make(map[string]interface{}) - headers[apiKeyAuthorizerHeader] = csa.subscriptionKey - headers[bingAPISdkHeader] = golangBingAPISdkHeaderValue - - return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization() -} - -// BearerAuthorizer implements bearer token authorization. -type BearerAuthorizer struct { - tokenProvider adal.OAuthTokenProvider -} - -// NewBearerAuthorizer creates a BearerAuthorizer using the given token provider. -func NewBearerAuthorizer(tp adal.OAuthTokenProvider) *BearerAuthorizer { - return &BearerAuthorizer{tokenProvider: tp} -} - -// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose -// value is "Bearer " followed by the token. -// -// By default, the token will be automatically refreshed through the Refresher interface.
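// NOTE (editorial addition, not vendored source): a minimal usage sketch; spt is assumed to
// be an adal.OAuthTokenProvider (for example a service principal token) obtained elsewhere,
// and the base URL is a placeholder:
//
//	authorizer := autorest.NewBearerAuthorizer(spt)
//	req, err := autorest.Prepare(&http.Request{},
//		autorest.WithBaseURL("https://management.azure.com/"),
//		authorizer.WithAuthorization())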
-func (ba *BearerAuthorizer) WithAuthorization() PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - // the ordering is important here, prefer RefresherWithContext if available - if refresher, ok := ba.tokenProvider.(adal.RefresherWithContext); ok { - err = refresher.EnsureFreshWithContext(r.Context()) - } else if refresher, ok := ba.tokenProvider.(adal.Refresher); ok { - err = refresher.EnsureFresh() - } - if err != nil { - var resp *http.Response - if tokError, ok := err.(adal.TokenRefreshError); ok { - resp = tokError.Response() - } - return r, NewErrorWithError(err, "azure.BearerAuthorizer", "WithAuthorization", resp, - "Failed to refresh the Token for request to %s", r.URL) - } - return Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", ba.tokenProvider.OAuthToken()))) - } - return r, err - }) - } -} - -// TokenProvider returns OAuthTokenProvider so that it can be used for authorization outside the REST. -func (ba *BearerAuthorizer) TokenProvider() adal.OAuthTokenProvider { - return ba.tokenProvider -} - -// BearerAuthorizerCallbackFunc is the authentication callback signature. -type BearerAuthorizerCallbackFunc func(tenantID, resource string) (*BearerAuthorizer, error) - -// BearerAuthorizerCallback implements bearer authorization via a callback. -type BearerAuthorizerCallback struct { - sender Sender - callback BearerAuthorizerCallbackFunc -} - -// NewBearerAuthorizerCallback creates a bearer authorization callback. The callback -// is invoked when the HTTP request is submitted. -func NewBearerAuthorizerCallback(s Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback { - if s == nil { - s = sender(tls.RenegotiateNever) - } - return &BearerAuthorizerCallback{sender: s, callback: callback} -} - -// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose value -// is "Bearer " followed by the token. The BearerAuthorizer is obtained via a user-supplied callback. -// -// By default, the token will be automatically refreshed through the Refresher interface. -func (bacb *BearerAuthorizerCallback) WithAuthorization() PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - // make a copy of the request and remove the body as it's not - // required and avoids us having to create a copy of it. 
- rCopy := *r - removeRequestBody(&rCopy) - - resp, err := bacb.sender.Do(&rCopy) - if err != nil { - return r, err - } - DrainResponseBody(resp) - if resp.StatusCode == 401 && hasBearerChallenge(resp.Header) { - bc, err := newBearerChallenge(resp.Header) - if err != nil { - return r, err - } - if bacb.callback != nil { - ba, err := bacb.callback(bc.values[tenantID], bc.values["resource"]) - if err != nil { - return r, err - } - return Prepare(r, ba.WithAuthorization()) - } - } - } - return r, err - }) - } -} - -// returns true if the HTTP response contains a bearer challenge -func hasBearerChallenge(header http.Header) bool { - authHeader := header.Get(bearerChallengeHeader) - if len(authHeader) == 0 || strings.Index(authHeader, bearer) < 0 { - return false - } - return true -} - -type bearerChallenge struct { - values map[string]string -} - -func newBearerChallenge(header http.Header) (bc bearerChallenge, err error) { - challenge := strings.TrimSpace(header.Get(bearerChallengeHeader)) - trimmedChallenge := challenge[len(bearer)+1:] - - // challenge is a set of key=value pairs that are comma delimited - pairs := strings.Split(trimmedChallenge, ",") - if len(pairs) < 1 { - err = fmt.Errorf("challenge '%s' contains no pairs", challenge) - return bc, err - } - - bc.values = make(map[string]string) - for i := range pairs { - trimmedPair := strings.TrimSpace(pairs[i]) - pair := strings.Split(trimmedPair, "=") - if len(pair) == 2 { - // remove the enclosing quotes - key := strings.Trim(pair[0], "\"") - value := strings.Trim(pair[1], "\"") - - switch key { - case "authorization", "authorization_uri": - // strip the tenant ID from the authorization URL - asURL, err := url.Parse(value) - if err != nil { - return bc, err - } - bc.values[tenantID] = asURL.Path[1:] - default: - bc.values[key] = value - } - } - } - - return bc, err -} - -// EventGridKeyAuthorizer implements authorization for event grid using key authentication. -type EventGridKeyAuthorizer struct { - topicKey string -} - -// NewEventGridKeyAuthorizer creates a new EventGridKeyAuthorizer -// with the specified topic key. -func NewEventGridKeyAuthorizer(topicKey string) EventGridKeyAuthorizer { - return EventGridKeyAuthorizer{topicKey: topicKey} -} - -// WithAuthorization returns a PrepareDecorator that adds the aeg-sas-key authentication header. -func (egta EventGridKeyAuthorizer) WithAuthorization() PrepareDecorator { - headers := map[string]interface{}{ - "aeg-sas-key": egta.topicKey, - } - return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization() -} - -// BasicAuthorizer implements basic HTTP authorization by adding the Authorization HTTP header -// with the value "Basic <token>", where <token> is a base64-encoded username:password tuple. -type BasicAuthorizer struct { - userName string - password string -} - -// NewBasicAuthorizer creates a new BasicAuthorizer with the specified username and password. -func NewBasicAuthorizer(userName, password string) *BasicAuthorizer { - return &BasicAuthorizer{ - userName: userName, - password: password, - } -} - -// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose -// value is "Basic " followed by the base64-encoded username:password tuple.
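// NOTE (editorial addition, not vendored source): an illustrative sketch with placeholder
// credentials and URL; the resulting header is "Basic " + base64("user:password"):
//
//	ba := autorest.NewBasicAuthorizer("user", "password")
//	req, err := autorest.Prepare(&http.Request{},
//		autorest.WithBaseURL("https://example.com/"),
//		ba.WithAuthorization())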
-func (ba *BasicAuthorizer) WithAuthorization() PrepareDecorator { - headers := make(map[string]interface{}) - headers[authorization] = basic + " " + base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", ba.userName, ba.password))) - - return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization() -} - -// MultiTenantServicePrincipalTokenAuthorizer provides authentication across tenants. -type MultiTenantServicePrincipalTokenAuthorizer interface { - WithAuthorization() PrepareDecorator -} - -// NewMultiTenantServicePrincipalTokenAuthorizer creates a MultiTenantBearerAuthorizer using the given token provider. -func NewMultiTenantServicePrincipalTokenAuthorizer(tp adal.MultitenantOAuthTokenProvider) MultiTenantServicePrincipalTokenAuthorizer { - return NewMultiTenantBearerAuthorizer(tp) -} - -// MultiTenantBearerAuthorizer implements bearer authorization across multiple tenants. -type MultiTenantBearerAuthorizer struct { - tp adal.MultitenantOAuthTokenProvider -} - -// NewMultiTenantBearerAuthorizer creates a MultiTenantBearerAuthorizer using the given token provider. -func NewMultiTenantBearerAuthorizer(tp adal.MultitenantOAuthTokenProvider) *MultiTenantBearerAuthorizer { - return &MultiTenantBearerAuthorizer{tp: tp} -} - -// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header using the -// primary token along with the auxiliary authorization header using the auxiliary tokens. -// -// By default, the token will be automatically refreshed through the Refresher interface. -func (mt *MultiTenantBearerAuthorizer) WithAuthorization() PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err != nil { - return r, err - } - if refresher, ok := mt.tp.(adal.RefresherWithContext); ok { - err = refresher.EnsureFreshWithContext(r.Context()) - if err != nil { - var resp *http.Response - if tokError, ok := err.(adal.TokenRefreshError); ok { - resp = tokError.Response() - } - return r, NewErrorWithError(err, "azure.multiTenantSPTAuthorizer", "WithAuthorization", resp, - "Failed to refresh one or more Tokens for request to %s", r.URL) - } - } - r, err = Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", mt.tp.PrimaryOAuthToken()))) - if err != nil { - return r, err - } - auxTokens := mt.tp.AuxiliaryOAuthTokens() - for i := range auxTokens { - auxTokens[i] = fmt.Sprintf("Bearer %s", auxTokens[i]) - } - return Prepare(r, WithHeader(headerAuxAuthorization, strings.Join(auxTokens, ", "))) - }) - } -} - -// TokenProvider returns the underlying MultitenantOAuthTokenProvider for this authorizer. -func (mt *MultiTenantBearerAuthorizer) TokenProvider() adal.MultitenantOAuthTokenProvider { - return mt.tp -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization_sas.go b/vendor/github.com/Azure/go-autorest/autorest/authorization_sas.go deleted file mode 100644 index 6650149..0000000 --- a/vendor/github.com/Azure/go-autorest/autorest/authorization_sas.go +++ /dev/null @@ -1,66 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "fmt" - "net/http" - "strings" -) - -// SASTokenAuthorizer implements an authorization for SAS Token Authentication -// this can be used for interaction with Blob Storage Endpoints -type SASTokenAuthorizer struct { - sasToken string -} - -// NewSASTokenAuthorizer creates a SASTokenAuthorizer using the given credentials -func NewSASTokenAuthorizer(sasToken string) (*SASTokenAuthorizer, error) { - if strings.TrimSpace(sasToken) == "" { - return nil, fmt.Errorf("sasToken cannot be empty") - } - - token := sasToken - if strings.HasPrefix(sasToken, "?") { - token = strings.TrimPrefix(sasToken, "?") - } - - return &SASTokenAuthorizer{ - sasToken: token, - }, nil -} - -// WithAuthorization returns a PrepareDecorator that adds a shared access signature token to the -// URI's query parameters. This can be used for the Blob, Queue, and File Services. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/delegate-access-with-shared-access-signature -func (sas *SASTokenAuthorizer) WithAuthorization() PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err != nil { - return r, err - } - - if r.URL.RawQuery == "" { - r.URL.RawQuery = sas.sasToken - } else if !strings.Contains(r.URL.RawQuery, sas.sasToken) { - r.URL.RawQuery = fmt.Sprintf("%s&%s", r.URL.RawQuery, sas.sasToken) - } - - return Prepare(r) - }) - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go b/vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go deleted file mode 100644 index 2af5030..0000000 --- a/vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go +++ /dev/null @@ -1,307 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "crypto/hmac" - "crypto/sha256" - "encoding/base64" - "fmt" - "net/http" - "net/url" - "sort" - "strings" - "time" -) - -// SharedKeyType defines the enumeration for the various shared key types. -// See https://docs.microsoft.com/en-us/rest/api/storageservices/authorize-with-shared-key for details on the shared key types. -type SharedKeyType string - -const ( - // SharedKey is used to authorize against blobs, files and queues services. - SharedKey SharedKeyType = "sharedKey" - - // SharedKeyForTable is used to authorize against the table service. - SharedKeyForTable SharedKeyType = "sharedKeyTable" - - // SharedKeyLite is used to authorize against blobs, files and queues services. 
It's provided for - // backwards compatibility with API versions before 2009-09-19. Prefer SharedKey instead. - SharedKeyLite SharedKeyType = "sharedKeyLite" - - // SharedKeyLiteForTable is used to authorize against the table service. It's provided for - // backwards compatibility with older table API versions. Prefer SharedKeyForTable instead. - SharedKeyLiteForTable SharedKeyType = "sharedKeyLiteTable" -) - -const ( - headerAccept = "Accept" - headerAcceptCharset = "Accept-Charset" - headerContentEncoding = "Content-Encoding" - headerContentLength = "Content-Length" - headerContentMD5 = "Content-MD5" - headerContentLanguage = "Content-Language" - headerIfModifiedSince = "If-Modified-Since" - headerIfMatch = "If-Match" - headerIfNoneMatch = "If-None-Match" - headerIfUnmodifiedSince = "If-Unmodified-Since" - headerDate = "Date" - headerXMSDate = "X-Ms-Date" - headerXMSVersion = "x-ms-version" - headerRange = "Range" -) - -const storageEmulatorAccountName = "devstoreaccount1" - -// SharedKeyAuthorizer implements an authorization for Shared Key. -// This can be used for interaction with Blob, File and Queue Storage Endpoints. -type SharedKeyAuthorizer struct { - accountName string - accountKey []byte - keyType SharedKeyType -} - -// NewSharedKeyAuthorizer creates a SharedKeyAuthorizer using the provided credentials and shared key type. -func NewSharedKeyAuthorizer(accountName, accountKey string, keyType SharedKeyType) (*SharedKeyAuthorizer, error) { - key, err := base64.StdEncoding.DecodeString(accountKey) - if err != nil { - return nil, fmt.Errorf("malformed storage account key: %v", err) - } - return &SharedKeyAuthorizer{ - accountName: accountName, - accountKey: key, - keyType: keyType, - }, nil -} - -// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose -// value is "<scheme> <account name>:<signature>", where <scheme> is "SharedKey" or "SharedKeyLite". -// This can be used for the Blob, Queue, and File Services. -// -// from: https://docs.microsoft.com/en-us/rest/api/storageservices/authorize-with-shared-key -// You may use Shared Key authorization to authorize a request made against the -// 2009-09-19 version and later of the Blob and Queue services, -// and version 2014-02-14 and later of the File services.
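// NOTE (editorial addition, not vendored source): a hypothetical sketch; the account name,
// base64-encoded key, and container URL are placeholders:
//
//	ska, err := autorest.NewSharedKeyAuthorizer("myaccount", encodedKey, autorest.SharedKey)
//	if err == nil {
//		req, _ := autorest.Prepare(&http.Request{},
//			autorest.WithBaseURL("https://myaccount.blob.core.windows.net/container?restype=container"),
//			ska.WithAuthorization())
//	}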
-func (sk *SharedKeyAuthorizer) WithAuthorization() PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err != nil { - return r, err - } - - sk, err := buildSharedKey(sk.accountName, sk.accountKey, r, sk.keyType) - if err != nil { - return r, err - } - return Prepare(r, WithHeader(headerAuthorization, sk)) - }) - } -} - -func buildSharedKey(accName string, accKey []byte, req *http.Request, keyType SharedKeyType) (string, error) { - canRes, err := buildCanonicalizedResource(accName, req.URL.String(), keyType) - if err != nil { - return "", err - } - - if req.Header == nil { - req.Header = http.Header{} - } - - // ensure date is set - if req.Header.Get(headerDate) == "" && req.Header.Get(headerXMSDate) == "" { - date := time.Now().UTC().Format(http.TimeFormat) - req.Header.Set(headerXMSDate, date) - } - canString, err := buildCanonicalizedString(req.Method, req.Header, canRes, keyType) - if err != nil { - return "", err - } - return createAuthorizationHeader(accName, accKey, canString, keyType), nil -} - -func buildCanonicalizedResource(accountName, uri string, keyType SharedKeyType) (string, error) { - errMsg := "buildCanonicalizedResource error: %s" - u, err := url.Parse(uri) - if err != nil { - return "", fmt.Errorf(errMsg, err.Error()) - } - - cr := bytes.NewBufferString("") - if accountName != storageEmulatorAccountName { - cr.WriteString("/") - cr.WriteString(getCanonicalizedAccountName(accountName)) - } - - if len(u.Path) > 0 { - // Any portion of the CanonicalizedResource string that is derived from - // the resource's URI should be encoded exactly as it is in the URI. - // -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx - cr.WriteString(u.EscapedPath()) - } else { - // a slash is required to indicate the root path - cr.WriteString("/") - } - - params, err := url.ParseQuery(u.RawQuery) - if err != nil { - return "", fmt.Errorf(errMsg, err.Error()) - } - - // See https://github.com/Azure/azure-storage-net/blob/master/Lib/Common/Core/Util/AuthenticationUtility.cs#L277 - if keyType == SharedKey { - if len(params) > 0 { - cr.WriteString("\n") - - keys := []string{} - for key := range params { - keys = append(keys, key) - } - sort.Strings(keys) - - completeParams := []string{} - for _, key := range keys { - if len(params[key]) > 1 { - sort.Strings(params[key]) - } - - completeParams = append(completeParams, fmt.Sprintf("%s:%s", key, strings.Join(params[key], ","))) - } - cr.WriteString(strings.Join(completeParams, "\n")) - } - } else { - // search for "comp" parameter, if exists then add it to canonicalizedresource - if v, ok := params["comp"]; ok { - cr.WriteString("?comp=" + v[0]) - } - } - - return string(cr.Bytes()), nil -} - -func getCanonicalizedAccountName(accountName string) string { - // since we may be trying to access a secondary storage account, we need to - // remove the -secondary part of the storage name - return strings.TrimSuffix(accountName, "-secondary") -} - -func buildCanonicalizedString(verb string, headers http.Header, canonicalizedResource string, keyType SharedKeyType) (string, error) { - contentLength := headers.Get(headerContentLength) - if contentLength == "0" { - contentLength = "" - } - date := headers.Get(headerDate) - if v := headers.Get(headerXMSDate); v != "" { - if keyType == SharedKey || keyType == SharedKeyLite { - date = "" - } else { - date = v - } - } - var canString string - switch keyType { - case SharedKey: - canString = 
strings.Join([]string{ - verb, - headers.Get(headerContentEncoding), - headers.Get(headerContentLanguage), - contentLength, - headers.Get(headerContentMD5), - headers.Get(headerContentType), - date, - headers.Get(headerIfModifiedSince), - headers.Get(headerIfMatch), - headers.Get(headerIfNoneMatch), - headers.Get(headerIfUnmodifiedSince), - headers.Get(headerRange), - buildCanonicalizedHeader(headers), - canonicalizedResource, - }, "\n") - case SharedKeyForTable: - canString = strings.Join([]string{ - verb, - headers.Get(headerContentMD5), - headers.Get(headerContentType), - date, - canonicalizedResource, - }, "\n") - case SharedKeyLite: - canString = strings.Join([]string{ - verb, - headers.Get(headerContentMD5), - headers.Get(headerContentType), - date, - buildCanonicalizedHeader(headers), - canonicalizedResource, - }, "\n") - case SharedKeyLiteForTable: - canString = strings.Join([]string{ - date, - canonicalizedResource, - }, "\n") - default: - return "", fmt.Errorf("key type '%s' is not supported", keyType) - } - return canString, nil -} - -func buildCanonicalizedHeader(headers http.Header) string { - cm := make(map[string]string) - - for k := range headers { - headerName := strings.TrimSpace(strings.ToLower(k)) - if strings.HasPrefix(headerName, "x-ms-") { - cm[headerName] = headers.Get(k) - } - } - - if len(cm) == 0 { - return "" - } - - keys := []string{} - for key := range cm { - keys = append(keys, key) - } - - sort.Strings(keys) - - ch := bytes.NewBufferString("") - - for _, key := range keys { - ch.WriteString(key) - ch.WriteRune(':') - ch.WriteString(cm[key]) - ch.WriteRune('\n') - } - - return strings.TrimSuffix(string(ch.Bytes()), "\n") -} - -func createAuthorizationHeader(accountName string, accountKey []byte, canonicalizedString string, keyType SharedKeyType) string { - h := hmac.New(sha256.New, accountKey) - h.Write([]byte(canonicalizedString)) - signature := base64.StdEncoding.EncodeToString(h.Sum(nil)) - var key string - switch keyType { - case SharedKey, SharedKeyForTable: - key = "SharedKey" - case SharedKeyLite, SharedKeyLiteForTable: - key = "SharedKeyLite" - } - return fmt.Sprintf("%s %s:%s", key, getCanonicalizedAccountName(accountName), signature) -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/autorest.go b/vendor/github.com/Azure/go-autorest/autorest/autorest.go deleted file mode 100644 index aafdf02..0000000 --- a/vendor/github.com/Azure/go-autorest/autorest/autorest.go +++ /dev/null @@ -1,150 +0,0 @@ -/* -Package autorest implements an HTTP request pipeline suitable for use across multiple go-routines -and provides the shared routines relied on by AutoRest (see https://github.com/Azure/autorest/) -generated Go code. - -The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending, -and Responding. A typical pattern is: - - req, err := Prepare(&http.Request{}, - token.WithAuthorization()) - - resp, err := Send(req, - WithLogging(logger), - DoErrorIfStatusCode(http.StatusInternalServerError), - DoCloseIfError(), - DoRetryForAttempts(5, time.Second)) - - err = Respond(resp, - ByDiscardingBody(), - ByClosing()) - -Each phase relies on decorators to modify and / or manage processing. Decorators may first modify -and then pass the data along, pass the data first and then modify the result, or wrap themselves -around passing the data (such as a logger might do). Decorators run in the order provided. 
For -example, the following: - - req, err := Prepare(&http.Request{}, - WithBaseURL("https://microsoft.com/"), - WithPath("a"), - WithPath("b"), - WithPath("c")) - -will set the URL to: - - https://microsoft.com/a/b/c - -Preparers and Responders may be shared and re-used (assuming the underlying decorators support -sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders -shared among multiple go-routines, and a single Sender shared among multiple sending go-routines, -all bound together by means of input / output channels. - -Decorators hold their passed state within a closure (such as the path components in the example -above). Be careful to share Preparers and Responders only in a context where such held state -applies. For example, it may not make sense to share a Preparer that applies a query string from a -fixed set of values. Similarly, sharing a Responder that reads the response body into a passed -struct (e.g., ByUnmarshallingJson) is likely incorrect. - -Lastly, the Swagger specification (https://swagger.io) that drives AutoRest -(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The -github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure -correct parsing and formatting. - -Errors raised by autorest objects and methods will conform to the autorest.Error interface. - -See the included examples for more detail. For details on the suggested use of this package by -generated clients, see the Client described below. -*/ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "context" - "net/http" - "time" -) - -const ( - // HeaderLocation specifies the HTTP Location header. - HeaderLocation = "Location" - - // HeaderRetryAfter specifies the HTTP Retry-After header. - HeaderRetryAfter = "Retry-After" -) - -// ResponseHasStatusCode returns true if the status code in the HTTP Response is in the passed set -// and false otherwise. -func ResponseHasStatusCode(resp *http.Response, codes ...int) bool { - if resp == nil { - return false - } - return containsInt(codes, resp.StatusCode) -} - -// GetLocation retrieves the URL from the Location header of the passed response. -func GetLocation(resp *http.Response) string { - return resp.Header.Get(HeaderLocation) -} - -// GetRetryAfter extracts the retry delay from the Retry-After header of the passed response. If -// the header is absent or is malformed, it will return the supplied default delay time.Duration. -func GetRetryAfter(resp *http.Response, defaultDelay time.Duration) time.Duration { - retry := resp.Header.Get(HeaderRetryAfter) - if retry == "" { - return defaultDelay - } - - d, err := time.ParseDuration(retry + "s") - if err != nil { - return defaultDelay - } - - return d -} - -// NewPollingRequest allocates and returns a new http.Request to poll for the passed response. 
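// NOTE (editorial addition, not vendored source): a sketch of one hand-rolled poll step
// using the helpers above; resp and sender are assumed to exist from an earlier request:
//
//	req, err := autorest.NewPollingRequestWithContext(ctx, resp)
//	if err == nil {
//		time.Sleep(autorest.GetRetryAfter(resp, 30*time.Second)) // honor Retry-After, else default
//		resp, err = sender.Do(req)
//	}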
-func NewPollingRequest(resp *http.Response, cancel <-chan struct{}) (*http.Request, error) { - location := GetLocation(resp) - if location == "" { - return nil, NewErrorWithResponse("autorest", "NewPollingRequest", resp, "Location header missing from response that requires polling") - } - - req, err := Prepare(&http.Request{Cancel: cancel}, - AsGet(), - WithBaseURL(location)) - if err != nil { - return nil, NewErrorWithError(err, "autorest", "NewPollingRequest", nil, "Failure creating poll request to %s", location) - } - - return req, nil -} - -// NewPollingRequestWithContext allocates and returns a new http.Request with the specified context to poll for the passed response. -func NewPollingRequestWithContext(ctx context.Context, resp *http.Response) (*http.Request, error) { - location := GetLocation(resp) - if location == "" { - return nil, NewErrorWithResponse("autorest", "NewPollingRequestWithContext", resp, "Location header missing from response that requires polling") - } - - req, err := Prepare((&http.Request{}).WithContext(ctx), - AsGet(), - WithBaseURL(location)) - if err != nil { - return nil, NewErrorWithError(err, "autorest", "NewPollingRequestWithContext", nil, "Failure creating poll request to %s", location) - } - - return req, nil -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go deleted file mode 100644 index 45575ee..0000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go +++ /dev/null @@ -1,995 +0,0 @@ -package azure - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "strings" - "time" - - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/logger" - "github.com/Azure/go-autorest/tracing" -) - -const ( - headerAsyncOperation = "Azure-AsyncOperation" -) - -const ( - operationInProgress string = "InProgress" - operationCanceled string = "Canceled" - operationFailed string = "Failed" - operationSucceeded string = "Succeeded" -) - -var pollingCodes = [...]int{http.StatusNoContent, http.StatusAccepted, http.StatusCreated, http.StatusOK} - -// FutureAPI contains the set of methods on the Future type. -type FutureAPI interface { - // Response returns the last HTTP response. - Response() *http.Response - - // Status returns the last status message of the operation. - Status() string - - // PollingMethod returns the method used to monitor the status of the asynchronous operation. - PollingMethod() PollingMethodType - - // DoneWithContext queries the service to see if the operation has completed. - DoneWithContext(context.Context, autorest.Sender) (bool, error) - - // GetPollingDelay returns a duration the application should wait before checking - // the status of the asynchronous request and true; this value is returned from - // the service via the Retry-After response header. 
If the header wasn't returned - // then the function returns the zero-value time.Duration and false. - GetPollingDelay() (time.Duration, bool) - - // WaitForCompletionRef will return when one of the following conditions is met: the long - // running operation has completed, the provided context is cancelled, or the client's - // polling duration has been exceeded. It will retry failed polling attempts based on - // the retry value defined in the client up to the maximum retry attempts. - // If no deadline is specified in the context then the client.PollingDuration will be - // used to determine if a default deadline should be used. - // If PollingDuration is greater than zero the value will be used as the context's timeout. - // If PollingDuration is zero then no default deadline will be used. - WaitForCompletionRef(context.Context, autorest.Client) error - - // MarshalJSON implements the json.Marshaler interface. - MarshalJSON() ([]byte, error) - - // UnmarshalJSON implements the json.Unmarshaler interface. - UnmarshalJSON([]byte) error - - // PollingURL returns the URL used for retrieving the status of the long-running operation. - PollingURL() string - - // GetResult should be called once polling has completed successfully. - // It makes the final GET call to retrieve the resultant payload. - GetResult(autorest.Sender) (*http.Response, error) -} - -var _ FutureAPI = (*Future)(nil) - -// Future provides a mechanism to access the status and results of an asynchronous request. -// Since futures are stateful they should be passed by value to avoid race conditions. -type Future struct { - pt pollingTracker -} - -// NewFutureFromResponse returns a new Future object initialized -// with the initial response from an asynchronous operation. -func NewFutureFromResponse(resp *http.Response) (Future, error) { - pt, err := createPollingTracker(resp) - return Future{pt: pt}, err -} - -// Response returns the last HTTP response. -func (f Future) Response() *http.Response { - if f.pt == nil { - return nil - } - return f.pt.latestResponse() -} - -// Status returns the last status message of the operation. -func (f Future) Status() string { - if f.pt == nil { - return "" - } - return f.pt.pollingStatus() -} - -// PollingMethod returns the method used to monitor the status of the asynchronous operation. -func (f Future) PollingMethod() PollingMethodType { - if f.pt == nil { - return PollingUnknown - } - return f.pt.pollingMethod() -} - -// DoneWithContext queries the service to see if the operation has completed.
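// NOTE (editorial addition, not vendored source): a minimal polling loop, assuming future is
// an azure.Future and client is an autorest.Client (which satisfies autorest.Sender):
//
//	for {
//		done, err := future.DoneWithContext(ctx, client)
//		if err != nil || done {
//			break
//		}
//		if delay, ok := future.GetPollingDelay(); ok {
//			time.Sleep(delay) // honor the service's Retry-After hint
//		}
//	}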
-func (f *Future) DoneWithContext(ctx context.Context, sender autorest.Sender) (done bool, err error) { - ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.DoneWithContext") - defer func() { - sc := -1 - resp := f.Response() - if resp != nil { - sc = resp.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - - if f.pt == nil { - return false, autorest.NewError("Future", "Done", "future is not initialized") - } - if f.pt.hasTerminated() { - return true, f.pt.pollingError() - } - if err := f.pt.pollForStatus(ctx, sender); err != nil { - return false, err - } - if err := f.pt.checkForErrors(); err != nil { - return f.pt.hasTerminated(), err - } - if err := f.pt.updatePollingState(f.pt.provisioningStateApplicable()); err != nil { - return false, err - } - if err := f.pt.initPollingMethod(); err != nil { - return false, err - } - if err := f.pt.updatePollingMethod(); err != nil { - return false, err - } - return f.pt.hasTerminated(), f.pt.pollingError() -} - -// GetPollingDelay returns a duration the application should wait before checking -// the status of the asynchronous request and true; this value is returned from -// the service via the Retry-After response header. If the header wasn't returned -// then the function returns the zero-value time.Duration and false. -func (f Future) GetPollingDelay() (time.Duration, bool) { - if f.pt == nil { - return 0, false - } - resp := f.pt.latestResponse() - if resp == nil { - return 0, false - } - - retry := resp.Header.Get(autorest.HeaderRetryAfter) - if retry == "" { - return 0, false - } - - d, err := time.ParseDuration(retry + "s") - if err != nil { - panic(err) - } - - return d, true -} - -// WaitForCompletionRef will return when one of the following conditions is met: the long -// running operation has completed, the provided context is cancelled, or the client's -// polling duration has been exceeded. It will retry failed polling attempts based on -// the retry value defined in the client up to the maximum retry attempts. -// If no deadline is specified in the context then the client.PollingDuration will be -// used to determine if a default deadline should be used. -// If PollingDuration is greater than zero the value will be used as the context's timeout. -// If PollingDuration is zero then no default deadline will be used. 
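// NOTE (editorial addition, not vendored source): the typical call pattern, assuming future
// and client as in the sketch above:
//
//	if err := future.WaitForCompletionRef(ctx, client); err != nil {
//		return err
//	}
//	resp, err := future.GetResult(client)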
-func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Client) (err error) { - ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.WaitForCompletionRef") - defer func() { - sc := -1 - resp := f.Response() - if resp != nil { - sc = resp.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - cancelCtx := ctx - // if the provided context already has a deadline don't override it - _, hasDeadline := ctx.Deadline() - if d := client.PollingDuration; !hasDeadline && d != 0 { - var cancel context.CancelFunc - cancelCtx, cancel = context.WithTimeout(ctx, d) - defer cancel() - } - // if the initial response has a Retry-After, sleep for the specified amount of time before starting to poll - if delay, ok := f.GetPollingDelay(); ok { - logger.Instance.Writeln(logger.LogInfo, "WaitForCompletionRef: initial polling delay") - if delayElapsed := autorest.DelayForBackoff(delay, 0, cancelCtx.Done()); !delayElapsed { - err = cancelCtx.Err() - return - } - } - done, err := f.DoneWithContext(ctx, client) - for attempts := 0; !done; done, err = f.DoneWithContext(ctx, client) { - if attempts >= client.RetryAttempts { - return autorest.NewErrorWithError(err, "Future", "WaitForCompletion", f.pt.latestResponse(), "the number of retries has been exceeded") - } - // we want delayAttempt to be zero in the non-error case so - // that DelayForBackoff doesn't perform exponential back-off - var delayAttempt int - var delay time.Duration - if err == nil { - // check for Retry-After delay, if not present use the client's polling delay - var ok bool - delay, ok = f.GetPollingDelay() - if !ok { - logger.Instance.Writeln(logger.LogInfo, "WaitForCompletionRef: Using client polling delay") - delay = client.PollingDelay - } - } else { - // there was an error polling for status so perform exponential - // back-off based on the number of attempts using the client's retry - // duration. update attempts after delayAttempt to avoid off-by-one. - logger.Instance.Writef(logger.LogError, "WaitForCompletionRef: %s\n", err) - delayAttempt = attempts - delay = client.RetryDuration - attempts++ - } - // wait until the delay elapses or the context is cancelled - delayElapsed := autorest.DelayForBackoff(delay, delayAttempt, cancelCtx.Done()) - if !delayElapsed { - return autorest.NewErrorWithError(cancelCtx.Err(), "Future", "WaitForCompletion", f.pt.latestResponse(), "context has been cancelled") - } - } - return -} - -// MarshalJSON implements the json.Marshaler interface. -func (f Future) MarshalJSON() ([]byte, error) { - return json.Marshal(f.pt) -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
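// NOTE (editorial addition, not vendored source): this JSON round-trip is what allows a
// Future to be persisted and resumed across process restarts; a sketch:
//
//	b, err := json.Marshal(future) // store b somewhere durable
//	var resumed azure.Future
//	err = json.Unmarshal(b, &resumed) // then poll with resumed.DoneWithContext(...)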
-func (f *Future) UnmarshalJSON(data []byte) error { - // unmarshal into JSON object to determine the tracker type - obj := map[string]interface{}{} - err := json.Unmarshal(data, &obj) - if err != nil { - return err - } - if obj["method"] == nil { - return autorest.NewError("Future", "UnmarshalJSON", "missing 'method' property") - } - method := obj["method"].(string) - switch strings.ToUpper(method) { - case http.MethodDelete: - f.pt = &pollingTrackerDelete{} - case http.MethodPatch: - f.pt = &pollingTrackerPatch{} - case http.MethodPost: - f.pt = &pollingTrackerPost{} - case http.MethodPut: - f.pt = &pollingTrackerPut{} - default: - return autorest.NewError("Future", "UnmarshalJSON", "unsupported method '%s'", method) - } - // now unmarshal into the tracker - return json.Unmarshal(data, &f.pt) -} - -// PollingURL returns the URL used for retrieving the status of the long-running operation. -func (f Future) PollingURL() string { - if f.pt == nil { - return "" - } - return f.pt.pollingURL() -} - -// GetResult should be called once polling has completed successfully. -// It makes the final GET call to retrieve the resultant payload. -func (f Future) GetResult(sender autorest.Sender) (*http.Response, error) { - if f.pt.finalGetURL() == "" { - // we can end up in this situation if the async operation returns a 200 - // with no polling URLs. in that case return the response which should - // contain the JSON payload (only do this for successful terminal cases). - if lr := f.pt.latestResponse(); lr != nil && f.pt.hasSucceeded() { - return lr, nil - } - return nil, autorest.NewError("Future", "GetResult", "missing URL for retrieving result") - } - req, err := http.NewRequest(http.MethodGet, f.pt.finalGetURL(), nil) - if err != nil { - return nil, err - } - resp, err := sender.Do(req) - if err == nil && resp.Body != nil { - // copy the body and close it so callers don't have to - defer resp.Body.Close() - b, err := ioutil.ReadAll(resp.Body) - if err != nil { - return resp, err - } - resp.Body = ioutil.NopCloser(bytes.NewReader(b)) - } - return resp, err -} - -type pollingTracker interface { - // these methods can differ per tracker - - // checks the response headers and status code to determine the polling mechanism - updatePollingMethod() error - - // checks the response for tracker-specific error conditions - checkForErrors() error - - // returns true if provisioning state should be checked - provisioningStateApplicable() bool - - // methods common to all trackers - - // initializes a tracker's polling URL and method, called for each iteration. - // these values can be overridden by each polling tracker as required.
- initPollingMethod() error - - // initializes the tracker's internal state, call this when the tracker is created - initializeState() error - - // makes an HTTP request to check the status of the LRO - pollForStatus(ctx context.Context, sender autorest.Sender) error - - // updates internal tracker state, call this after each call to pollForStatus - updatePollingState(provStateApl bool) error - - // returns the error response from the service, can be nil - pollingError() error - - // returns the polling method being used - pollingMethod() PollingMethodType - - // returns the state of the LRO as returned from the service - pollingStatus() string - - // returns the URL used for polling status - pollingURL() string - - // returns the URL used for the final GET to retrieve the resource - finalGetURL() string - - // returns true if the LRO is in a terminal state - hasTerminated() bool - - // returns true if the LRO is in a failed terminal state - hasFailed() bool - - // returns true if the LRO is in a successful terminal state - hasSucceeded() bool - - // returns the cached HTTP response after a call to pollForStatus(), can be nil - latestResponse() *http.Response -} - -type pollingTrackerBase struct { - // resp is the last response, either from the submission of the LRO or from polling - resp *http.Response - - // method is the HTTP verb, this is needed for deserialization - Method string `json:"method"` - - // rawBody is the raw JSON response body - rawBody map[string]interface{} - - // denotes if polling is using async-operation or location header - Pm PollingMethodType `json:"pollingMethod"` - - // the URL to poll for status - URI string `json:"pollingURI"` - - // the state of the LRO as returned from the service - State string `json:"lroState"` - - // the URL to GET for the final result - FinalGetURI string `json:"resultURI"` - - // used to hold an error object returned from the service - Err *ServiceError `json:"error,omitempty"` -} - -func (pt *pollingTrackerBase) initializeState() error { - // determine the initial polling state based on response body and/or HTTP status - // code. this is applicable to the initial LRO response, not polling responses! 
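// NOTE (editorial addition, not vendored source): summarizing the mapping implemented below:
// 200 -> provisioningState from the body, else Succeeded; 201 -> provisioningState, else
// InProgress; 202 -> InProgress; 204 -> Succeeded; any other status code -> Failed.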
- pt.Method = pt.resp.Request.Method - if err := pt.updateRawBody(); err != nil { - return err - } - switch pt.resp.StatusCode { - case http.StatusOK: - if ps := pt.getProvisioningState(); ps != nil { - pt.State = *ps - if pt.hasFailed() { - pt.updateErrorFromResponse() - return pt.pollingError() - } - } else { - pt.State = operationSucceeded - } - case http.StatusCreated: - if ps := pt.getProvisioningState(); ps != nil { - pt.State = *ps - } else { - pt.State = operationInProgress - } - case http.StatusAccepted: - pt.State = operationInProgress - case http.StatusNoContent: - pt.State = operationSucceeded - default: - pt.State = operationFailed - pt.updateErrorFromResponse() - return pt.pollingError() - } - return pt.initPollingMethod() -} - -func (pt pollingTrackerBase) getProvisioningState() *string { - if pt.rawBody != nil && pt.rawBody["properties"] != nil { - p := pt.rawBody["properties"].(map[string]interface{}) - if ps := p["provisioningState"]; ps != nil { - s := ps.(string) - return &s - } - } - return nil -} - -func (pt *pollingTrackerBase) updateRawBody() error { - pt.rawBody = map[string]interface{}{} - if pt.resp.ContentLength != 0 { - defer pt.resp.Body.Close() - b, err := ioutil.ReadAll(pt.resp.Body) - if err != nil { - return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to read response body") - } - // put the body back so it's available to other callers - pt.resp.Body = ioutil.NopCloser(bytes.NewReader(b)) - // observed in 204 responses over HTTP/2.0; the content length is -1 but body is empty - if len(b) == 0 { - return nil - } - if err = json.Unmarshal(b, &pt.rawBody); err != nil { - return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to unmarshal response body") - } - } - return nil -} - -func (pt *pollingTrackerBase) pollForStatus(ctx context.Context, sender autorest.Sender) error { - req, err := http.NewRequest(http.MethodGet, pt.URI, nil) - if err != nil { - return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to create HTTP request") - } - - req = req.WithContext(ctx) - preparer := autorest.CreatePreparer(autorest.GetPrepareDecorators(ctx)...) - req, err = preparer.Prepare(req) - if err != nil { - return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed preparing HTTP request") - } - pt.resp, err = sender.Do(req) - if err != nil { - return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to send HTTP request") - } - if autorest.ResponseHasStatusCode(pt.resp, pollingCodes[:]...) { - // reset the service error on success case - pt.Err = nil - err = pt.updateRawBody() - } else { - // check response body for error content - pt.updateErrorFromResponse() - err = pt.pollingError() - } - return err -} - -// attempts to unmarshal a ServiceError type from the response body. -// if that fails then make a best attempt at creating something meaningful. -// NOTE: this assumes that the async operation has failed. 
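// NOTE (editorial addition, not vendored source): the two response-body shapes handled below,
// with hypothetical values; the wrapped form is tried first, then the unwrapped one:
//
//	{"error": {"code": "ResourceDeploymentFailure", "message": "..."}}
//	{"code": "ResourceDeploymentFailure", "message": "..."}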
-func (pt *pollingTrackerBase) updateErrorFromResponse() { - var err error - if pt.resp.ContentLength != 0 { - type respErr struct { - ServiceError *ServiceError `json:"error"` - } - re := respErr{} - defer pt.resp.Body.Close() - var b []byte - if b, err = ioutil.ReadAll(pt.resp.Body); err != nil { - goto Default - } - // put the body back so it's available to other callers - pt.resp.Body = ioutil.NopCloser(bytes.NewReader(b)) - if len(b) == 0 { - goto Default - } - if err = json.Unmarshal(b, &re); err != nil { - goto Default - } - // unmarshalling the error didn't yield anything, try unwrapped error - if re.ServiceError == nil { - err = json.Unmarshal(b, &re.ServiceError) - if err != nil { - goto Default - } - } - // the unmarshaller will ensure re.ServiceError is non-nil - // even if there was no content unmarshalled so check the code. - if re.ServiceError.Code != "" { - pt.Err = re.ServiceError - return - } - } -Default: - se := &ServiceError{ - Code: pt.pollingStatus(), - Message: "The async operation failed.", - } - if err != nil { - se.InnerError = make(map[string]interface{}) - se.InnerError["unmarshalError"] = err.Error() - } - // stick the response body into the error object in hopes - // it contains something useful to help diagnose the failure. - if len(pt.rawBody) > 0 { - se.AdditionalInfo = []map[string]interface{}{ - pt.rawBody, - } - } - pt.Err = se -} - -func (pt *pollingTrackerBase) updatePollingState(provStateApl bool) error { - if pt.Pm == PollingAsyncOperation && pt.rawBody["status"] != nil { - pt.State = pt.rawBody["status"].(string) - } else { - if pt.resp.StatusCode == http.StatusAccepted { - pt.State = operationInProgress - } else if provStateApl { - if ps := pt.getProvisioningState(); ps != nil { - pt.State = *ps - } else { - pt.State = operationSucceeded - } - } else { - return autorest.NewError("pollingTrackerBase", "updatePollingState", "the response from the async operation has an invalid status code") - } - } - // if the operation has failed update the error state - if pt.hasFailed() { - pt.updateErrorFromResponse() - } - return nil -} - -func (pt pollingTrackerBase) pollingError() error { - if pt.Err == nil { - return nil - } - return pt.Err -} - -func (pt pollingTrackerBase) pollingMethod() PollingMethodType { - return pt.Pm -} - -func (pt pollingTrackerBase) pollingStatus() string { - return pt.State -} - -func (pt pollingTrackerBase) pollingURL() string { - return pt.URI -} - -func (pt pollingTrackerBase) finalGetURL() string { - return pt.FinalGetURI -} - -func (pt pollingTrackerBase) hasTerminated() bool { - return strings.EqualFold(pt.State, operationCanceled) || strings.EqualFold(pt.State, operationFailed) || strings.EqualFold(pt.State, operationSucceeded) -} - -func (pt pollingTrackerBase) hasFailed() bool { - return strings.EqualFold(pt.State, operationCanceled) || strings.EqualFold(pt.State, operationFailed) -} - -func (pt pollingTrackerBase) hasSucceeded() bool { - return strings.EqualFold(pt.State, operationSucceeded) -} - -func (pt pollingTrackerBase) latestResponse() *http.Response { - return pt.resp -} - -// error checking common to all trackers -func (pt pollingTrackerBase) baseCheckForErrors() error { - // for Azure-AsyncOperations the response body cannot be nil or empty - if pt.Pm == PollingAsyncOperation { - if pt.resp.Body == nil || pt.resp.ContentLength == 0 { - return autorest.NewError("pollingTrackerBase", "baseCheckForErrors", "for Azure-AsyncOperation response body cannot be nil") - } - if pt.rawBody["status"] == nil { - return 
autorest.NewError("pollingTrackerBase", "baseCheckForErrors", "missing status property in Azure-AsyncOperation response body") - } - } - return nil -} - -// default initialization of polling URL/method. each verb tracker will update this as required. -func (pt *pollingTrackerBase) initPollingMethod() error { - if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { - return err - } else if ao != "" { - pt.URI = ao - pt.Pm = PollingAsyncOperation - return nil - } - if lh, err := getURLFromLocationHeader(pt.resp); err != nil { - return err - } else if lh != "" { - pt.URI = lh - pt.Pm = PollingLocation - return nil - } - // it's ok if we didn't find a polling header, this will be handled elsewhere - return nil -} - -// DELETE - -type pollingTrackerDelete struct { - pollingTrackerBase -} - -func (pt *pollingTrackerDelete) updatePollingMethod() error { - // for 201 the Location header is required - if pt.resp.StatusCode == http.StatusCreated { - if lh, err := getURLFromLocationHeader(pt.resp); err != nil { - return err - } else if lh == "" { - return autorest.NewError("pollingTrackerDelete", "updateHeaders", "missing Location header in 201 response") - } else { - pt.URI = lh - } - pt.Pm = PollingLocation - pt.FinalGetURI = pt.URI - } - // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary - if pt.resp.StatusCode == http.StatusAccepted { - ao, err := getURLFromAsyncOpHeader(pt.resp) - if err != nil { - return err - } else if ao != "" { - pt.URI = ao - pt.Pm = PollingAsyncOperation - } - // if the Location header is invalid and we already have a polling URL - // then we don't care if the Location header URL is malformed. - if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" { - return err - } else if lh != "" { - if ao == "" { - pt.URI = lh - pt.Pm = PollingLocation - } - // when both headers are returned we use the value in the Location header for the final GET - pt.FinalGetURI = lh - } - // make sure a polling URL was found - if pt.URI == "" { - return autorest.NewError("pollingTrackerPost", "updateHeaders", "didn't get any suitable polling URLs in 202 response") - } - } - return nil -} - -func (pt pollingTrackerDelete) checkForErrors() error { - return pt.baseCheckForErrors() -} - -func (pt pollingTrackerDelete) provisioningStateApplicable() bool { - return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusNoContent -} - -// PATCH - -type pollingTrackerPatch struct { - pollingTrackerBase -} - -func (pt *pollingTrackerPatch) updatePollingMethod() error { - // by default we can use the original URL for polling and final GET - if pt.URI == "" { - pt.URI = pt.resp.Request.URL.String() - } - if pt.FinalGetURI == "" { - pt.FinalGetURI = pt.resp.Request.URL.String() - } - if pt.Pm == PollingUnknown { - pt.Pm = PollingRequestURI - } - // for 201 it's permissible for no headers to be returned - if pt.resp.StatusCode == http.StatusCreated { - if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { - return err - } else if ao != "" { - pt.URI = ao - pt.Pm = PollingAsyncOperation - } - } - // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary - // note the absence of the "final GET" mechanism for PATCH - if pt.resp.StatusCode == http.StatusAccepted { - ao, err := getURLFromAsyncOpHeader(pt.resp) - if err != nil { - return err - } else if ao != "" { - pt.URI = ao - pt.Pm = PollingAsyncOperation - } - if ao == "" { - if lh, err := getURLFromLocationHeader(pt.resp); err != nil { - 
return err - } else if lh == "" { - return autorest.NewError("pollingTrackerPatch", "updateHeaders", "didn't get any suitable polling URLs in 202 response") - } else { - pt.URI = lh - pt.Pm = PollingLocation - } - } - } - return nil -} - -func (pt pollingTrackerPatch) checkForErrors() error { - return pt.baseCheckForErrors() -} - -func (pt pollingTrackerPatch) provisioningStateApplicable() bool { - return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusCreated -} - -// POST - -type pollingTrackerPost struct { - pollingTrackerBase -} - -func (pt *pollingTrackerPost) updatePollingMethod() error { - // 201 requires Location header - if pt.resp.StatusCode == http.StatusCreated { - if lh, err := getURLFromLocationHeader(pt.resp); err != nil { - return err - } else if lh == "" { - return autorest.NewError("pollingTrackerPost", "updateHeaders", "missing Location header in 201 response") - } else { - pt.URI = lh - pt.FinalGetURI = lh - pt.Pm = PollingLocation - } - } - // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary - if pt.resp.StatusCode == http.StatusAccepted { - ao, err := getURLFromAsyncOpHeader(pt.resp) - if err != nil { - return err - } else if ao != "" { - pt.URI = ao - pt.Pm = PollingAsyncOperation - } - // if the Location header is invalid and we already have a polling URL - // then we don't care if the Location header URL is malformed. - if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" { - return err - } else if lh != "" { - if ao == "" { - pt.URI = lh - pt.Pm = PollingLocation - } - // when both headers are returned we use the value in the Location header for the final GET - pt.FinalGetURI = lh - } - // make sure a polling URL was found - if pt.URI == "" { - return autorest.NewError("pollingTrackerPost", "updateHeaders", "didn't get any suitable polling URLs in 202 response") - } - } - return nil -} - -func (pt pollingTrackerPost) checkForErrors() error { - return pt.baseCheckForErrors() -} - -func (pt pollingTrackerPost) provisioningStateApplicable() bool { - return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusNoContent -} - -// PUT - -type pollingTrackerPut struct { - pollingTrackerBase -} - -func (pt *pollingTrackerPut) updatePollingMethod() error { - // by default we can use the original URL for polling and final GET - if pt.URI == "" { - pt.URI = pt.resp.Request.URL.String() - } - if pt.FinalGetURI == "" { - pt.FinalGetURI = pt.resp.Request.URL.String() - } - if pt.Pm == PollingUnknown { - pt.Pm = PollingRequestURI - } - // for 201 it's permissible for no headers to be returned - if pt.resp.StatusCode == http.StatusCreated { - if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { - return err - } else if ao != "" { - pt.URI = ao - pt.Pm = PollingAsyncOperation - } - } - // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary - if pt.resp.StatusCode == http.StatusAccepted { - ao, err := getURLFromAsyncOpHeader(pt.resp) - if err != nil { - return err - } else if ao != "" { - pt.URI = ao - pt.Pm = PollingAsyncOperation - } - // if the Location header is invalid and we already have a polling URL - // then we don't care if the Location header URL is malformed. 
- if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" { - return err - } else if lh != "" { - if ao == "" { - pt.URI = lh - pt.Pm = PollingLocation - } - } - // make sure a polling URL was found - if pt.URI == "" { - return autorest.NewError("pollingTrackerPut", "updateHeaders", "didn't get any suitable polling URLs in 202 response") - } - } - return nil -} - -func (pt pollingTrackerPut) checkForErrors() error { - err := pt.baseCheckForErrors() - if err != nil { - return err - } - // if there are no LRO headers then the body cannot be empty - ao, err := getURLFromAsyncOpHeader(pt.resp) - if err != nil { - return err - } - lh, err := getURLFromLocationHeader(pt.resp) - if err != nil { - return err - } - if ao == "" && lh == "" && len(pt.rawBody) == 0 { - return autorest.NewError("pollingTrackerPut", "checkForErrors", "the response did not contain a body") - } - return nil -} - -func (pt pollingTrackerPut) provisioningStateApplicable() bool { - return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusCreated -} - -// creates a polling tracker based on the verb of the original request -func createPollingTracker(resp *http.Response) (pollingTracker, error) { - var pt pollingTracker - switch strings.ToUpper(resp.Request.Method) { - case http.MethodDelete: - pt = &pollingTrackerDelete{pollingTrackerBase: pollingTrackerBase{resp: resp}} - case http.MethodPatch: - pt = &pollingTrackerPatch{pollingTrackerBase: pollingTrackerBase{resp: resp}} - case http.MethodPost: - pt = &pollingTrackerPost{pollingTrackerBase: pollingTrackerBase{resp: resp}} - case http.MethodPut: - pt = &pollingTrackerPut{pollingTrackerBase: pollingTrackerBase{resp: resp}} - default: - return nil, autorest.NewError("azure", "createPollingTracker", "unsupported HTTP method %s", resp.Request.Method) - } - if err := pt.initializeState(); err != nil { - return pt, err - } - // this initializes the polling header values, we do this during creation in case the - // initial response send us invalid values; this way the API call will return a non-nil - // error (not doing this means the error shows up in Future.Done) - return pt, pt.updatePollingMethod() -} - -// gets the polling URL from the Azure-AsyncOperation header. -// ensures the URL is well-formed and absolute. -func getURLFromAsyncOpHeader(resp *http.Response) (string, error) { - s := resp.Header.Get(http.CanonicalHeaderKey(headerAsyncOperation)) - if s == "" { - return "", nil - } - if !isValidURL(s) { - return "", autorest.NewError("azure", "getURLFromAsyncOpHeader", "invalid polling URL '%s'", s) - } - return s, nil -} - -// gets the polling URL from the Location header. -// ensures the URL is well-formed and absolute. -func getURLFromLocationHeader(resp *http.Response) (string, error) { - s := resp.Header.Get(http.CanonicalHeaderKey(autorest.HeaderLocation)) - if s == "" { - return "", nil - } - if !isValidURL(s) { - return "", autorest.NewError("azure", "getURLFromLocationHeader", "invalid polling URL '%s'", s) - } - return s, nil -} - -// verify that the URL is valid and absolute -func isValidURL(s string) bool { - u, err := url.Parse(s) - return err == nil && u.IsAbs() -} - -// PollingMethodType defines a type used for enumerating polling mechanisms. -type PollingMethodType string - -const ( - // PollingAsyncOperation indicates the polling method uses the Azure-AsyncOperation header. - PollingAsyncOperation PollingMethodType = "AsyncOperation" - - // PollingLocation indicates the polling method uses the Location header. 
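As context for the tracker logic being removed above: for 202 Accepted responses every verb tracker applies the same precedence, preferring the Azure-AsyncOperation header and falling back to Location, and it is an error when neither header yields a polling URL. A minimal standalone sketch of that rule (illustrative function name, not the vendored implementation; the real code also validates that URLs are absolute via isValidURL and tracks a separate final-GET URI):

package main

import (
	"errors"
	"fmt"
	"net/http"
)

// choosePollingURL applies the 202-response preference rule described above:
// Azure-AsyncOperation first, Location second, error if neither is present.
func choosePollingURL(resp *http.Response) (string, string, error) {
	if ao := resp.Header.Get("Azure-AsyncOperation"); ao != "" {
		return ao, "AsyncOperation", nil
	}
	if lh := resp.Header.Get("Location"); lh != "" {
		return lh, "Location", nil
	}
	return "", "", errors.New("didn't get any suitable polling URLs in 202 response")
}

func main() {
	resp := &http.Response{Header: http.Header{}}
	resp.Header.Set("Location", "https://example.invalid/operations/1")
	uri, method, _ := choosePollingURL(resp)
	fmt.Println(uri, method) // https://example.invalid/operations/1 Location
}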
-	PollingLocation PollingMethodType = "Location"
-
-	// PollingRequestURI indicates the polling method uses the original request URI.
-	PollingRequestURI PollingMethodType = "RequestURI"
-
-	// PollingUnknown indicates an unknown polling method and is the default value.
-	PollingUnknown PollingMethodType = ""
-)
-
-// AsyncOpIncompleteError is the type that's returned from a future that has not completed.
-type AsyncOpIncompleteError struct {
-	// FutureType is the name of the type composed of an azure.Future.
-	FutureType string
-}
-
-// Error returns an error message including the originating type name of the error.
-func (e AsyncOpIncompleteError) Error() string {
-	return fmt.Sprintf("%s: asynchronous operation has not completed", e.FutureType)
-}
-
-// NewAsyncOpIncompleteError creates a new AsyncOpIncompleteError with the specified parameters.
-func NewAsyncOpIncompleteError(futureType string) AsyncOpIncompleteError {
-	return AsyncOpIncompleteError{
-		FutureType: futureType,
-	}
-}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go
deleted file mode 100644
index 1328f17..0000000
--- a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go
+++ /dev/null
@@ -1,388 +0,0 @@
-// Package azure provides Azure-specific implementations used with AutoRest.
-// See the included examples for more detail.
-package azure
-
-// Copyright 2017 Microsoft Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-import (
-	"bytes"
-	"encoding/json"
-	"fmt"
-	"io/ioutil"
-	"net/http"
-	"regexp"
-	"strconv"
-	"strings"
-
-	"github.com/Azure/go-autorest/autorest"
-)
-
-const (
-	// HeaderClientID is the Azure extension header to set a user-specified request ID.
-	HeaderClientID = "x-ms-client-request-id"
-
-	// HeaderReturnClientID is the Azure extension header to set if the user-specified request ID
-	// should be included in the response.
-	HeaderReturnClientID = "x-ms-return-client-request-id"
-
-	// HeaderContentType is the type of the content in the HTTP response.
-	HeaderContentType = "Content-Type"
-
-	// HeaderRequestID is the Azure extension header of the service generated request ID returned
-	// in the response.
-	HeaderRequestID = "x-ms-request-id"
-)
-
-// ServiceError encapsulates the error response from an Azure service.
-// It adheres to the OData v4 specification for error responses.
-type ServiceError struct { - Code string `json:"code"` - Message string `json:"message"` - Target *string `json:"target"` - Details []map[string]interface{} `json:"details"` - InnerError map[string]interface{} `json:"innererror"` - AdditionalInfo []map[string]interface{} `json:"additionalInfo"` -} - -func (se ServiceError) Error() string { - result := fmt.Sprintf("Code=%q Message=%q", se.Code, se.Message) - - if se.Target != nil { - result += fmt.Sprintf(" Target=%q", *se.Target) - } - - if se.Details != nil { - d, err := json.Marshal(se.Details) - if err != nil { - result += fmt.Sprintf(" Details=%v", se.Details) - } - result += fmt.Sprintf(" Details=%s", d) - } - - if se.InnerError != nil { - d, err := json.Marshal(se.InnerError) - if err != nil { - result += fmt.Sprintf(" InnerError=%v", se.InnerError) - } - result += fmt.Sprintf(" InnerError=%s", d) - } - - if se.AdditionalInfo != nil { - d, err := json.Marshal(se.AdditionalInfo) - if err != nil { - result += fmt.Sprintf(" AdditionalInfo=%v", se.AdditionalInfo) - } - result += fmt.Sprintf(" AdditionalInfo=%s", d) - } - - return result -} - -// UnmarshalJSON implements the json.Unmarshaler interface for the ServiceError type. -func (se *ServiceError) UnmarshalJSON(b []byte) error { - // http://docs.oasis-open.org/odata/odata-json-format/v4.0/os/odata-json-format-v4.0-os.html#_Toc372793091 - - type serviceErrorInternal struct { - Code string `json:"code"` - Message string `json:"message"` - Target *string `json:"target,omitempty"` - AdditionalInfo []map[string]interface{} `json:"additionalInfo,omitempty"` - // not all services conform to the OData v4 spec. - // the following fields are where we've seen discrepancies - - // spec calls for []map[string]interface{} but have seen map[string]interface{} - Details interface{} `json:"details,omitempty"` - - // spec calls for map[string]interface{} but have seen []map[string]interface{} and string - InnerError interface{} `json:"innererror,omitempty"` - } - - sei := serviceErrorInternal{} - if err := json.Unmarshal(b, &sei); err != nil { - return err - } - - // copy the fields we know to be correct - se.AdditionalInfo = sei.AdditionalInfo - se.Code = sei.Code - se.Message = sei.Message - se.Target = sei.Target - - // converts an []interface{} to []map[string]interface{} - arrayOfObjs := func(v interface{}) ([]map[string]interface{}, bool) { - arrayOf, ok := v.([]interface{}) - if !ok { - return nil, false - } - final := []map[string]interface{}{} - for _, item := range arrayOf { - as, ok := item.(map[string]interface{}) - if !ok { - return nil, false - } - final = append(final, as) - } - return final, true - } - - // convert the remaining fields, falling back to raw JSON if necessary - - if c, ok := arrayOfObjs(sei.Details); ok { - se.Details = c - } else if c, ok := sei.Details.(map[string]interface{}); ok { - se.Details = []map[string]interface{}{c} - } else if sei.Details != nil { - // stuff into Details - se.Details = []map[string]interface{}{ - {"raw": sei.Details}, - } - } - - if c, ok := sei.InnerError.(map[string]interface{}); ok { - se.InnerError = c - } else if c, ok := arrayOfObjs(sei.InnerError); ok { - // if there's only one error extract it - if len(c) == 1 { - se.InnerError = c[0] - } else { - // multiple errors, stuff them into the value - se.InnerError = map[string]interface{}{ - "multi": c, - } - } - } else if c, ok := sei.InnerError.(string); ok { - se.InnerError = map[string]interface{}{"error": c} - } else if sei.InnerError != nil { - // stuff into InnerError - 
se.InnerError = map[string]interface{}{ - "raw": sei.InnerError, - } - } - return nil -} - -// RequestError describes an error response returned by Azure service. -type RequestError struct { - autorest.DetailedError - - // The error returned by the Azure service. - ServiceError *ServiceError `json:"error" xml:"Error"` - - // The request id (from the x-ms-request-id-header) of the request. - RequestID string -} - -// Error returns a human-friendly error message from service error. -func (e RequestError) Error() string { - return fmt.Sprintf("autorest/azure: Service returned an error. Status=%v %v", - e.StatusCode, e.ServiceError) -} - -// IsAzureError returns true if the passed error is an Azure Service error; false otherwise. -func IsAzureError(e error) bool { - _, ok := e.(*RequestError) - return ok -} - -// Resource contains details about an Azure resource. -type Resource struct { - SubscriptionID string - ResourceGroup string - Provider string - ResourceType string - ResourceName string -} - -// String function returns a string in form of azureResourceID -func (r Resource) String() string { - return fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/%s/%s/%s", r.SubscriptionID, r.ResourceGroup, r.Provider, r.ResourceType, r.ResourceName) -} - -// ParseResourceID parses a resource ID into a ResourceDetails struct. -// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/templates/template-functions-resource?tabs=json#resourceid. -func ParseResourceID(resourceID string) (Resource, error) { - - const resourceIDPatternText = `(?i)subscriptions/(.+)/resourceGroups/(.+)/providers/(.+?)/(.+?)/(.+)` - resourceIDPattern := regexp.MustCompile(resourceIDPatternText) - match := resourceIDPattern.FindStringSubmatch(resourceID) - - if len(match) == 0 { - return Resource{}, fmt.Errorf("parsing failed for %s. Invalid resource Id format", resourceID) - } - - v := strings.Split(match[5], "/") - resourceName := v[len(v)-1] - - result := Resource{ - SubscriptionID: match[1], - ResourceGroup: match[2], - Provider: match[3], - ResourceType: match[4], - ResourceName: resourceName, - } - - return result, nil -} - -// NewErrorWithError creates a new Error conforming object from the -// passed packageType, method, statusCode of the given resp (UndefinedStatusCode -// if resp is nil), message, and original error. message is treated as a format -// string to which the optional args apply. -func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) RequestError { - if v, ok := original.(*RequestError); ok { - return *v - } - - statusCode := autorest.UndefinedStatusCode - if resp != nil { - statusCode = resp.StatusCode - } - return RequestError{ - DetailedError: autorest.DetailedError{ - Original: original, - PackageType: packageType, - Method: method, - StatusCode: statusCode, - Message: fmt.Sprintf(message, args...), - }, - } -} - -// WithReturningClientID returns a PrepareDecorator that adds an HTTP extension header of -// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g., -// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). It also sets the x-ms-return-client-request-id -// header to true such that UUID accompanies the http.Response. 
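The ParseResourceID helper removed above splits an ARM resource ID into subscription, resource group, provider, type, and name. A short usage sketch, written against the package as it existed before this change (the subscription GUID and resource names are placeholders):

package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/azure"
)

func main() {
	// Placeholder ID in the shape ParseResourceID expects.
	id := "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg1" +
		"/providers/Microsoft.Compute/virtualMachines/vm1"
	r, err := azure.ParseResourceID(id)
	if err != nil {
		panic(err)
	}
	fmt.Println(r.ResourceGroup, r.Provider, r.ResourceName) // rg1 Microsoft.Compute vm1
}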
-func WithReturningClientID(uuid string) autorest.PrepareDecorator { - preparer := autorest.CreatePreparer( - WithClientID(uuid), - WithReturnClientID(true)) - - return func(p autorest.Preparer) autorest.Preparer { - return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err != nil { - return r, err - } - return preparer.Prepare(r) - }) - } -} - -// WithClientID returns a PrepareDecorator that adds an HTTP extension header of -// x-ms-client-request-id whose value is passed, undecorated UUID (e.g., -// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). -func WithClientID(uuid string) autorest.PrepareDecorator { - return autorest.WithHeader(HeaderClientID, uuid) -} - -// WithReturnClientID returns a PrepareDecorator that adds an HTTP extension header of -// x-ms-return-client-request-id whose boolean value indicates if the value of the -// x-ms-client-request-id header should be included in the http.Response. -func WithReturnClientID(b bool) autorest.PrepareDecorator { - return autorest.WithHeader(HeaderReturnClientID, strconv.FormatBool(b)) -} - -// ExtractClientID extracts the client identifier from the x-ms-client-request-id header set on the -// http.Request sent to the service (and returned in the http.Response) -func ExtractClientID(resp *http.Response) string { - return autorest.ExtractHeaderValue(HeaderClientID, resp) -} - -// ExtractRequestID extracts the Azure server generated request identifier from the -// x-ms-request-id header. -func ExtractRequestID(resp *http.Response) string { - return autorest.ExtractHeaderValue(HeaderRequestID, resp) -} - -// WithErrorUnlessStatusCode returns a RespondDecorator that emits an -// azure.RequestError by reading the response body unless the response HTTP status code -// is among the set passed. -// -// If there is a chance service may return responses other than the Azure error -// format and the response cannot be parsed into an error, a decoding error will -// be returned containing the response body. In any case, the Responder will -// return an error if the status code is not satisfied. -// -// If this Responder returns an error, the response body will be replaced with -// an in-memory reader, which needs no further closing. -func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator { - return func(r autorest.Responder) autorest.Responder { - return autorest.ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil && !autorest.ResponseHasStatusCode(resp, codes...) { - var e RequestError - defer resp.Body.Close() - - encodedAs := autorest.EncodedAsJSON - if strings.Contains(resp.Header.Get("Content-Type"), "xml") { - encodedAs = autorest.EncodedAsXML - } - - // Copy and replace the Body in case it does not contain an error object. - // This will leave the Body available to the caller. 
- b, decodeErr := autorest.CopyAndDecode(encodedAs, resp.Body, &e) - resp.Body = ioutil.NopCloser(&b) - if decodeErr != nil { - return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b, decodeErr) - } - if e.ServiceError == nil { - // Check if error is unwrapped ServiceError - decoder := autorest.NewDecoder(encodedAs, bytes.NewReader(b.Bytes())) - if err := decoder.Decode(&e.ServiceError); err != nil { - return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b, err) - } - - // for example, should the API return the literal value `null` as the response - if e.ServiceError == nil { - e.ServiceError = &ServiceError{ - Code: "Unknown", - Message: "Unknown service error", - Details: []map[string]interface{}{ - { - "HttpResponse.Body": b.String(), - }, - }, - } - } - } - - if e.ServiceError != nil && e.ServiceError.Message == "" { - // if we're here it means the returned error wasn't OData v4 compliant. - // try to unmarshal the body in hopes of getting something. - rawBody := map[string]interface{}{} - decoder := autorest.NewDecoder(encodedAs, bytes.NewReader(b.Bytes())) - if err := decoder.Decode(&rawBody); err != nil { - return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b, err) - } - - e.ServiceError = &ServiceError{ - Code: "Unknown", - Message: "Unknown service error", - } - if len(rawBody) > 0 { - e.ServiceError.Details = []map[string]interface{}{rawBody} - } - } - e.Response = resp - e.RequestID = ExtractRequestID(resp) - if e.StatusCode == nil { - e.StatusCode = resp.StatusCode - } - err = &e - } - return err - }) - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go deleted file mode 100644 index b0a5376..0000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go +++ /dev/null @@ -1,331 +0,0 @@ -package azure - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "strings" -) - -const ( - // EnvironmentFilepathName captures the name of the environment variable containing the path to the file - // to be used while populating the Azure Environment. - EnvironmentFilepathName = "AZURE_ENVIRONMENT_FILEPATH" - - // NotAvailable is used for endpoints and resource IDs that are not available for a given cloud. - NotAvailable = "N/A" -) - -var environments = map[string]Environment{ - "AZURECHINACLOUD": ChinaCloud, - "AZUREGERMANCLOUD": GermanCloud, - "AZURECLOUD": PublicCloud, - "AZUREPUBLICCLOUD": PublicCloud, - "AZUREUSGOVERNMENT": USGovernmentCloud, - "AZUREUSGOVERNMENTCLOUD": USGovernmentCloud, //TODO: deprecate -} - -// ResourceIdentifier contains a set of Azure resource IDs. 
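The WithErrorUnlessStatusCode responder deleted above (in azure.go) is what converts a non-2xx Azure payload into an *azure.RequestError. A hedged sketch of the usual composition, fed a fabricated 409 response and written against the pre-removal packages:

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
)

func main() {
	// Fabricated conflict response carrying an OData-style error body.
	resp := &http.Response{
		StatusCode: http.StatusConflict,
		Header:     http.Header{"Content-Type": []string{"application/json"}},
		Body:       ioutil.NopCloser(strings.NewReader(`{"error":{"code":"Conflict","message":"already exists"}}`)),
	}
	err := autorest.Respond(resp,
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByClosing())
	if reqErr, ok := err.(*azure.RequestError); ok && reqErr.ServiceError != nil {
		fmt.Println(reqErr.ServiceError.Code, "-", reqErr.ServiceError.Message) // Conflict - already exists
	}
}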
-type ResourceIdentifier struct { - Graph string `json:"graph"` - KeyVault string `json:"keyVault"` - Datalake string `json:"datalake"` - Batch string `json:"batch"` - OperationalInsights string `json:"operationalInsights"` - OSSRDBMS string `json:"ossRDBMS"` - Storage string `json:"storage"` - Synapse string `json:"synapse"` - ServiceBus string `json:"serviceBus"` - SQLDatabase string `json:"sqlDatabase"` - CosmosDB string `json:"cosmosDB"` - ManagedHSM string `json:"managedHSM"` - MicrosoftGraph string `json:"microsoftGraph"` -} - -// Environment represents a set of endpoints for each of Azure's Clouds. -type Environment struct { - Name string `json:"name"` - ManagementPortalURL string `json:"managementPortalURL"` - PublishSettingsURL string `json:"publishSettingsURL"` - ServiceManagementEndpoint string `json:"serviceManagementEndpoint"` - ResourceManagerEndpoint string `json:"resourceManagerEndpoint"` - ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"` - GalleryEndpoint string `json:"galleryEndpoint"` - KeyVaultEndpoint string `json:"keyVaultEndpoint"` - ManagedHSMEndpoint string `json:"managedHSMEndpoint"` - GraphEndpoint string `json:"graphEndpoint"` - ServiceBusEndpoint string `json:"serviceBusEndpoint"` - BatchManagementEndpoint string `json:"batchManagementEndpoint"` - MicrosoftGraphEndpoint string `json:"microsoftGraphEndpoint"` - StorageEndpointSuffix string `json:"storageEndpointSuffix"` - CosmosDBDNSSuffix string `json:"cosmosDBDNSSuffix"` - MariaDBDNSSuffix string `json:"mariaDBDNSSuffix"` - MySQLDatabaseDNSSuffix string `json:"mySqlDatabaseDNSSuffix"` - PostgresqlDatabaseDNSSuffix string `json:"postgresqlDatabaseDNSSuffix"` - SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"` - TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"` - KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"` - ManagedHSMDNSSuffix string `json:"managedHSMDNSSuffix"` - ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"` - ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"` - ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"` - ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"` - TokenAudience string `json:"tokenAudience"` - APIManagementHostNameSuffix string `json:"apiManagementHostNameSuffix"` - SynapseEndpointSuffix string `json:"synapseEndpointSuffix"` - DatalakeSuffix string `json:"datalakeSuffix"` - ResourceIdentifiers ResourceIdentifier `json:"resourceIdentifiers"` -} - -var ( - // PublicCloud is the default public Azure cloud environment - PublicCloud = Environment{ - Name: "AzurePublicCloud", - ManagementPortalURL: "https://manage.windowsazure.com/", - PublishSettingsURL: "https://manage.windowsazure.com/publishsettings/index", - ServiceManagementEndpoint: "https://management.core.windows.net/", - ResourceManagerEndpoint: "https://management.azure.com/", - ActiveDirectoryEndpoint: "https://login.microsoftonline.com/", - GalleryEndpoint: "https://gallery.azure.com/", - KeyVaultEndpoint: "https://vault.azure.net/", - ManagedHSMEndpoint: "https://managedhsm.azure.net/", - GraphEndpoint: "https://graph.windows.net/", - ServiceBusEndpoint: "https://servicebus.windows.net/", - BatchManagementEndpoint: "https://batch.core.windows.net/", - MicrosoftGraphEndpoint: "https://graph.microsoft.com/", - StorageEndpointSuffix: "core.windows.net", - CosmosDBDNSSuffix: "documents.azure.com", - MariaDBDNSSuffix: "mariadb.database.azure.com", - MySQLDatabaseDNSSuffix: "mysql.database.azure.com", - 
PostgresqlDatabaseDNSSuffix: "postgres.database.azure.com", - SQLDatabaseDNSSuffix: "database.windows.net", - TrafficManagerDNSSuffix: "trafficmanager.net", - KeyVaultDNSSuffix: "vault.azure.net", - ManagedHSMDNSSuffix: "managedhsm.azure.net", - ServiceBusEndpointSuffix: "servicebus.windows.net", - ServiceManagementVMDNSSuffix: "cloudapp.net", - ResourceManagerVMDNSSuffix: "cloudapp.azure.com", - ContainerRegistryDNSSuffix: "azurecr.io", - TokenAudience: "https://management.azure.com/", - APIManagementHostNameSuffix: "azure-api.net", - SynapseEndpointSuffix: "dev.azuresynapse.net", - DatalakeSuffix: "azuredatalakestore.net", - ResourceIdentifiers: ResourceIdentifier{ - Graph: "https://graph.windows.net/", - KeyVault: "https://vault.azure.net", - Datalake: "https://datalake.azure.net/", - Batch: "https://batch.core.windows.net/", - OperationalInsights: "https://api.loganalytics.io", - OSSRDBMS: "https://ossrdbms-aad.database.windows.net", - Storage: "https://storage.azure.com/", - Synapse: "https://dev.azuresynapse.net", - ServiceBus: "https://servicebus.azure.net/", - SQLDatabase: "https://database.windows.net/", - CosmosDB: "https://cosmos.azure.com", - ManagedHSM: "https://managedhsm.azure.net", - MicrosoftGraph: "https://graph.microsoft.com/", - }, - } - - // USGovernmentCloud is the cloud environment for the US Government - USGovernmentCloud = Environment{ - Name: "AzureUSGovernmentCloud", - ManagementPortalURL: "https://manage.windowsazure.us/", - PublishSettingsURL: "https://manage.windowsazure.us/publishsettings/index", - ServiceManagementEndpoint: "https://management.core.usgovcloudapi.net/", - ResourceManagerEndpoint: "https://management.usgovcloudapi.net/", - ActiveDirectoryEndpoint: "https://login.microsoftonline.us/", - GalleryEndpoint: "https://gallery.usgovcloudapi.net/", - KeyVaultEndpoint: "https://vault.usgovcloudapi.net/", - ManagedHSMEndpoint: NotAvailable, - GraphEndpoint: "https://graph.windows.net/", - ServiceBusEndpoint: "https://servicebus.usgovcloudapi.net/", - BatchManagementEndpoint: "https://batch.core.usgovcloudapi.net/", - MicrosoftGraphEndpoint: "https://graph.microsoft.us/", - StorageEndpointSuffix: "core.usgovcloudapi.net", - CosmosDBDNSSuffix: "documents.azure.us", - MariaDBDNSSuffix: "mariadb.database.usgovcloudapi.net", - MySQLDatabaseDNSSuffix: "mysql.database.usgovcloudapi.net", - PostgresqlDatabaseDNSSuffix: "postgres.database.usgovcloudapi.net", - SQLDatabaseDNSSuffix: "database.usgovcloudapi.net", - TrafficManagerDNSSuffix: "usgovtrafficmanager.net", - KeyVaultDNSSuffix: "vault.usgovcloudapi.net", - ManagedHSMDNSSuffix: NotAvailable, - ServiceBusEndpointSuffix: "servicebus.usgovcloudapi.net", - ServiceManagementVMDNSSuffix: "usgovcloudapp.net", - ResourceManagerVMDNSSuffix: "cloudapp.usgovcloudapi.net", - ContainerRegistryDNSSuffix: "azurecr.us", - TokenAudience: "https://management.usgovcloudapi.net/", - APIManagementHostNameSuffix: "azure-api.us", - SynapseEndpointSuffix: "dev.azuresynapse.usgovcloudapi.net", - DatalakeSuffix: NotAvailable, - ResourceIdentifiers: ResourceIdentifier{ - Graph: "https://graph.windows.net/", - KeyVault: "https://vault.usgovcloudapi.net", - Datalake: NotAvailable, - Batch: "https://batch.core.usgovcloudapi.net/", - OperationalInsights: "https://api.loganalytics.us", - OSSRDBMS: "https://ossrdbms-aad.database.usgovcloudapi.net", - Storage: "https://storage.azure.com/", - Synapse: "https://dev.azuresynapse.usgovcloudapi.net", - ServiceBus: "https://servicebus.azure.net/", - SQLDatabase: 
"https://database.usgovcloudapi.net/", - CosmosDB: "https://cosmos.azure.com", - ManagedHSM: NotAvailable, - MicrosoftGraph: "https://graph.microsoft.us/", - }, - } - - // ChinaCloud is the cloud environment operated in China - ChinaCloud = Environment{ - Name: "AzureChinaCloud", - ManagementPortalURL: "https://manage.chinacloudapi.com/", - PublishSettingsURL: "https://manage.chinacloudapi.com/publishsettings/index", - ServiceManagementEndpoint: "https://management.core.chinacloudapi.cn/", - ResourceManagerEndpoint: "https://management.chinacloudapi.cn/", - ActiveDirectoryEndpoint: "https://login.chinacloudapi.cn/", - GalleryEndpoint: "https://gallery.chinacloudapi.cn/", - KeyVaultEndpoint: "https://vault.azure.cn/", - ManagedHSMEndpoint: NotAvailable, - GraphEndpoint: "https://graph.chinacloudapi.cn/", - ServiceBusEndpoint: "https://servicebus.chinacloudapi.cn/", - BatchManagementEndpoint: "https://batch.chinacloudapi.cn/", - MicrosoftGraphEndpoint: "https://microsoftgraph.chinacloudapi.cn/", - StorageEndpointSuffix: "core.chinacloudapi.cn", - CosmosDBDNSSuffix: "documents.azure.cn", - MariaDBDNSSuffix: "mariadb.database.chinacloudapi.cn", - MySQLDatabaseDNSSuffix: "mysql.database.chinacloudapi.cn", - PostgresqlDatabaseDNSSuffix: "postgres.database.chinacloudapi.cn", - SQLDatabaseDNSSuffix: "database.chinacloudapi.cn", - TrafficManagerDNSSuffix: "trafficmanager.cn", - KeyVaultDNSSuffix: "vault.azure.cn", - ManagedHSMDNSSuffix: NotAvailable, - ServiceBusEndpointSuffix: "servicebus.chinacloudapi.cn", - ServiceManagementVMDNSSuffix: "chinacloudapp.cn", - ResourceManagerVMDNSSuffix: "cloudapp.chinacloudapi.cn", - ContainerRegistryDNSSuffix: "azurecr.cn", - TokenAudience: "https://management.chinacloudapi.cn/", - APIManagementHostNameSuffix: "azure-api.cn", - SynapseEndpointSuffix: "dev.azuresynapse.azure.cn", - DatalakeSuffix: NotAvailable, - ResourceIdentifiers: ResourceIdentifier{ - Graph: "https://graph.chinacloudapi.cn/", - KeyVault: "https://vault.azure.cn", - Datalake: NotAvailable, - Batch: "https://batch.chinacloudapi.cn/", - OperationalInsights: NotAvailable, - OSSRDBMS: "https://ossrdbms-aad.database.chinacloudapi.cn", - Storage: "https://storage.azure.com/", - Synapse: "https://dev.azuresynapse.net", - ServiceBus: "https://servicebus.azure.net/", - SQLDatabase: "https://database.chinacloudapi.cn/", - CosmosDB: "https://cosmos.azure.com", - ManagedHSM: NotAvailable, - MicrosoftGraph: "https://microsoftgraph.chinacloudapi.cn", - }, - } - - // GermanCloud is the cloud environment operated in Germany - GermanCloud = Environment{ - Name: "AzureGermanCloud", - ManagementPortalURL: "http://portal.microsoftazure.de/", - PublishSettingsURL: "https://manage.microsoftazure.de/publishsettings/index", - ServiceManagementEndpoint: "https://management.core.cloudapi.de/", - ResourceManagerEndpoint: "https://management.microsoftazure.de/", - ActiveDirectoryEndpoint: "https://login.microsoftonline.de/", - GalleryEndpoint: "https://gallery.cloudapi.de/", - KeyVaultEndpoint: "https://vault.microsoftazure.de/", - ManagedHSMEndpoint: NotAvailable, - GraphEndpoint: "https://graph.cloudapi.de/", - ServiceBusEndpoint: "https://servicebus.cloudapi.de/", - BatchManagementEndpoint: "https://batch.cloudapi.de/", - MicrosoftGraphEndpoint: NotAvailable, - StorageEndpointSuffix: "core.cloudapi.de", - CosmosDBDNSSuffix: "documents.microsoftazure.de", - MariaDBDNSSuffix: "mariadb.database.cloudapi.de", - MySQLDatabaseDNSSuffix: "mysql.database.cloudapi.de", - PostgresqlDatabaseDNSSuffix: 
"postgres.database.cloudapi.de", - SQLDatabaseDNSSuffix: "database.cloudapi.de", - TrafficManagerDNSSuffix: "azuretrafficmanager.de", - KeyVaultDNSSuffix: "vault.microsoftazure.de", - ManagedHSMDNSSuffix: NotAvailable, - ServiceBusEndpointSuffix: "servicebus.cloudapi.de", - ServiceManagementVMDNSSuffix: "azurecloudapp.de", - ResourceManagerVMDNSSuffix: "cloudapp.microsoftazure.de", - ContainerRegistryDNSSuffix: NotAvailable, - TokenAudience: "https://management.microsoftazure.de/", - APIManagementHostNameSuffix: NotAvailable, - SynapseEndpointSuffix: NotAvailable, - DatalakeSuffix: NotAvailable, - ResourceIdentifiers: ResourceIdentifier{ - Graph: "https://graph.cloudapi.de/", - KeyVault: "https://vault.microsoftazure.de", - Datalake: NotAvailable, - Batch: "https://batch.cloudapi.de/", - OperationalInsights: NotAvailable, - OSSRDBMS: "https://ossrdbms-aad.database.cloudapi.de", - Storage: "https://storage.azure.com/", - Synapse: NotAvailable, - ServiceBus: "https://servicebus.azure.net/", - SQLDatabase: "https://database.cloudapi.de/", - CosmosDB: "https://cosmos.azure.com", - ManagedHSM: NotAvailable, - MicrosoftGraph: NotAvailable, - }, - } -) - -// EnvironmentFromName returns an Environment based on the common name specified. -func EnvironmentFromName(name string) (Environment, error) { - // IMPORTANT - // As per @radhikagupta5: - // This is technical debt, fundamentally here because Kubernetes is not currently accepting - // contributions to the providers. Once that is an option, the provider should be updated to - // directly call `EnvironmentFromFile`. Until then, we rely on dispatching Azure Stack environment creation - // from this method based on the name that is provided to us. - if strings.EqualFold(name, "AZURESTACKCLOUD") { - return EnvironmentFromFile(os.Getenv(EnvironmentFilepathName)) - } - - name = strings.ToUpper(name) - env, ok := environments[name] - if !ok { - return env, fmt.Errorf("autorest/azure: There is no cloud environment matching the name %q", name) - } - - return env, nil -} - -// EnvironmentFromFile loads an Environment from a configuration file available on disk. -// This function is particularly useful in the Hybrid Cloud model, where one must define their own -// endpoints. -func EnvironmentFromFile(location string) (unmarshaled Environment, err error) { - fileContents, err := ioutil.ReadFile(location) - if err != nil { - return - } - - err = json.Unmarshal(fileContents, &unmarshaled) - - return -} - -// SetEnvironment updates the environment map with the specified values. -func SetEnvironment(name string, env Environment) { - environments[strings.ToUpper(name)] = env -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go b/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go deleted file mode 100644 index 507f9e9..0000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go +++ /dev/null @@ -1,245 +0,0 @@ -package azure - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - - "github.com/Azure/go-autorest/autorest" -) - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -type audience []string - -type authentication struct { - LoginEndpoint string `json:"loginEndpoint"` - Audiences audience `json:"audiences"` -} - -type environmentMetadataInfo struct { - GalleryEndpoint string `json:"galleryEndpoint"` - GraphEndpoint string `json:"graphEndpoint"` - PortalEndpoint string `json:"portalEndpoint"` - Authentication authentication `json:"authentication"` -} - -// EnvironmentProperty represent property names that clients can override -type EnvironmentProperty string - -const ( - // EnvironmentName ... - EnvironmentName EnvironmentProperty = "name" - // EnvironmentManagementPortalURL .. - EnvironmentManagementPortalURL EnvironmentProperty = "managementPortalURL" - // EnvironmentPublishSettingsURL ... - EnvironmentPublishSettingsURL EnvironmentProperty = "publishSettingsURL" - // EnvironmentServiceManagementEndpoint ... - EnvironmentServiceManagementEndpoint EnvironmentProperty = "serviceManagementEndpoint" - // EnvironmentResourceManagerEndpoint ... - EnvironmentResourceManagerEndpoint EnvironmentProperty = "resourceManagerEndpoint" - // EnvironmentActiveDirectoryEndpoint ... - EnvironmentActiveDirectoryEndpoint EnvironmentProperty = "activeDirectoryEndpoint" - // EnvironmentGalleryEndpoint ... - EnvironmentGalleryEndpoint EnvironmentProperty = "galleryEndpoint" - // EnvironmentKeyVaultEndpoint ... - EnvironmentKeyVaultEndpoint EnvironmentProperty = "keyVaultEndpoint" - // EnvironmentGraphEndpoint ... - EnvironmentGraphEndpoint EnvironmentProperty = "graphEndpoint" - // EnvironmentServiceBusEndpoint ... - EnvironmentServiceBusEndpoint EnvironmentProperty = "serviceBusEndpoint" - // EnvironmentBatchManagementEndpoint ... - EnvironmentBatchManagementEndpoint EnvironmentProperty = "batchManagementEndpoint" - // EnvironmentStorageEndpointSuffix ... - EnvironmentStorageEndpointSuffix EnvironmentProperty = "storageEndpointSuffix" - // EnvironmentSQLDatabaseDNSSuffix ... - EnvironmentSQLDatabaseDNSSuffix EnvironmentProperty = "sqlDatabaseDNSSuffix" - // EnvironmentTrafficManagerDNSSuffix ... - EnvironmentTrafficManagerDNSSuffix EnvironmentProperty = "trafficManagerDNSSuffix" - // EnvironmentKeyVaultDNSSuffix ... - EnvironmentKeyVaultDNSSuffix EnvironmentProperty = "keyVaultDNSSuffix" - // EnvironmentServiceBusEndpointSuffix ... - EnvironmentServiceBusEndpointSuffix EnvironmentProperty = "serviceBusEndpointSuffix" - // EnvironmentServiceManagementVMDNSSuffix ... - EnvironmentServiceManagementVMDNSSuffix EnvironmentProperty = "serviceManagementVMDNSSuffix" - // EnvironmentResourceManagerVMDNSSuffix ... - EnvironmentResourceManagerVMDNSSuffix EnvironmentProperty = "resourceManagerVMDNSSuffix" - // EnvironmentContainerRegistryDNSSuffix ... - EnvironmentContainerRegistryDNSSuffix EnvironmentProperty = "containerRegistryDNSSuffix" - // EnvironmentTokenAudience ... 
- EnvironmentTokenAudience EnvironmentProperty = "tokenAudience" -) - -// OverrideProperty represents property name and value that clients can override -type OverrideProperty struct { - Key EnvironmentProperty - Value string -} - -// EnvironmentFromURL loads an Environment from a URL -// This function is particularly useful in the Hybrid Cloud model, where one may define their own -// endpoints. -func EnvironmentFromURL(resourceManagerEndpoint string, properties ...OverrideProperty) (environment Environment, err error) { - var metadataEnvProperties environmentMetadataInfo - - if resourceManagerEndpoint == "" { - return environment, fmt.Errorf("Metadata resource manager endpoint is empty") - } - - if metadataEnvProperties, err = retrieveMetadataEnvironment(resourceManagerEndpoint); err != nil { - return environment, err - } - - // Give priority to user's override values - overrideProperties(&environment, properties) - - if environment.Name == "" { - environment.Name = "HybridEnvironment" - } - stampDNSSuffix := environment.StorageEndpointSuffix - if stampDNSSuffix == "" { - stampDNSSuffix = strings.TrimSuffix(strings.TrimPrefix(strings.Replace(resourceManagerEndpoint, strings.Split(resourceManagerEndpoint, ".")[0], "", 1), "."), "/") - environment.StorageEndpointSuffix = stampDNSSuffix - } - if environment.KeyVaultDNSSuffix == "" { - environment.KeyVaultDNSSuffix = fmt.Sprintf("%s.%s", "vault", stampDNSSuffix) - } - if environment.KeyVaultEndpoint == "" { - environment.KeyVaultEndpoint = fmt.Sprintf("%s%s", "https://", environment.KeyVaultDNSSuffix) - } - if environment.TokenAudience == "" { - environment.TokenAudience = metadataEnvProperties.Authentication.Audiences[0] - } - if environment.ActiveDirectoryEndpoint == "" { - environment.ActiveDirectoryEndpoint = metadataEnvProperties.Authentication.LoginEndpoint - } - if environment.ResourceManagerEndpoint == "" { - environment.ResourceManagerEndpoint = resourceManagerEndpoint - } - if environment.GalleryEndpoint == "" { - environment.GalleryEndpoint = metadataEnvProperties.GalleryEndpoint - } - if environment.GraphEndpoint == "" { - environment.GraphEndpoint = metadataEnvProperties.GraphEndpoint - } - - return environment, nil -} - -func overrideProperties(environment *Environment, properties []OverrideProperty) { - for _, property := range properties { - switch property.Key { - case EnvironmentName: - { - environment.Name = property.Value - } - case EnvironmentManagementPortalURL: - { - environment.ManagementPortalURL = property.Value - } - case EnvironmentPublishSettingsURL: - { - environment.PublishSettingsURL = property.Value - } - case EnvironmentServiceManagementEndpoint: - { - environment.ServiceManagementEndpoint = property.Value - } - case EnvironmentResourceManagerEndpoint: - { - environment.ResourceManagerEndpoint = property.Value - } - case EnvironmentActiveDirectoryEndpoint: - { - environment.ActiveDirectoryEndpoint = property.Value - } - case EnvironmentGalleryEndpoint: - { - environment.GalleryEndpoint = property.Value - } - case EnvironmentKeyVaultEndpoint: - { - environment.KeyVaultEndpoint = property.Value - } - case EnvironmentGraphEndpoint: - { - environment.GraphEndpoint = property.Value - } - case EnvironmentServiceBusEndpoint: - { - environment.ServiceBusEndpoint = property.Value - } - case EnvironmentBatchManagementEndpoint: - { - environment.BatchManagementEndpoint = property.Value - } - case EnvironmentStorageEndpointSuffix: - { - environment.StorageEndpointSuffix = property.Value - } - case 
EnvironmentSQLDatabaseDNSSuffix: - { - environment.SQLDatabaseDNSSuffix = property.Value - } - case EnvironmentTrafficManagerDNSSuffix: - { - environment.TrafficManagerDNSSuffix = property.Value - } - case EnvironmentKeyVaultDNSSuffix: - { - environment.KeyVaultDNSSuffix = property.Value - } - case EnvironmentServiceBusEndpointSuffix: - { - environment.ServiceBusEndpointSuffix = property.Value - } - case EnvironmentServiceManagementVMDNSSuffix: - { - environment.ServiceManagementVMDNSSuffix = property.Value - } - case EnvironmentResourceManagerVMDNSSuffix: - { - environment.ResourceManagerVMDNSSuffix = property.Value - } - case EnvironmentContainerRegistryDNSSuffix: - { - environment.ContainerRegistryDNSSuffix = property.Value - } - case EnvironmentTokenAudience: - { - environment.TokenAudience = property.Value - } - } - } -} - -func retrieveMetadataEnvironment(endpoint string) (environment environmentMetadataInfo, err error) { - client := autorest.NewClientWithUserAgent("") - managementEndpoint := fmt.Sprintf("%s%s", strings.TrimSuffix(endpoint, "/"), "/metadata/endpoints?api-version=1.0") - req, _ := http.NewRequest("GET", managementEndpoint, nil) - response, err := client.Do(req) - if err != nil { - return environment, err - } - defer response.Body.Close() - jsonResponse, err := ioutil.ReadAll(response.Body) - if err != nil { - return environment, err - } - err = json.Unmarshal(jsonResponse, &environment) - return environment, err -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go deleted file mode 100644 index 5b52357..0000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go +++ /dev/null @@ -1,204 +0,0 @@ -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package azure - -import ( - "errors" - "fmt" - "net/http" - "net/url" - "strings" - "time" - - "github.com/Azure/go-autorest/autorest" -) - -// DoRetryWithRegistration tries to register the resource provider in case it is unregistered. -// It also handles request retries -func DoRetryWithRegistration(client autorest.Client) autorest.SendDecorator { - return func(s autorest.Sender) autorest.Sender { - return autorest.SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - rr := autorest.NewRetriableRequest(r) - for currentAttempt := 0; currentAttempt < client.RetryAttempts; currentAttempt++ { - err = rr.Prepare() - if err != nil { - return resp, err - } - - resp, err = autorest.SendWithSender(s, rr.Request(), - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), - ) - if err != nil { - return resp, err - } - - if resp.StatusCode != http.StatusConflict || client.SkipResourceProviderRegistration { - return resp, err - } - - var re RequestError - if strings.Contains(r.Header.Get("Content-Type"), "xml") { - // XML errors (e.g. 
Storage Data Plane) only return the inner object
-					err = autorest.Respond(resp, autorest.ByUnmarshallingXML(&re.ServiceError))
-				} else {
-					err = autorest.Respond(resp, autorest.ByUnmarshallingJSON(&re))
-				}
-
-				if err != nil {
-					return resp, err
-				}
-				err = re
-
-				if re.ServiceError != nil && re.ServiceError.Code == "MissingSubscriptionRegistration" {
-					regErr := register(client, r, re)
-					if regErr != nil {
-						return resp, fmt.Errorf("failed auto registering Resource Provider: %s. Original error: %w", regErr, err)
-					}
-				}
-			}
-			return resp, err
-		})
-	}
-}
-
-func getProvider(re RequestError) (string, error) {
-	if re.ServiceError != nil && len(re.ServiceError.Details) > 0 {
-		return re.ServiceError.Details[0]["target"].(string), nil
-	}
-	return "", errors.New("provider was not found in the response")
-}
-
-func register(client autorest.Client, originalReq *http.Request, re RequestError) error {
-	subID := getSubscription(originalReq.URL.Path)
-	if subID == "" {
-		return errors.New("missing parameter subscriptionID to register resource provider")
-	}
-	providerName, err := getProvider(re)
-	if err != nil {
-		return fmt.Errorf("missing parameter provider to register resource provider: %s", err)
-	}
-	newURL := url.URL{
-		Scheme: originalReq.URL.Scheme,
-		Host:   originalReq.URL.Host,
-	}
-
-	// taken from the resources SDK
-	// with almost identical code, these sections are easier to maintain
-	// It is also not a good idea to import the SDK here
-	// https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L252
-	pathParameters := map[string]interface{}{
-		"resourceProviderNamespace": autorest.Encode("path", providerName),
-		"subscriptionId":            autorest.Encode("path", subID),
-	}
-
-	const APIVersion = "2016-09-01"
-	queryParameters := map[string]interface{}{
-		"api-version": APIVersion,
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsPost(),
-		autorest.WithBaseURL(newURL.String()),
-		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register", pathParameters),
-		autorest.WithQueryParameters(queryParameters),
-	)
-
-	req, err := preparer.Prepare(&http.Request{})
-	if err != nil {
-		return err
-	}
-	req = req.WithContext(originalReq.Context())
-
-	resp, err := autorest.SendWithSender(client, req,
-		autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...),
-	)
-	if err != nil {
-		return err
-	}
-
-	type Provider struct {
-		RegistrationState *string `json:"registrationState,omitempty"`
-	}
-	var provider Provider
-
-	err = autorest.Respond(
-		resp,
-		WithErrorUnlessStatusCode(http.StatusOK),
-		autorest.ByUnmarshallingJSON(&provider),
-		autorest.ByClosing(),
-	)
-	if err != nil {
-		return err
-	}
-
-	// poll for registered provisioning state
-	registrationStartTime := time.Now()
-	for err == nil && (client.PollingDuration == 0 || (client.PollingDuration != 0 && time.Since(registrationStartTime) < client.PollingDuration)) {
-		// taken from the resources SDK
-		// https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L45
-		preparer := autorest.CreatePreparer(
-			autorest.AsGet(),
-			autorest.WithBaseURL(newURL.String()),
-			autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}", pathParameters),
-			autorest.WithQueryParameters(queryParameters),
-		)
-		req, err = preparer.Prepare(&http.Request{})
-		if err != nil {
-
return err - } - req = req.WithContext(originalReq.Context()) - - resp, err := autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), - ) - if err != nil { - return err - } - - err = autorest.Respond( - resp, - WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&provider), - autorest.ByClosing(), - ) - if err != nil { - return err - } - - if provider.RegistrationState != nil && - *provider.RegistrationState == "Registered" { - break - } - - delayed := autorest.DelayWithRetryAfter(resp, originalReq.Context().Done()) - if !delayed && !autorest.DelayForBackoff(client.PollingDelay, 0, originalReq.Context().Done()) { - return originalReq.Context().Err() - } - } - if client.PollingDuration != 0 && !(time.Since(registrationStartTime) < client.PollingDuration) { - return errors.New("polling for resource provider registration has exceeded the polling duration") - } - return err -} - -func getSubscription(path string) string { - parts := strings.Split(path, "/") - for i, v := range parts { - if v == "subscriptions" && (i+1) < len(parts) { - return parts[i+1] - } - } - return "" -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/client.go b/vendor/github.com/Azure/go-autorest/autorest/client.go deleted file mode 100644 index bb5f939..0000000 --- a/vendor/github.com/Azure/go-autorest/autorest/client.go +++ /dev/null @@ -1,328 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "crypto/tls" - "errors" - "fmt" - "io" - "io/ioutil" - "log" - "net/http" - "strings" - "time" - - "github.com/Azure/go-autorest/logger" -) - -const ( - // DefaultPollingDelay is a reasonable delay between polling requests. - DefaultPollingDelay = 30 * time.Second - - // DefaultPollingDuration is a reasonable total polling duration. - DefaultPollingDuration = 15 * time.Minute - - // DefaultRetryAttempts is number of attempts for retry status codes (5xx). - DefaultRetryAttempts = 3 - - // DefaultRetryDuration is the duration to wait between retries. - DefaultRetryDuration = 30 * time.Second -) - -var ( - // StatusCodesForRetry are a defined group of status code for which the client will retry - StatusCodesForRetry = []int{ - http.StatusRequestTimeout, // 408 - http.StatusTooManyRequests, // 429 - http.StatusInternalServerError, // 500 - http.StatusBadGateway, // 502 - http.StatusServiceUnavailable, // 503 - http.StatusGatewayTimeout, // 504 - } -) - -const ( - requestFormat = `HTTP Request Begin =================================================== -%s -===================================================== HTTP Request End -` - responseFormat = `HTTP Response Begin =================================================== -%s -===================================================== HTTP Response End -` -) - -// Response serves as the base for all responses from generated clients. 
It provides access to the -// last http.Response. -type Response struct { - *http.Response `json:"-"` -} - -// IsHTTPStatus returns true if the returned HTTP status code matches the provided status code. -// If there was no response (i.e. the underlying http.Response is nil) the return value is false. -func (r Response) IsHTTPStatus(statusCode int) bool { - if r.Response == nil { - return false - } - return r.Response.StatusCode == statusCode -} - -// HasHTTPStatus returns true if the returned HTTP status code matches one of the provided status codes. -// If there was no response (i.e. the underlying http.Response is nil) or not status codes are provided -// the return value is false. -func (r Response) HasHTTPStatus(statusCodes ...int) bool { - return ResponseHasStatusCode(r.Response, statusCodes...) -} - -// LoggingInspector implements request and response inspectors that log the full request and -// response to a supplied log. -type LoggingInspector struct { - Logger *log.Logger -} - -// WithInspection returns a PrepareDecorator that emits the http.Request to the supplied logger. The -// body is restored after being emitted. -// -// Note: Since it reads the entire Body, this decorator should not be used where body streaming is -// important. It is best used to trace JSON or similar body values. -func (li LoggingInspector) WithInspection() PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - var body, b bytes.Buffer - - defer r.Body.Close() - - r.Body = ioutil.NopCloser(io.TeeReader(r.Body, &body)) - if err := r.Write(&b); err != nil { - return nil, fmt.Errorf("Failed to write response: %v", err) - } - - li.Logger.Printf(requestFormat, b.String()) - - r.Body = ioutil.NopCloser(&body) - return p.Prepare(r) - }) - } -} - -// ByInspecting returns a RespondDecorator that emits the http.Response to the supplied logger. The -// body is restored after being emitted. -// -// Note: Since it reads the entire Body, this decorator should not be used where body streaming is -// important. It is best used to trace JSON or similar body values. -func (li LoggingInspector) ByInspecting() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - var body, b bytes.Buffer - defer resp.Body.Close() - resp.Body = ioutil.NopCloser(io.TeeReader(resp.Body, &body)) - if err := resp.Write(&b); err != nil { - return fmt.Errorf("Failed to write response: %v", err) - } - - li.Logger.Printf(responseFormat, b.String()) - - resp.Body = ioutil.NopCloser(&body) - return r.Respond(resp) - }) - } -} - -// Client is the base for autorest generated clients. It provides default, "do nothing" -// implementations of an Authorizer, RequestInspector, and ResponseInspector. It also returns the -// standard, undecorated http.Client as a default Sender. -// -// Generated clients should also use Error (see NewError and NewErrorWithError) for errors and -// return responses that compose with Response. -// -// Most customization of generated clients is best achieved by supplying a custom Authorizer, custom -// RequestInspector, and / or custom ResponseInspector. Users may log requests, implement circuit -// breakers (see https://msdn.microsoft.com/en-us/library/dn589784.aspx) or otherwise influence -// sending the request by providing a decorated Sender. 
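The Client defined just below is typically customized through its exported fields rather than by replacing Do. A minimal sketch, assuming the pre-removal autorest package (the user-agent string and inspector are illustrative):

package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	c := autorest.NewClientWithUserAgent("example-operator")
	c.RetryAttempts = 1 // per the field docs below, 1 disables retries
	c.RetryDuration = 5 * time.Second
	// Log each outgoing request before it is prepared further.
	c.RequestInspector = func(p autorest.Preparer) autorest.Preparer {
		return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) {
			fmt.Println("->", r.Method, r.URL)
			return p.Prepare(r)
		})
	}
	_ = c
}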
-type Client struct { - Authorizer Authorizer - Sender Sender - RequestInspector PrepareDecorator - ResponseInspector RespondDecorator - - // PollingDelay sets the polling frequency used in absence of a Retry-After HTTP header - PollingDelay time.Duration - - // PollingDuration sets the maximum polling time after which an error is returned. - // Setting this to zero will use the provided context to control the duration. - PollingDuration time.Duration - - // RetryAttempts sets the total number of times the client will attempt to make an HTTP request. - // Set the value to 1 to disable retries. DO NOT set the value to less than 1. - RetryAttempts int - - // RetryDuration sets the delay duration for retries. - RetryDuration time.Duration - - // UserAgent, if not empty, will be set as the HTTP User-Agent header on all requests sent - // through the Do method. - UserAgent string - - Jar http.CookieJar - - // Set to true to skip attempted registration of resource providers (false by default). - SkipResourceProviderRegistration bool - - // SendDecorators can be used to override the default chain of SendDecorators. - // This can be used to specify things like a custom retry SendDecorator. - // Set this to an empty slice to use no SendDecorators. - SendDecorators []SendDecorator -} - -// NewClientWithUserAgent returns an instance of a Client with the UserAgent set to the passed -// string. -func NewClientWithUserAgent(ua string) Client { - return newClient(ua, tls.RenegotiateNever) -} - -// ClientOptions contains various Client configuration options. -type ClientOptions struct { - // UserAgent is an optional user-agent string to append to the default user agent. - UserAgent string - - // Renegotiation is an optional setting to control client-side TLS renegotiation. - Renegotiation tls.RenegotiationSupport -} - -// NewClientWithOptions returns an instance of a Client with the specified values. -func NewClientWithOptions(options ClientOptions) Client { - return newClient(options.UserAgent, options.Renegotiation) -} - -func newClient(ua string, renegotiation tls.RenegotiationSupport) Client { - c := Client{ - PollingDelay: DefaultPollingDelay, - PollingDuration: DefaultPollingDuration, - RetryAttempts: DefaultRetryAttempts, - RetryDuration: DefaultRetryDuration, - UserAgent: UserAgent(), - } - c.Sender = c.sender(renegotiation) - c.AddToUserAgent(ua) - return c -} - -// AddToUserAgent adds an extension to the current user agent -func (c *Client) AddToUserAgent(extension string) error { - if extension != "" { - c.UserAgent = fmt.Sprintf("%s %s", c.UserAgent, extension) - return nil - } - return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.UserAgent) -} - -// Do implements the Sender interface by invoking the active Sender after applying authorization. -// If Sender is not set, it uses a new instance of http.Client. In both cases it will, if UserAgent -// is set, apply set the User-Agent header. -func (c Client) Do(r *http.Request) (*http.Response, error) { - if r.UserAgent() == "" { - r, _ = Prepare(r, - WithUserAgent(c.UserAgent)) - } - // NOTE: c.WithInspection() must be last in the list so that it can inspect all preceding operations - r, err := Prepare(r, - c.WithAuthorization(), - c.WithInspection()) - if err != nil { - var resp *http.Response - if detErr, ok := err.(DetailedError); ok { - // if the authorization failed (e.g. invalid credentials) there will - // be a response associated with the error, be sure to return it. 
- resp = detErr.Response - } - return resp, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed") - } - logger.Instance.WriteRequest(r, logger.Filter{ - Header: func(k string, v []string) (bool, []string) { - // remove the auth token from the log - if strings.EqualFold(k, "Authorization") || strings.EqualFold(k, "Ocp-Apim-Subscription-Key") { - v = []string{"**REDACTED**"} - } - return true, v - }, - }) - resp, err := SendWithSender(c.sender(tls.RenegotiateNever), r) - if resp == nil && err == nil { - err = errors.New("autorest: received nil response and error") - } - logger.Instance.WriteResponse(resp, logger.Filter{}) - Respond(resp, c.ByInspecting()) - return resp, err -} - -// sender returns the Sender to which to send requests. -func (c Client) sender(renengotiation tls.RenegotiationSupport) Sender { - if c.Sender == nil { - return sender(renengotiation) - } - return c.Sender -} - -// WithAuthorization is a convenience method that returns the WithAuthorization PrepareDecorator -// from the current Authorizer. If not Authorizer is set, it uses the NullAuthorizer. -func (c Client) WithAuthorization() PrepareDecorator { - return c.authorizer().WithAuthorization() -} - -// authorizer returns the Authorizer to use. -func (c Client) authorizer() Authorizer { - if c.Authorizer == nil { - return NullAuthorizer{} - } - return c.Authorizer -} - -// WithInspection is a convenience method that passes the request to the supplied RequestInspector, -// if present, or returns the WithNothing PrepareDecorator otherwise. -func (c Client) WithInspection() PrepareDecorator { - if c.RequestInspector == nil { - return WithNothing() - } - return c.RequestInspector -} - -// ByInspecting is a convenience method that passes the response to the supplied ResponseInspector, -// if present, or returns the ByIgnoring RespondDecorator otherwise. -func (c Client) ByInspecting() RespondDecorator { - if c.ResponseInspector == nil { - return ByIgnoring() - } - return c.ResponseInspector -} - -// Send sends the provided http.Request using the client's Sender or the default sender. -// It returns the http.Response and possible error. It also accepts a, possibly empty, -// default set of SendDecorators used when sending the request. -// SendDecorators have the following precedence: -// 1. In a request's context via WithSendDecorators() -// 2. Specified on the client in SendDecorators -// 3. The default values specified in this method -func (c Client) Send(req *http.Request, decorators ...SendDecorator) (*http.Response, error) { - if c.SendDecorators != nil { - decorators = c.SendDecorators - } - inCtx := req.Context().Value(ctxSendDecorators{}) - if sd, ok := inCtx.([]SendDecorator); ok { - decorators = sd - } - return SendWithSender(c, req, decorators...) -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE deleted file mode 100644 index b9d6a27..0000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
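To anchor the review of this removal, a brief sketch (not from the diff itself) of how callers typically used the Client deleted above. The user-agent string, URL, and the DoRetryForAttempts helper (from the package's sender.go, which is not shown in this hunk) are assumptions.

package main

import (
	"net/http"
	"time"

	"github.com/Azure/go-autorest/autorest"
)

func send() (*http.Response, error) {
	// Appends the given string to the default user agent via AddToUserAgent.
	c := autorest.NewClientWithUserAgent("example-operator/0.0.0")

	// Client-level SendDecorators replace any defaults passed to Send;
	// decorators attached to the request context via WithSendDecorators
	// take precedence over both.
	c.SendDecorators = []autorest.SendDecorator{
		autorest.DoRetryForAttempts(3, 2*time.Second), // assumed helper from sender.go
	}

	req, err := http.NewRequest(http.MethodGet, "https://example.com", nil)
	if err != nil {
		return nil, err
	}
	return c.Send(req)
}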
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Microsoft Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/date.go b/vendor/github.com/Azure/go-autorest/autorest/date/date.go deleted file mode 100644 index c457106..0000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/date.go +++ /dev/null @@ -1,96 +0,0 @@ -/* -Package date provides time.Time derivatives that conform to the Swagger.io (https://swagger.io/) -defined date formats: Date and DateTime. Both types may, in most cases, be used in lieu of -time.Time types. And both convert to time.Time through a ToTime method. 
-*/ -package date - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "fmt" - "time" -) - -const ( - fullDate = "2006-01-02" - fullDateJSON = `"2006-01-02"` - dateFormat = "%04d-%02d-%02d" - jsonFormat = `"%04d-%02d-%02d"` -) - -// Date defines a type similar to time.Time but assumes a layout of RFC3339 full-date (i.e., -// 2006-01-02). -type Date struct { - time.Time -} - -// ParseDate create a new Date from the passed string. -func ParseDate(date string) (d Date, err error) { - return parseDate(date, fullDate) -} - -func parseDate(date string, format string) (Date, error) { - d, err := time.Parse(format, date) - return Date{Time: d}, err -} - -// MarshalBinary preserves the Date as a byte array conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d Date) MarshalBinary() ([]byte, error) { - return d.MarshalText() -} - -// UnmarshalBinary reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d *Date) UnmarshalBinary(data []byte) error { - return d.UnmarshalText(data) -} - -// MarshalJSON preserves the Date as a JSON string conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d Date) MarshalJSON() (json []byte, err error) { - return []byte(fmt.Sprintf(jsonFormat, d.Year(), d.Month(), d.Day())), nil -} - -// UnmarshalJSON reconstitutes the Date from a JSON string conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d *Date) UnmarshalJSON(data []byte) (err error) { - d.Time, err = time.Parse(fullDateJSON, string(data)) - return err -} - -// MarshalText preserves the Date as a byte array conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d Date) MarshalText() (text []byte, err error) { - return []byte(fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day())), nil -} - -// UnmarshalText reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d *Date) UnmarshalText(data []byte) (err error) { - d.Time, err = time.Parse(fullDate, string(data)) - return err -} - -// String returns the Date formatted as an RFC3339 full-date string (i.e., 2006-01-02). -func (d Date) String() string { - return fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day()) -} - -// ToTime returns a Date as a time.Time -func (d Date) ToTime() time.Time { - return d.Time -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go deleted file mode 100644 index 4e05432..0000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build modhack - -package date - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
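A sketch of the removed date.Date in use, round-tripping the RFC3339 full-date layout through JSON; the date value is illustrative.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/go-autorest/autorest/date"
)

func main() {
	d, err := date.ParseDate("2017-01-02")
	if err != nil {
		panic(err)
	}

	b, _ := json.Marshal(d) // `"2017-01-02"`

	var d2 date.Date
	_ = json.Unmarshal(b, &d2)
	fmt.Println(d2.String()) // 2017-01-02
}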
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// This file, and the github.com/Azure/go-autorest import, won't actually become part of -// the resultant binary. - -// Necessary for safely adding multi-module repo. -// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository -import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/time.go b/vendor/github.com/Azure/go-autorest/autorest/date/time.go deleted file mode 100644 index b453fad..0000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/time.go +++ /dev/null @@ -1,103 +0,0 @@ -package date - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "regexp" - "time" -) - -// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. -const ( - azureUtcFormatJSON = `"2006-01-02T15:04:05.999999999"` - azureUtcFormat = "2006-01-02T15:04:05.999999999" - rfc3339JSON = `"` + time.RFC3339Nano + `"` - rfc3339 = time.RFC3339Nano - tzOffsetRegex = `(Z|z|\+|-)(\d+:\d+)*"*$` -) - -// Time defines a type similar to time.Time but assumes a layout of RFC3339 date-time (i.e., -// 2006-01-02T15:04:05Z). -type Time struct { - time.Time -} - -// MarshalBinary preserves the Time as a byte array conforming to RFC3339 date-time (i.e., -// 2006-01-02T15:04:05Z). -func (t Time) MarshalBinary() ([]byte, error) { - return t.Time.MarshalText() -} - -// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC3339 date-time -// (i.e., 2006-01-02T15:04:05Z). -func (t *Time) UnmarshalBinary(data []byte) error { - return t.UnmarshalText(data) -} - -// MarshalJSON preserves the Time as a JSON string conforming to RFC3339 date-time (i.e., -// 2006-01-02T15:04:05Z). -func (t Time) MarshalJSON() (json []byte, err error) { - return t.Time.MarshalJSON() -} - -// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC3339 date-time -// (i.e., 2006-01-02T15:04:05Z). -func (t *Time) UnmarshalJSON(data []byte) (err error) { - timeFormat := azureUtcFormatJSON - match, err := regexp.Match(tzOffsetRegex, data) - if err != nil { - return err - } else if match { - timeFormat = rfc3339JSON - } - t.Time, err = ParseTime(timeFormat, string(data)) - return err -} - -// MarshalText preserves the Time as a byte array conforming to RFC3339 date-time (i.e., -// 2006-01-02T15:04:05Z). 
-func (t Time) MarshalText() (text []byte, err error) { - return t.Time.MarshalText() -} - -// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC3339 date-time -// (i.e., 2006-01-02T15:04:05Z). -func (t *Time) UnmarshalText(data []byte) (err error) { - timeFormat := azureUtcFormat - match, err := regexp.Match(tzOffsetRegex, data) - if err != nil { - return err - } else if match { - timeFormat = rfc3339 - } - t.Time, err = ParseTime(timeFormat, string(data)) - return err -} - -// String returns the Time formatted as an RFC3339 date-time string (i.e., -// 2006-01-02T15:04:05Z). -func (t Time) String() string { - // Note: time.Time.String does not return an RFC3339 compliant string, time.Time.MarshalText does. - b, err := t.MarshalText() - if err != nil { - return "" - } - return string(b) -} - -// ToTime returns a Time as a time.Time -func (t Time) ToTime() time.Time { - return t.Time -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go deleted file mode 100644 index 48fb39b..0000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go +++ /dev/null @@ -1,100 +0,0 @@ -package date - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "errors" - "time" -) - -const ( - rfc1123JSON = `"` + time.RFC1123 + `"` - rfc1123 = time.RFC1123 -) - -// TimeRFC1123 defines a type similar to time.Time but assumes a layout of RFC1123 date-time (i.e., -// Mon, 02 Jan 2006 15:04:05 MST). -type TimeRFC1123 struct { - time.Time -} - -// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC1123 date-time -// (i.e., Mon, 02 Jan 2006 15:04:05 MST). -func (t *TimeRFC1123) UnmarshalJSON(data []byte) (err error) { - t.Time, err = ParseTime(rfc1123JSON, string(data)) - if err != nil { - return err - } - return nil -} - -// MarshalJSON preserves the Time as a JSON string conforming to RFC1123 date-time (i.e., -// Mon, 02 Jan 2006 15:04:05 MST). -func (t TimeRFC1123) MarshalJSON() ([]byte, error) { - if y := t.Year(); y < 0 || y >= 10000 { - return nil, errors.New("Time.MarshalJSON: year outside of range [0,9999]") - } - b := []byte(t.Format(rfc1123JSON)) - return b, nil -} - -// MarshalText preserves the Time as a byte array conforming to RFC1123 date-time (i.e., -// Mon, 02 Jan 2006 15:04:05 MST). -func (t TimeRFC1123) MarshalText() ([]byte, error) { - if y := t.Year(); y < 0 || y >= 10000 { - return nil, errors.New("Time.MarshalText: year outside of range [0,9999]") - } - - b := []byte(t.Format(rfc1123)) - return b, nil -} - -// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC1123 date-time -// (i.e., Mon, 02 Jan 2006 15:04:05 MST). 
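The zone-suffix fallback described above, sketched: UnmarshalJSON selects the RFC3339 layout only when tzOffsetRegex finds a suffix, so Azure timestamps missing the trailing 'Z' still parse. Timestamp values are illustrative.

package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/date"
)

func main() {
	var withZone, withoutZone date.Time

	// The trailing 'Z' matches tzOffsetRegex, so the RFC3339 layout is used.
	_ = withZone.UnmarshalJSON([]byte(`"2017-01-02T15:04:05Z"`))

	// No suffix: parsing falls back to the azureUtcFormat layout.
	_ = withoutZone.UnmarshalJSON([]byte(`"2017-01-02T15:04:05"`))

	fmt.Println(withZone.String(), withoutZone.String())
}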
-func (t *TimeRFC1123) UnmarshalText(data []byte) (err error) { - t.Time, err = ParseTime(rfc1123, string(data)) - if err != nil { - return err - } - return nil -} - -// MarshalBinary preserves the Time as a byte array conforming to RFC1123 date-time (i.e., -// Mon, 02 Jan 2006 15:04:05 MST). -func (t TimeRFC1123) MarshalBinary() ([]byte, error) { - return t.MarshalText() -} - -// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC1123 date-time -// (i.e., Mon, 02 Jan 2006 15:04:05 MST). -func (t *TimeRFC1123) UnmarshalBinary(data []byte) error { - return t.UnmarshalText(data) -} - -// ToTime returns a Time as a time.Time -func (t TimeRFC1123) ToTime() time.Time { - return t.Time -} - -// String returns the Time formatted as an RFC1123 date-time string (i.e., -// Mon, 02 Jan 2006 15:04:05 MST). -func (t TimeRFC1123) String() string { - // Note: time.Time.String does not return an RFC1123 compliant string, time.Time.MarshalText does. - b, err := t.MarshalText() - if err != nil { - return "" - } - return string(b) -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go deleted file mode 100644 index 7073959..0000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go +++ /dev/null @@ -1,123 +0,0 @@ -package date - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "encoding/binary" - "encoding/json" - "time" -) - -// unixEpoch is the moment in time that should be treated as timestamp 0. -var unixEpoch = time.Date(1970, time.January, 1, 0, 0, 0, 0, time.UTC) - -// UnixTime marshals and unmarshals a time that is represented as the number -// of seconds (ignoring skip-seconds) since the Unix Epoch. -type UnixTime time.Time - -// Duration returns the time as a Duration since the UnixEpoch. -func (t UnixTime) Duration() time.Duration { - return time.Time(t).Sub(unixEpoch) -} - -// NewUnixTimeFromSeconds creates a UnixTime as a number of seconds from the UnixEpoch. -func NewUnixTimeFromSeconds(seconds float64) UnixTime { - return NewUnixTimeFromDuration(time.Duration(seconds * float64(time.Second))) -} - -// NewUnixTimeFromNanoseconds creates a UnixTime as a number of nanoseconds from the UnixEpoch. -func NewUnixTimeFromNanoseconds(nanoseconds int64) UnixTime { - return NewUnixTimeFromDuration(time.Duration(nanoseconds)) -} - -// NewUnixTimeFromDuration creates a UnixTime as a duration of time since the UnixEpoch. -func NewUnixTimeFromDuration(dur time.Duration) UnixTime { - return UnixTime(unixEpoch.Add(dur)) -} - -// UnixEpoch retreives the moment considered the Unix Epoch. I.e. The time represented by '0' -func UnixEpoch() time.Time { - return unixEpoch -} - -// MarshalJSON preserves the UnixTime as a JSON number conforming to Unix Timestamp requirements. -// (i.e. the number of seconds since midnight January 1st, 1970 not considering leap seconds.) 
-func (t UnixTime) MarshalJSON() ([]byte, error) { - buffer := &bytes.Buffer{} - enc := json.NewEncoder(buffer) - err := enc.Encode(float64(time.Time(t).UnixNano()) / 1e9) - if err != nil { - return nil, err - } - return buffer.Bytes(), nil -} - -// UnmarshalJSON reconstitures a UnixTime saved as a JSON number of the number of seconds since -// midnight January 1st, 1970. -func (t *UnixTime) UnmarshalJSON(text []byte) error { - dec := json.NewDecoder(bytes.NewReader(text)) - - var secondsSinceEpoch float64 - if err := dec.Decode(&secondsSinceEpoch); err != nil { - return err - } - - *t = NewUnixTimeFromSeconds(secondsSinceEpoch) - - return nil -} - -// MarshalText stores the number of seconds since the Unix Epoch as a textual floating point number. -func (t UnixTime) MarshalText() ([]byte, error) { - cast := time.Time(t) - return cast.MarshalText() -} - -// UnmarshalText populates a UnixTime with a value stored textually as a floating point number of seconds since the Unix Epoch. -func (t *UnixTime) UnmarshalText(raw []byte) error { - var unmarshaled time.Time - - if err := unmarshaled.UnmarshalText(raw); err != nil { - return err - } - - *t = UnixTime(unmarshaled) - return nil -} - -// MarshalBinary converts a UnixTime into a binary.LittleEndian float64 of nanoseconds since the epoch. -func (t UnixTime) MarshalBinary() ([]byte, error) { - buf := &bytes.Buffer{} - - payload := int64(t.Duration()) - - if err := binary.Write(buf, binary.LittleEndian, &payload); err != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -// UnmarshalBinary converts a from a binary.LittleEndian float64 of nanoseconds since the epoch into a UnixTime. -func (t *UnixTime) UnmarshalBinary(raw []byte) error { - var nanosecondsSinceEpoch int64 - - if err := binary.Read(bytes.NewReader(raw), binary.LittleEndian, &nanosecondsSinceEpoch); err != nil { - return err - } - *t = NewUnixTimeFromNanoseconds(nanosecondsSinceEpoch) - return nil -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/utility.go b/vendor/github.com/Azure/go-autorest/autorest/date/utility.go deleted file mode 100644 index 12addf0..0000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/utility.go +++ /dev/null @@ -1,25 +0,0 @@ -package date - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "strings" - "time" -) - -// ParseTime to parse Time string to specified format. -func ParseTime(format string, t string) (d time.Time, err error) { - return time.Parse(format, strings.ToUpper(t)) -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/error.go b/vendor/github.com/Azure/go-autorest/autorest/error.go deleted file mode 100644 index 35098ed..0000000 --- a/vendor/github.com/Azure/go-autorest/autorest/error.go +++ /dev/null @@ -1,103 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
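The UnixTime helpers above, sketched: the JSON form carries fractional seconds, while the binary form is a little-endian count of nanoseconds since UnixEpoch. The value is illustrative.

package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/date"
)

func main() {
	t := date.NewUnixTimeFromSeconds(1.5) // 1.5s after the Unix epoch

	j, _ := t.MarshalJSON()
	fmt.Print(string(j)) // 1.5 (json.Encoder terminates the value with a newline)

	fmt.Println(t.Duration()) // 1.5s
}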
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "fmt" - "net/http" -) - -const ( - // UndefinedStatusCode is used when HTTP status code is not available for an error. - UndefinedStatusCode = 0 -) - -// DetailedError encloses an error with details of the package, method, and associated HTTP -// status code (if any). -type DetailedError struct { - Original error - - // PackageType is the package type of the object emitting the error. For types, the value - // matches that produced by the '%T' format specifier of the fmt package. For other elements, - // such as functions, it is just the package name (e.g., "autorest"). - PackageType string - - // Method is the name of the method raising the error. - Method string - - // StatusCode is the HTTP Response StatusCode (if non-zero) that led to the error. - StatusCode interface{} - - // Message is the error message. - Message string - - // ServiceError is the response body of the failed API, in bytes. - ServiceError []byte - - // Response is the response object that was returned during failure if applicable. - Response *http.Response -} - -// NewError creates a new Error conforming object from the passed packageType, method, and -// message. message is treated as a format string to which the optional args apply. -func NewError(packageType string, method string, message string, args ...interface{}) DetailedError { - return NewErrorWithError(nil, packageType, method, nil, message, args...) -} - -// NewErrorWithResponse creates a new Error conforming object from the passed -// packageType, method, statusCode of the given resp (UndefinedStatusCode if -// resp is nil), and message. message is treated as a format string to which the -// optional args apply. -func NewErrorWithResponse(packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError { - return NewErrorWithError(nil, packageType, method, resp, message, args...) -} - -// NewErrorWithError creates a new Error conforming object from the -// passed packageType, method, statusCode of the given resp (UndefinedStatusCode -// if resp is nil), message, and original error. message is treated as a format -// string to which the optional args apply. -func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError { - if v, ok := original.(DetailedError); ok { - return v - } - - statusCode := UndefinedStatusCode - if resp != nil { - statusCode = resp.StatusCode - } - - return DetailedError{ - Original: original, - PackageType: packageType, - Method: method, - StatusCode: statusCode, - Message: fmt.Sprintf(message, args...), - Response: resp, - } -} - -// Error returns a formatted string containing all available details (i.e., PackageType, Method, -// StatusCode, Message, and original error (if any)).
-func (e DetailedError) Error() string { - if e.Original == nil { - return fmt.Sprintf("%s#%s: %s: StatusCode=%d", e.PackageType, e.Method, e.Message, e.StatusCode) - } - return fmt.Sprintf("%s#%s: %s: StatusCode=%d -- Original Error: %v", e.PackageType, e.Method, e.Message, e.StatusCode, e.Original) -} - -// Unwrap returns the original error. -func (e DetailedError) Unwrap() error { - return e.Original -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/go_mod_tidy_hack.go deleted file mode 100644 index 792f82d..0000000 --- a/vendor/github.com/Azure/go-autorest/autorest/go_mod_tidy_hack.go +++ /dev/null @@ -1,25 +0,0 @@ -//go:build modhack -// +build modhack - -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// This file, and the github.com/Azure/go-autorest import, won't actually become part of -// the resultant binary. - -// Necessary for safely adding multi-module repo. -// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository -import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/preparer.go b/vendor/github.com/Azure/go-autorest/autorest/preparer.go deleted file mode 100644 index 121a66f..0000000 --- a/vendor/github.com/Azure/go-autorest/autorest/preparer.go +++ /dev/null @@ -1,549 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "context" - "encoding/json" - "encoding/xml" - "fmt" - "io" - "io/ioutil" - "mime/multipart" - "net/http" - "net/url" - "strings" -) - -const ( - mimeTypeJSON = "application/json" - mimeTypeOctetStream = "application/octet-stream" - mimeTypeFormPost = "application/x-www-form-urlencoded" - - headerAuthorization = "Authorization" - headerAuxAuthorization = "x-ms-authorization-auxiliary" - headerContentType = "Content-Type" - headerUserAgent = "User-Agent" -) - -// used as a key type in context.WithValue() -type ctxPrepareDecorators struct{} - -// WithPrepareDecorators adds the specified PrepareDecorators to the provided context. -// If no PrepareDecorators are provided the context is unchanged. 
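A sketch of how the removed DetailedError composes with standard Go error wrapping via the Unwrap method above; the package and method strings are placeholders.

package main

import (
	"errors"
	"fmt"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	original := errors.New("boom")

	// A nil response yields StatusCode == UndefinedStatusCode (0).
	derr := autorest.NewErrorWithError(original, "examplepkg", "ExampleMethod", nil, "request failed")

	fmt.Println(derr)                      // examplepkg#ExampleMethod: request failed: StatusCode=0 -- Original Error: boom
	fmt.Println(errors.Is(derr, original)) // true, thanks to Unwrap
}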
-func WithPrepareDecorators(ctx context.Context, prepareDecorator []PrepareDecorator) context.Context { - if len(prepareDecorator) == 0 { - return ctx - } - return context.WithValue(ctx, ctxPrepareDecorators{}, prepareDecorator) -} - -// GetPrepareDecorators returns the PrepareDecorators in the provided context or the provided default PrepareDecorators. -func GetPrepareDecorators(ctx context.Context, defaultPrepareDecorators ...PrepareDecorator) []PrepareDecorator { - inCtx := ctx.Value(ctxPrepareDecorators{}) - if pd, ok := inCtx.([]PrepareDecorator); ok { - return pd - } - return defaultPrepareDecorators -} - -// Preparer is the interface that wraps the Prepare method. -// -// Prepare accepts and possibly modifies an http.Request (e.g., adding Headers). Implementations -// must ensure to not share or hold per-invocation state since Preparers may be shared and re-used. -type Preparer interface { - Prepare(*http.Request) (*http.Request, error) -} - -// PreparerFunc is a method that implements the Preparer interface. -type PreparerFunc func(*http.Request) (*http.Request, error) - -// Prepare implements the Preparer interface on PreparerFunc. -func (pf PreparerFunc) Prepare(r *http.Request) (*http.Request, error) { - return pf(r) -} - -// PrepareDecorator takes and possibly decorates, by wrapping, a Preparer. Decorators may affect the -// http.Request and pass it along or, first, pass the http.Request along then affect the result. -type PrepareDecorator func(Preparer) Preparer - -// CreatePreparer creates, decorates, and returns a Preparer. -// Without decorators, the returned Preparer returns the passed http.Request unmodified. -// Preparers are safe to share and re-use. -func CreatePreparer(decorators ...PrepareDecorator) Preparer { - return DecoratePreparer( - Preparer(PreparerFunc(func(r *http.Request) (*http.Request, error) { return r, nil })), - decorators...) -} - -// DecoratePreparer accepts a Preparer and a, possibly empty, set of PrepareDecorators, which it -// applies to the Preparer. Decorators are applied in the order received, but their affect upon the -// request depends on whether they are a pre-decorator (change the http.Request and then pass it -// along) or a post-decorator (pass the http.Request along and alter it on return). -func DecoratePreparer(p Preparer, decorators ...PrepareDecorator) Preparer { - for _, decorate := range decorators { - p = decorate(p) - } - return p -} - -// Prepare accepts an http.Request and a, possibly empty, set of PrepareDecorators. -// It creates a Preparer from the decorators which it then applies to the passed http.Request. -func Prepare(r *http.Request, decorators ...PrepareDecorator) (*http.Request, error) { - if r == nil { - return nil, NewError("autorest", "Prepare", "Invoked without an http.Request") - } - return CreatePreparer(decorators...).Prepare(r) -} - -// WithNothing returns a "do nothing" PrepareDecorator that makes no changes to the passed -// http.Request. -func WithNothing() PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - return p.Prepare(r) - }) - } -} - -// WithHeader returns a PrepareDecorator that sets the specified HTTP header of the http.Request to -// the passed value. It canonicalizes the passed header name (via http.CanonicalHeaderKey) before -// adding the header. 
-func WithHeader(header string, value string) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - setHeader(r, http.CanonicalHeaderKey(header), value) - } - return r, err - }) - } -} - -// WithHeaders returns a PrepareDecorator that sets the specified HTTP headers of the http.Request to -// the passed value. It canonicalizes the passed headers name (via http.CanonicalHeaderKey) before -// adding them. -func WithHeaders(headers map[string]interface{}) PrepareDecorator { - h := ensureValueStrings(headers) - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.Header == nil { - r.Header = make(http.Header) - } - - for name, value := range h { - r.Header.Set(http.CanonicalHeaderKey(name), value) - } - } - return r, err - }) - } -} - -// WithBearerAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose -// value is "Bearer " followed by the supplied token. -func WithBearerAuthorization(token string) PrepareDecorator { - return WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", token)) -} - -// AsContentType returns a PrepareDecorator that adds an HTTP Content-Type header whose value -// is the passed contentType. -func AsContentType(contentType string) PrepareDecorator { - return WithHeader(headerContentType, contentType) -} - -// WithUserAgent returns a PrepareDecorator that adds an HTTP User-Agent header whose value is the -// passed string. -func WithUserAgent(ua string) PrepareDecorator { - return WithHeader(headerUserAgent, ua) -} - -// AsFormURLEncoded returns a PrepareDecorator that adds an HTTP Content-Type header whose value is -// "application/x-www-form-urlencoded". -func AsFormURLEncoded() PrepareDecorator { - return AsContentType(mimeTypeFormPost) -} - -// AsJSON returns a PrepareDecorator that adds an HTTP Content-Type header whose value is -// "application/json". -func AsJSON() PrepareDecorator { - return AsContentType(mimeTypeJSON) -} - -// AsOctetStream returns a PrepareDecorator that adds the "application/octet-stream" Content-Type header. -func AsOctetStream() PrepareDecorator { - return AsContentType(mimeTypeOctetStream) -} - -// WithMethod returns a PrepareDecorator that sets the HTTP method of the passed request. The -// decorator does not validate that the passed method string is a known HTTP method. -func WithMethod(method string) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r.Method = method - return p.Prepare(r) - }) - } -} - -// AsDelete returns a PrepareDecorator that sets the HTTP method to DELETE. -func AsDelete() PrepareDecorator { return WithMethod("DELETE") } - -// AsGet returns a PrepareDecorator that sets the HTTP method to GET. -func AsGet() PrepareDecorator { return WithMethod("GET") } - -// AsHead returns a PrepareDecorator that sets the HTTP method to HEAD. -func AsHead() PrepareDecorator { return WithMethod("HEAD") } - -// AsMerge returns a PrepareDecorator that sets the HTTP method to MERGE. -func AsMerge() PrepareDecorator { return WithMethod("MERGE") } - -// AsOptions returns a PrepareDecorator that sets the HTTP method to OPTIONS. -func AsOptions() PrepareDecorator { return WithMethod("OPTIONS") } - -// AsPatch returns a PrepareDecorator that sets the HTTP method to PATCH. 
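The preparer half of the pipeline, sketched with decorators from this hunk; the URL, token, and header values are placeholders. Decorators run in the order given.

package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	req, err := http.NewRequest(http.MethodPost, "https://example.com", nil)
	if err != nil {
		panic(err)
	}

	req, err = autorest.Prepare(req,
		autorest.AsJSON(),
		autorest.WithBearerAuthorization("placeholder-token"),
		autorest.WithHeader("x-request-id", "placeholder-id"),
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(req.Header.Get("Authorization")) // Bearer placeholder-token
}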
-func AsPatch() PrepareDecorator { return WithMethod("PATCH") } - -// AsPost returns a PrepareDecorator that sets the HTTP method to POST. -func AsPost() PrepareDecorator { return WithMethod("POST") } - -// AsPut returns a PrepareDecorator that sets the HTTP method to PUT. -func AsPut() PrepareDecorator { return WithMethod("PUT") } - -// WithBaseURL returns a PrepareDecorator that populates the http.Request with a url.URL constructed -// from the supplied baseUrl. Query parameters will be encoded as required. -func WithBaseURL(baseURL string) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - var u *url.URL - if u, err = url.Parse(baseURL); err != nil { - return r, err - } - if u.Scheme == "" { - return r, fmt.Errorf("autorest: No scheme detected in URL %s", baseURL) - } - if u.RawQuery != "" { - // handle unencoded semicolons (ideally the server would send them already encoded) - u.RawQuery = strings.Replace(u.RawQuery, ";", "%3B", -1) - q, err := url.ParseQuery(u.RawQuery) - if err != nil { - return r, err - } - u.RawQuery = q.Encode() - } - r.URL = u - } - return r, err - }) - } -} - -// WithBytes returns a PrepareDecorator that takes a list of bytes -// which passes the bytes directly to the body -func WithBytes(input *[]byte) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if input == nil { - return r, fmt.Errorf("Input Bytes was nil") - } - - r.ContentLength = int64(len(*input)) - r.Body = ioutil.NopCloser(bytes.NewReader(*input)) - } - return r, err - }) - } -} - -// WithCustomBaseURL returns a PrepareDecorator that replaces brace-enclosed keys within the -// request base URL (i.e., http.Request.URL) with the corresponding values from the passed map. -func WithCustomBaseURL(baseURL string, urlParameters map[string]interface{}) PrepareDecorator { - parameters := ensureValueStrings(urlParameters) - for key, value := range parameters { - baseURL = strings.Replace(baseURL, "{"+key+"}", value, -1) - } - return WithBaseURL(baseURL) -} - -// WithFormData returns a PrepareDecoratore that "URL encodes" (e.g., bar=baz&foo=quux) into the -// http.Request body. -func WithFormData(v url.Values) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - s := v.Encode() - - setHeader(r, http.CanonicalHeaderKey(headerContentType), mimeTypeFormPost) - r.ContentLength = int64(len(s)) - r.Body = ioutil.NopCloser(strings.NewReader(s)) - } - return r, err - }) - } -} - -// WithMultiPartFormData returns a PrepareDecoratore that "URL encodes" (e.g., bar=baz&foo=quux) form parameters -// into the http.Request body. 
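WithBaseURL and the body decorators combine the same way; a sketch of a form POST built up from an empty request, with a placeholder URL and form values.

package main

import (
	"fmt"
	"net/http"
	"net/url"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	req, err := autorest.Prepare(&http.Request{},
		autorest.AsPost(),
		autorest.WithBaseURL("https://example.com/token"),
		autorest.WithFormData(url.Values{"grant_type": []string{"client_credentials"}}),
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(req.Method, req.URL, req.ContentLength)
}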
-func WithMultiPartFormData(formDataParameters map[string]interface{}) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - var body bytes.Buffer - writer := multipart.NewWriter(&body) - for key, value := range formDataParameters { - if rc, ok := value.(io.ReadCloser); ok { - var fd io.Writer - if fd, err = writer.CreateFormFile(key, key); err != nil { - return r, err - } - if _, err = io.Copy(fd, rc); err != nil { - return r, err - } - } else { - if err = writer.WriteField(key, ensureValueString(value)); err != nil { - return r, err - } - } - } - if err = writer.Close(); err != nil { - return r, err - } - setHeader(r, http.CanonicalHeaderKey(headerContentType), writer.FormDataContentType()) - r.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes())) - r.ContentLength = int64(body.Len()) - return r, err - } - return r, err - }) - } -} - -// WithFile returns a PrepareDecorator that sends file in request body. -func WithFile(f io.ReadCloser) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - b, err := ioutil.ReadAll(f) - if err != nil { - return r, err - } - r.Body = ioutil.NopCloser(bytes.NewReader(b)) - r.ContentLength = int64(len(b)) - } - return r, err - }) - } -} - -// WithBool returns a PrepareDecorator that encodes the passed bool into the body of the request -// and sets the Content-Length header. -func WithBool(v bool) PrepareDecorator { - return WithString(fmt.Sprintf("%v", v)) -} - -// WithFloat32 returns a PrepareDecorator that encodes the passed float32 into the body of the -// request and sets the Content-Length header. -func WithFloat32(v float32) PrepareDecorator { - return WithString(fmt.Sprintf("%v", v)) -} - -// WithFloat64 returns a PrepareDecorator that encodes the passed float64 into the body of the -// request and sets the Content-Length header. -func WithFloat64(v float64) PrepareDecorator { - return WithString(fmt.Sprintf("%v", v)) -} - -// WithInt32 returns a PrepareDecorator that encodes the passed int32 into the body of the request -// and sets the Content-Length header. -func WithInt32(v int32) PrepareDecorator { - return WithString(fmt.Sprintf("%v", v)) -} - -// WithInt64 returns a PrepareDecorator that encodes the passed int64 into the body of the request -// and sets the Content-Length header. -func WithInt64(v int64) PrepareDecorator { - return WithString(fmt.Sprintf("%v", v)) -} - -// WithString returns a PrepareDecorator that encodes the passed string into the body of the request -// and sets the Content-Length header. -func WithString(v string) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - r.ContentLength = int64(len(v)) - r.Body = ioutil.NopCloser(strings.NewReader(v)) - } - return r, err - }) - } -} - -// WithJSON returns a PrepareDecorator that encodes the data passed as JSON into the body of the -// request and sets the Content-Length header. 
-func WithJSON(v interface{}) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - b, err := json.Marshal(v) - if err == nil { - r.ContentLength = int64(len(b)) - r.Body = ioutil.NopCloser(bytes.NewReader(b)) - } - } - return r, err - }) - } -} - -// WithXML returns a PrepareDecorator that encodes the data passed as XML into the body of the -// request and sets the Content-Length header. -func WithXML(v interface{}) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - b, err := xml.Marshal(v) - if err == nil { - // we have to tack on an XML header - withHeader := xml.Header + string(b) - bytesWithHeader := []byte(withHeader) - - r.ContentLength = int64(len(bytesWithHeader)) - setHeader(r, headerContentLength, fmt.Sprintf("%d", len(bytesWithHeader))) - r.Body = ioutil.NopCloser(bytes.NewReader(bytesWithHeader)) - } - } - return r, err - }) - } -} - -// WithPath returns a PrepareDecorator that adds the supplied path to the request URL. If the path -// is absolute (that is, it begins with a "/"), it replaces the existing path. -func WithPath(path string) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.URL == nil { - return r, NewError("autorest", "WithPath", "Invoked with a nil URL") - } - if r.URL, err = parseURL(r.URL, path); err != nil { - return r, err - } - } - return r, err - }) - } -} - -// WithEscapedPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the -// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. The -// values will be escaped (aka URL encoded) before insertion into the path. -func WithEscapedPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator { - parameters := escapeValueStrings(ensureValueStrings(pathParameters)) - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.URL == nil { - return r, NewError("autorest", "WithEscapedPathParameters", "Invoked with a nil URL") - } - for key, value := range parameters { - path = strings.Replace(path, "{"+key+"}", value, -1) - } - if r.URL, err = parseURL(r.URL, path); err != nil { - return r, err - } - } - return r, err - }) - } -} - -// WithPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the -// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. 
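Path templating plus a JSON body, sketched; the vault identifier and payload are placeholders. WithEscapedPathParameters URL-escapes each substituted value before it lands in the path, and WithQueryParameters (defined just below) composes the same way.

package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	req, err := autorest.Prepare(&http.Request{},
		autorest.AsPut(),
		autorest.WithBaseURL("https://example.com"),
		autorest.WithEscapedPathParameters("/vaults/{vaultID}", map[string]interface{}{
			"vaultID": "id with spaces", // URL-escaped before substitution
		}),
		autorest.WithJSON(map[string]string{"name": "placeholder"}),
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(req.URL, req.ContentLength)
}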
-func WithPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator { - parameters := ensureValueStrings(pathParameters) - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.URL == nil { - return r, NewError("autorest", "WithPathParameters", "Invoked with a nil URL") - } - for key, value := range parameters { - path = strings.Replace(path, "{"+key+"}", value, -1) - } - - if r.URL, err = parseURL(r.URL, path); err != nil { - return r, err - } - } - return r, err - }) - } -} - -func parseURL(u *url.URL, path string) (*url.URL, error) { - p := strings.TrimRight(u.String(), "/") - if !strings.HasPrefix(path, "/") { - path = "/" + path - } - return url.Parse(p + path) -} - -// WithQueryParameters returns a PrepareDecorators that encodes and applies the query parameters -// given in the supplied map (i.e., key=value). -func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorator { - parameters := MapToValues(queryParameters) - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.URL == nil { - return r, NewError("autorest", "WithQueryParameters", "Invoked with a nil URL") - } - v := r.URL.Query() - for key, value := range parameters { - for i := range value { - d, err := url.QueryUnescape(value[i]) - if err != nil { - return r, err - } - value[i] = d - } - v[key] = value - } - r.URL.RawQuery = v.Encode() - } - return r, err - }) - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/responder.go b/vendor/github.com/Azure/go-autorest/autorest/responder.go deleted file mode 100644 index 349e196..0000000 --- a/vendor/github.com/Azure/go-autorest/autorest/responder.go +++ /dev/null @@ -1,269 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "encoding/json" - "encoding/xml" - "fmt" - "io" - "io/ioutil" - "net/http" - "strings" -) - -// Responder is the interface that wraps the Respond method. -// -// Respond accepts and reacts to an http.Response. Implementations must ensure to not share or hold -// state since Responders may be shared and re-used. -type Responder interface { - Respond(*http.Response) error -} - -// ResponderFunc is a method that implements the Responder interface. -type ResponderFunc func(*http.Response) error - -// Respond implements the Responder interface on ResponderFunc. -func (rf ResponderFunc) Respond(r *http.Response) error { - return rf(r) -} - -// RespondDecorator takes and possibly decorates, by wrapping, a Responder. Decorators may react to -// the http.Response and pass it along or, first, pass the http.Response along then react. -type RespondDecorator func(Responder) Responder - -// CreateResponder creates, decorates, and returns a Responder. 
Without decorators, the returned -// Responder returns the passed http.Response unmodified. Responders may or may not be safe to share -// and re-use: it depends on the applied decorators. For example, a standard decorator that closes -// the response body is fine to share whereas a decorator that reads the body into a passed struct -// is not. -// -// To prevent memory leaks, ensure that at least one Responder closes the response body. -func CreateResponder(decorators ...RespondDecorator) Responder { - return DecorateResponder( - Responder(ResponderFunc(func(r *http.Response) error { return nil })), - decorators...) -} - -// DecorateResponder accepts a Responder and a, possibly empty, set of RespondDecorators, which it -// applies to the Responder. Decorators are applied in the order received, but their effect upon the -// response depends on whether they are a pre-decorator (react to the http.Response and then pass it -// along) or a post-decorator (pass the http.Response along and then react). -func DecorateResponder(r Responder, decorators ...RespondDecorator) Responder { - for _, decorate := range decorators { - r = decorate(r) - } - return r -} - -// Respond accepts an http.Response and a, possibly empty, set of RespondDecorators. -// It creates a Responder from the decorators, which it then applies to the passed http.Response. -func Respond(r *http.Response, decorators ...RespondDecorator) error { - if r == nil { - return nil - } - return CreateResponder(decorators...).Respond(r) -} - -// ByIgnoring returns a RespondDecorator that ignores the passed http.Response, passing it unexamined -// to the next RespondDecorator. -func ByIgnoring() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - return r.Respond(resp) - }) - } -} - -// ByCopying copies the contents of the http.Response Body into the passed bytes.Buffer as -// the Body is read. -func ByCopying(b *bytes.Buffer) RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil && resp != nil && resp.Body != nil { - resp.Body = TeeReadCloser(resp.Body, b) - } - return err - }) - } - } - -// ByDiscardingBody returns a RespondDecorator that first invokes the passed Responder after which -// it copies the remaining bytes (if any) in the response body to ioutil.Discard. Since the passed -// Responder is invoked prior to discarding the response body, the decorator may occur anywhere -// within the set. -func ByDiscardingBody() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil && resp != nil && resp.Body != nil { - if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil { - return fmt.Errorf("Error discarding the response body: %v", err) - } - } - return err - }) - } -} - -// ByClosing returns a RespondDecorator that first invokes the passed Responder after which it -// closes the response body. Since the passed Responder is invoked prior to closing the response -// body, the decorator may occur anywhere within the set.
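A sketch of responder composition using helpers from this hunk: ByCopying tees the body into a buffer as ByDiscardingBody drains it. The fabricated response stands in for a live call, and a real pipeline would also add ByClosing (defined below).

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"strings"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	resp := &http.Response{Body: io.NopCloser(strings.NewReader(`{"ok":true}`))}

	var buf bytes.Buffer
	err := autorest.Respond(resp,
		autorest.ByCopying(&buf),    // wraps the body in a TeeReadCloser
		autorest.ByDiscardingBody(), // draining the tee fills buf
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // {"ok":true}
}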
-func ByClosing() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if resp != nil && resp.Body != nil { - if err := resp.Body.Close(); err != nil { - return fmt.Errorf("Error closing the response body: %v", err) - } - } - return err - }) - } -} - -// ByClosingIfError returns a RespondDecorator that first invokes the passed Responder after which -// it closes the response if the passed Responder returns an error and the response body exists. -func ByClosingIfError() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err != nil && resp != nil && resp.Body != nil { - if err := resp.Body.Close(); err != nil { - return fmt.Errorf("Error closing the response body: %v", err) - } - } - return err - }) - } -} - -// ByUnmarshallingBytes returns a RespondDecorator that copies the Bytes returned in the -// response Body into the value pointed to by v. -func ByUnmarshallingBytes(v *[]byte) RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil { - bytes, errInner := ioutil.ReadAll(resp.Body) - if errInner != nil { - err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) - } else { - *v = bytes - } - } - return err - }) - } -} - -// ByUnmarshallingJSON returns a RespondDecorator that decodes a JSON document returned in the -// response Body into the value pointed to by v. -func ByUnmarshallingJSON(v interface{}) RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil { - b, errInner := ioutil.ReadAll(resp.Body) - // Some responses might include a BOM, remove for successful unmarshalling - b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf")) - if errInner != nil { - err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) - } else if len(strings.Trim(string(b), " ")) > 0 { - errInner = json.Unmarshal(b, v) - if errInner != nil { - err = fmt.Errorf("Error occurred unmarshalling JSON - Error = '%v' JSON = '%s'", errInner, string(b)) - } - } - } - return err - }) - } -} - -// ByUnmarshallingXML returns a RespondDecorator that decodes a XML document returned in the -// response Body into the value pointed to by v. -func ByUnmarshallingXML(v interface{}) RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil { - b, errInner := ioutil.ReadAll(resp.Body) - if errInner != nil { - err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) - } else { - errInner = xml.Unmarshal(b, v) - if errInner != nil { - err = fmt.Errorf("Error occurred unmarshalling Xml - Error = '%v' Xml = '%s'", errInner, string(b)) - } - } - } - return err - }) - } -} - -// WithErrorUnlessStatusCode returns a RespondDecorator that emits an error unless the response -// StatusCode is among the set passed. On error, response body is fully read into a buffer and -// presented in the returned error, as well as in the response body. -func WithErrorUnlessStatusCode(codes ...int) RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil && !ResponseHasStatusCode(resp, codes...) 
{ - derr := NewErrorWithResponse("autorest", "WithErrorUnlessStatusCode", resp, "%v %v failed with %s", - resp.Request.Method, - resp.Request.URL, - resp.Status) - if resp.Body != nil { - defer resp.Body.Close() - b, _ := ioutil.ReadAll(resp.Body) - derr.ServiceError = b - resp.Body = ioutil.NopCloser(bytes.NewReader(b)) - } - err = derr - } - return err - }) - } -} - -// WithErrorUnlessOK returns a RespondDecorator that emits an error if the response StatusCode is -// anything other than HTTP 200. -func WithErrorUnlessOK() RespondDecorator { - return WithErrorUnlessStatusCode(http.StatusOK) -} - -// ExtractHeader extracts all values of the specified header from the http.Response. It returns an -// empty string slice if the passed http.Response is nil or the header does not exist. -func ExtractHeader(header string, resp *http.Response) []string { - if resp != nil && resp.Header != nil { - return resp.Header[http.CanonicalHeaderKey(header)] - } - return nil -} - -// ExtractHeaderValue extracts the first value of the specified header from the http.Response. It -// returns an empty string if the passed http.Response is nil or the header does not exist. -func ExtractHeaderValue(header string, resp *http.Response) string { - h := ExtractHeader(header, resp) - if len(h) > 0 { - return h[0] - } - return "" -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go deleted file mode 100644 index fa11dbe..0000000 --- a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go +++ /dev/null @@ -1,52 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "io" - "io/ioutil" - "net/http" -) - -// NewRetriableRequest returns a wrapper around an HTTP request that support retry logic. -func NewRetriableRequest(req *http.Request) *RetriableRequest { - return &RetriableRequest{req: req} -} - -// Request returns the wrapped HTTP request. 
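
A detail in the WithErrorUnlessStatusCode deletion above that is easy to miss: on a failing status it consumes the response body into the returned error, then rehydrates resp.Body from the buffered bytes so downstream responders are not handed a drained stream. The same trick with the modern io equivalents (io.ReadAll and io.NopCloser replaced the ioutil versions in Go 1.16):

package sketch

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

// checkStatus returns an error unless resp carries one of the allowed codes,
// capturing the body in the error while leaving it readable for the caller.
func checkStatus(resp *http.Response, allowed ...int) error {
	for _, c := range allowed {
		if resp.StatusCode == c {
			return nil
		}
	}
	var body []byte
	if resp.Body != nil {
		defer resp.Body.Close()
		body, _ = io.ReadAll(resp.Body)
		resp.Body = io.NopCloser(bytes.NewReader(body)) // restore for the caller
	}
	return fmt.Errorf("%s %s failed with %s: %q",
		resp.Request.Method, resp.Request.URL, resp.Status, body)
}
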
-func (rr *RetriableRequest) Request() *http.Request { - return rr.req -} - -func (rr *RetriableRequest) prepareFromByteReader() (err error) { - // fall back to making a copy (only do this once) - b := []byte{} - if rr.req.ContentLength > 0 { - b = make([]byte, rr.req.ContentLength) - _, err = io.ReadFull(rr.req.Body, b) - if err != nil { - return err - } - } else { - b, err = ioutil.ReadAll(rr.req.Body) - if err != nil { - return err - } - } - rr.br = bytes.NewReader(b) - rr.req.Body = ioutil.NopCloser(rr.br) - return err -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go deleted file mode 100644 index 4c87030..0000000 --- a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go +++ /dev/null @@ -1,55 +0,0 @@ -//go:build !go1.8 -// +build !go1.8 - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package autorest - -import ( - "bytes" - "io/ioutil" - "net/http" -) - -// RetriableRequest provides facilities for retrying an HTTP request. -type RetriableRequest struct { - req *http.Request - br *bytes.Reader -} - -// Prepare signals that the request is about to be sent. -func (rr *RetriableRequest) Prepare() (err error) { - // preserve the request body; this is to support retry logic as - // the underlying transport will always close the reqeust body - if rr.req.Body != nil { - if rr.br != nil { - _, err = rr.br.Seek(0, 0 /*io.SeekStart*/) - rr.req.Body = ioutil.NopCloser(rr.br) - } - if err != nil { - return err - } - if rr.br == nil { - // fall back to making a copy (only do this once) - err = rr.prepareFromByteReader() - } - } - return err -} - -func removeRequestBody(req *http.Request) { - req.Body = nil - req.ContentLength = 0 -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go deleted file mode 100644 index 05847c0..0000000 --- a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go +++ /dev/null @@ -1,67 +0,0 @@ -//go:build go1.8 -// +build go1.8 - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package autorest - -import ( - "bytes" - "io" - "io/ioutil" - "net/http" -) - -// RetriableRequest provides facilities for retrying an HTTP request. 
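
Both retriablerequest variants exist to make a request body replayable across retries: this pre-go1.8 version buffers the body once into a bytes.Reader and Seeks back to the start on every attempt, while the go1.8 file that follows prefers req.GetBody, which hands out a fresh ReadCloser per attempt. The standard library now covers the common case by itself; a minimal sketch of that route:

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	payload := []byte(`{"vault":"example"}`)
	// For *bytes.Reader (and *bytes.Buffer, *strings.Reader) bodies,
	// http.NewRequest populates req.GetBody automatically.
	req, err := http.NewRequest(http.MethodPost, "https://example.com", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	// Before each retry, swap the consumed body for a fresh copy.
	fresh, err := req.GetBody()
	if err != nil {
		panic(err)
	}
	req.Body = fresh
	fmt.Println(req.ContentLength) // 19, stable across attempts
}
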
-type RetriableRequest struct { - req *http.Request - rc io.ReadCloser - br *bytes.Reader -} - -// Prepare signals that the request is about to be sent. -func (rr *RetriableRequest) Prepare() (err error) { - // preserve the request body; this is to support retry logic as - // the underlying transport will always close the reqeust body - if rr.req.Body != nil { - if rr.rc != nil { - rr.req.Body = rr.rc - } else if rr.br != nil { - _, err = rr.br.Seek(0, io.SeekStart) - rr.req.Body = ioutil.NopCloser(rr.br) - } - if err != nil { - return err - } - if rr.req.GetBody != nil { - // this will allow us to preserve the body without having to - // make a copy. note we need to do this on each iteration - rr.rc, err = rr.req.GetBody() - if err != nil { - return err - } - } else if rr.br == nil { - // fall back to making a copy (only do this once) - err = rr.prepareFromByteReader() - } - } - return err -} - -func removeRequestBody(req *http.Request) { - req.Body = nil - req.GetBody = nil - req.ContentLength = 0 -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/sender.go b/vendor/github.com/Azure/go-autorest/autorest/sender.go deleted file mode 100644 index 118de81..0000000 --- a/vendor/github.com/Azure/go-autorest/autorest/sender.go +++ /dev/null @@ -1,458 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "context" - "crypto/tls" - "fmt" - "log" - "math" - "net" - "net/http" - "net/http/cookiejar" - "strconv" - "sync" - "time" - - "github.com/Azure/go-autorest/logger" - "github.com/Azure/go-autorest/tracing" -) - -// there is one sender per TLS renegotiation type, i.e. count of tls.RenegotiationSupport enums -const defaultSendersCount = 3 - -type defaultSender struct { - sender Sender - init *sync.Once -} - -// each type of sender will be created on demand in sender() -var defaultSenders [defaultSendersCount]defaultSender - -func init() { - for i := 0; i < defaultSendersCount; i++ { - defaultSenders[i].init = &sync.Once{} - } -} - -// used as a key type in context.WithValue() -type ctxSendDecorators struct{} - -// WithSendDecorators adds the specified SendDecorators to the provided context. -// If no SendDecorators are provided the context is unchanged. -func WithSendDecorators(ctx context.Context, sendDecorator []SendDecorator) context.Context { - if len(sendDecorator) == 0 { - return ctx - } - return context.WithValue(ctx, ctxSendDecorators{}, sendDecorator) -} - -// GetSendDecorators returns the SendDecorators in the provided context or the provided default SendDecorators. -func GetSendDecorators(ctx context.Context, defaultSendDecorators ...SendDecorator) []SendDecorator { - inCtx := ctx.Value(ctxSendDecorators{}) - if sd, ok := inCtx.([]SendDecorator); ok { - return sd - } - return defaultSendDecorators -} - -// Sender is the interface that wraps the Do method to send HTTP requests. -// -// The standard http.Client conforms to this interface. 
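
WithSendDecorators and GetSendDecorators in the sender.go deletion above use the idiomatic pattern for carrying per-request middleware through a context.Context: the key is an unexported empty struct type, so no other package can construct it and collide. The mechanism in isolation (Decorator is a placeholder type, not the package's SendDecorator):

package main

import (
	"context"
	"fmt"
)

// ctxDecorators is an unexported, zero-size key type: it costs nothing to
// allocate and cannot clash with context keys defined elsewhere.
type ctxDecorators struct{}

type Decorator func(string) string

func WithDecorators(ctx context.Context, ds []Decorator) context.Context {
	if len(ds) == 0 {
		return ctx // nothing to carry; leave the context untouched
	}
	return context.WithValue(ctx, ctxDecorators{}, ds)
}

func GetDecorators(ctx context.Context, defaults ...Decorator) []Decorator {
	if ds, ok := ctx.Value(ctxDecorators{}).([]Decorator); ok {
		return ds
	}
	return defaults
}

func main() {
	shout := func(s string) string { return s + "!" }
	ctx := WithDecorators(context.Background(), []Decorator{shout})
	fmt.Println(len(GetDecorators(ctx))) // 1
}
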
-type Sender interface { - Do(*http.Request) (*http.Response, error) -} - -// SenderFunc is a method that implements the Sender interface. -type SenderFunc func(*http.Request) (*http.Response, error) - -// Do implements the Sender interface on SenderFunc. -func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) { - return sf(r) -} - -// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the -// http.Request and pass it along or, first, pass the http.Request along then react to the -// http.Response result. -type SendDecorator func(Sender) Sender - -// CreateSender creates, decorates, and returns, as a Sender, the default http.Client. -func CreateSender(decorators ...SendDecorator) Sender { - return DecorateSender(sender(tls.RenegotiateNever), decorators...) -} - -// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which is applies to -// the Sender. Decorators are applied in the order received, but their affect upon the request -// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a -// post-decorator (pass the http.Request along and react to the results in http.Response). -func DecorateSender(s Sender, decorators ...SendDecorator) Sender { - for _, decorate := range decorators { - s = decorate(s) - } - return s -} - -// Send sends, by means of the default http.Client, the passed http.Request, returning the -// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which -// it will apply the http.Client before invoking the Do method. -// -// Send is a convenience method and not recommended for production. Advanced users should use -// SendWithSender, passing and sharing their own Sender (e.g., instance of http.Client). -// -// Send will not poll or retry requests. -func Send(r *http.Request, decorators ...SendDecorator) (*http.Response, error) { - return SendWithSender(sender(tls.RenegotiateNever), r, decorators...) -} - -// SendWithSender sends the passed http.Request, through the provided Sender, returning the -// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which -// it will apply the http.Client before invoking the Do method. -// -// SendWithSender will not poll or retry requests. -func SendWithSender(s Sender, r *http.Request, decorators ...SendDecorator) (*http.Response, error) { - return DecorateSender(s, decorators...).Do(r) -} - -func sender(renengotiation tls.RenegotiationSupport) Sender { - // note that we can't init defaultSenders in init() since it will - // execute before calling code has had a chance to enable tracing - defaultSenders[renengotiation].init.Do(func() { - // copied from http.DefaultTransport with a TLS minimum version. 
- transport := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).DialContext, - ForceAttemptHTTP2: true, - MaxIdleConns: 100, - IdleConnTimeout: 90 * time.Second, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - TLSClientConfig: &tls.Config{ - MinVersion: tls.VersionTLS12, - Renegotiation: renengotiation, - }, - } - var roundTripper http.RoundTripper = transport - if tracing.IsEnabled() { - roundTripper = tracing.NewTransport(transport) - } - j, _ := cookiejar.New(nil) - defaultSenders[renengotiation].sender = &http.Client{Jar: j, Transport: roundTripper} - }) - return defaultSenders[renengotiation].sender -} - -// AfterDelay returns a SendDecorator that delays for the passed time.Duration before -// invoking the Sender. The delay may be terminated by closing the optional channel on the -// http.Request. If canceled, no further Senders are invoked. -func AfterDelay(d time.Duration) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - if !DelayForBackoff(d, 0, r.Context().Done()) { - return nil, fmt.Errorf("autorest: AfterDelay canceled before full delay") - } - return s.Do(r) - }) - } -} - -// AsIs returns a SendDecorator that invokes the passed Sender without modifying the http.Request. -func AsIs() SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - return s.Do(r) - }) - } -} - -// DoCloseIfError returns a SendDecorator that first invokes the passed Sender after which -// it closes the response if the passed Sender returns an error and the response body exists. -func DoCloseIfError() SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - resp, err := s.Do(r) - if err != nil { - Respond(resp, ByDiscardingBody(), ByClosing()) - } - return resp, err - }) - } -} - -// DoErrorIfStatusCode returns a SendDecorator that emits an error if the response StatusCode is -// among the set passed. Since these are artificial errors, the response body may still require -// closing. -func DoErrorIfStatusCode(codes ...int) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - resp, err := s.Do(r) - if err == nil && ResponseHasStatusCode(resp, codes...) { - err = NewErrorWithResponse("autorest", "DoErrorIfStatusCode", resp, "%v %v failed with %s", - resp.Request.Method, - resp.Request.URL, - resp.Status) - } - return resp, err - }) - } -} - -// DoErrorUnlessStatusCode returns a SendDecorator that emits an error unless the response -// StatusCode is among the set passed. Since these are artificial errors, the response body -// may still require closing. -func DoErrorUnlessStatusCode(codes ...int) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - resp, err := s.Do(r) - if err == nil && !ResponseHasStatusCode(resp, codes...) { - err = NewErrorWithResponse("autorest", "DoErrorUnlessStatusCode", resp, "%v %v failed with %s", - resp.Request.Method, - resp.Request.URL, - resp.Status) - } - return resp, err - }) - } -} - -// DoPollForStatusCodes returns a SendDecorator that polls if the http.Response contains one of the -// passed status codes. 
It expects the http.Response to contain a Location header providing the -// URL at which to poll (using GET) and will poll until the time passed is equal to or greater than -// the supplied duration. It will delay between requests for the duration specified in the -// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled by -// closing the optional channel on the http.Request. -func DoPollForStatusCodes(duration time.Duration, delay time.Duration, codes ...int) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - resp, err = s.Do(r) - - if err == nil && ResponseHasStatusCode(resp, codes...) { - r, err = NewPollingRequestWithContext(r.Context(), resp) - - for err == nil && ResponseHasStatusCode(resp, codes...) { - Respond(resp, - ByDiscardingBody(), - ByClosing()) - resp, err = SendWithSender(s, r, - AfterDelay(GetRetryAfter(resp, delay))) - } - } - - return resp, err - }) - } -} - -// DoRetryForAttempts returns a SendDecorator that retries a failed request for up to the specified -// number of attempts, exponentially backing off between requests using the supplied backoff -// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on -// the http.Request. -func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - rr := NewRetriableRequest(r) - for attempt := 0; attempt < attempts; attempt++ { - err = rr.Prepare() - if err != nil { - return resp, err - } - DrainResponseBody(resp) - resp, err = s.Do(rr.Request()) - if err == nil { - return resp, err - } - logger.Instance.Writef(logger.LogError, "DoRetryForAttempts: received error for attempt %d: %v\n", attempt+1, err) - if !DelayForBackoff(backoff, attempt, r.Context().Done()) { - return nil, r.Context().Err() - } - } - return resp, err - }) - } -} - -// Count429AsRetry indicates that a 429 response should be included as a retry attempt. -var Count429AsRetry = true - -// Max429Delay is the maximum duration to wait between retries on a 429 if no Retry-After header was received. -var Max429Delay time.Duration - -// DoRetryForStatusCodes returns a SendDecorator that retries for specified statusCodes for up to the specified -// number of attempts, exponentially backing off between requests using the supplied backoff -// time.Duration (which may be zero). Retrying may be canceled by cancelling the context on the http.Request. -// NOTE: Code http.StatusTooManyRequests (429) will *not* be counted against the number of attempts. -func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - return doRetryForStatusCodesImpl(s, r, Count429AsRetry, attempts, backoff, 0, codes...) - }) - } -} - -// DoRetryForStatusCodesWithCap returns a SendDecorator that retries for specified statusCodes for up to the -// specified number of attempts, exponentially backing off between requests using the supplied backoff -// time.Duration (which may be zero). To cap the maximum possible delay between iterations specify a value greater -// than zero for cap. Retrying may be canceled by cancelling the context on the http.Request. 
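
The organizing idea of sender.go, visible throughout the deletions above, is that anything with a Do(*http.Request) method, including *http.Client, is a Sender, so retry, polling, and logging are all just wrappers around Do. A compressed sketch of the DoRetryForStatusCodes control flow under that interface (Retry-After handling, 429 accounting, and body rewinding omitted here):

package sketch

import (
	"net/http"
	"time"
)

// Sender is satisfied by *http.Client; SenderFunc adapts plain functions,
// matching the shape declared in the removed file.
type Sender interface {
	Do(*http.Request) (*http.Response, error)
}

type SenderFunc func(*http.Request) (*http.Response, error)

func (f SenderFunc) Do(r *http.Request) (*http.Response, error) { return f(r) }

// retryOn wraps any Sender and retries while the status is in codes. A real
// implementation must also drain resp.Body between attempts and replay the
// request body (see the RetriableRequest files above).
func retryOn(s Sender, attempts int, backoff time.Duration, codes ...int) Sender {
	return SenderFunc(func(r *http.Request) (*http.Response, error) {
		var resp *http.Response
		var err error
		for i := 0; i <= attempts; i++ {
			resp, err = s.Do(r)
			if err != nil || !hasStatus(resp, codes...) {
				return resp, err
			}
			select {
			case <-time.After(backoff << i): // backoff * 2^i
			case <-r.Context().Done():
				return resp, r.Context().Err()
			}
		}
		return resp, err
	})
}

func hasStatus(resp *http.Response, codes ...int) bool {
	for _, c := range codes {
		if resp.StatusCode == c {
			return true
		}
	}
	return false
}

// Usage: s := retryOn(http.DefaultClient, 3, 100*time.Millisecond, 502, 503)
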
-func DoRetryForStatusCodesWithCap(attempts int, backoff, cap time.Duration, codes ...int) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - return doRetryForStatusCodesImpl(s, r, Count429AsRetry, attempts, backoff, cap, codes...) - }) - } -} - -func doRetryForStatusCodesImpl(s Sender, r *http.Request, count429 bool, attempts int, backoff, cap time.Duration, codes ...int) (resp *http.Response, err error) { - rr := NewRetriableRequest(r) - // Increment to add the first call (attempts denotes number of retries) - for attempt, delayCount := 0, 0; attempt < attempts+1; { - err = rr.Prepare() - if err != nil { - return - } - DrainResponseBody(resp) - resp, err = s.Do(rr.Request()) - // we want to retry if err is not nil (e.g. transient network failure). note that for failed authentication - // resp and err will both have a value, so in this case we don't want to retry as it will never succeed. - if err == nil && !ResponseHasStatusCode(resp, codes...) || IsTokenRefreshError(err) { - return resp, err - } - if err != nil { - logger.Instance.Writef(logger.LogError, "DoRetryForStatusCodes: received error for attempt %d: %v\n", attempt+1, err) - } - delayed := DelayWithRetryAfter(resp, r.Context().Done()) - // if this was a 429 set the delay cap as specified. - // applicable only in the absence of a retry-after header. - if resp != nil && resp.StatusCode == http.StatusTooManyRequests { - cap = Max429Delay - } - if !delayed && !DelayForBackoffWithCap(backoff, cap, delayCount, r.Context().Done()) { - return resp, r.Context().Err() - } - // when count429 == false don't count a 429 against the number - // of attempts so that we continue to retry until it succeeds - if count429 || (resp == nil || resp.StatusCode != http.StatusTooManyRequests) { - attempt++ - } - // delay count is tracked separately from attempts to - // ensure that 429 participates in exponential back-off - delayCount++ - } - return resp, err -} - -// DelayWithRetryAfter invokes time.After for the duration specified in the "Retry-After" header. -// The value of Retry-After can be either the number of seconds or a date in RFC1123 format. -// The function returns true after successfully waiting for the specified duration. If there is -// no Retry-After header or the wait is cancelled the return value is false. -func DelayWithRetryAfter(resp *http.Response, cancel <-chan struct{}) bool { - if resp == nil { - return false - } - var dur time.Duration - ra := resp.Header.Get("Retry-After") - if retryAfter, _ := strconv.Atoi(ra); retryAfter > 0 { - dur = time.Duration(retryAfter) * time.Second - } else if t, err := time.Parse(time.RFC1123, ra); err == nil { - dur = t.Sub(time.Now()) - } - if dur > 0 { - select { - case <-time.After(dur): - return true - case <-cancel: - return false - } - } - return false -} - -// DoRetryForDuration returns a SendDecorator that retries the request until the total time is equal -// to or greater than the specified duration, exponentially backing off between requests using the -// supplied backoff time.Duration (which may be zero). Retrying may be canceled by closing the -// optional channel on the http.Request. 
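
DelayWithRetryAfter above handles both legal encodings of the Retry-After header: an integer second count or an RFC 1123 date. The same parsing as a plain function, with time.Until in place of the t.Sub(time.Now()) form the removed code uses:

package sketch

import (
	"net/http"
	"strconv"
	"time"
)

// retryAfter returns the server-requested delay, falling back to the
// caller's default when the header is absent or malformed.
func retryAfter(resp *http.Response, fallback time.Duration) time.Duration {
	if resp == nil {
		return fallback
	}
	ra := resp.Header.Get("Retry-After")
	if secs, err := strconv.Atoi(ra); err == nil && secs > 0 {
		return time.Duration(secs) * time.Second
	}
	if t, err := time.Parse(time.RFC1123, ra); err == nil {
		return time.Until(t)
	}
	return fallback
}
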
-func DoRetryForDuration(d time.Duration, backoff time.Duration) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - rr := NewRetriableRequest(r) - end := time.Now().Add(d) - for attempt := 0; time.Now().Before(end); attempt++ { - err = rr.Prepare() - if err != nil { - return resp, err - } - DrainResponseBody(resp) - resp, err = s.Do(rr.Request()) - if err == nil { - return resp, err - } - logger.Instance.Writef(logger.LogError, "DoRetryForDuration: received error for attempt %d: %v\n", attempt+1, err) - if !DelayForBackoff(backoff, attempt, r.Context().Done()) { - return nil, r.Context().Err() - } - } - return resp, err - }) - } -} - -// WithLogging returns a SendDecorator that implements simple before and after logging of the -// request. -func WithLogging(logger *log.Logger) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - logger.Printf("Sending %s %s", r.Method, r.URL) - resp, err := s.Do(r) - if err != nil { - logger.Printf("%s %s received error '%v'", r.Method, r.URL, err) - } else { - logger.Printf("%s %s received %s", r.Method, r.URL, resp.Status) - } - return resp, err - }) - } -} - -// DelayForBackoff invokes time.After for the supplied backoff duration raised to the power of -// passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can set -// to zero for no delay. The delay may be canceled by closing the passed channel. If terminated early, -// returns false. -// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt -// count. -func DelayForBackoff(backoff time.Duration, attempt int, cancel <-chan struct{}) bool { - return DelayForBackoffWithCap(backoff, 0, attempt, cancel) -} - -// DelayForBackoffWithCap invokes time.After for the supplied backoff duration raised to the power of -// passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can set -// to zero for no delay. To cap the maximum possible delay specify a value greater than zero for cap. -// The delay may be canceled by closing the passed channel. If terminated early, returns false. -// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt -// count. -func DelayForBackoffWithCap(backoff, cap time.Duration, attempt int, cancel <-chan struct{}) bool { - d := time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second - if cap > 0 && d > cap { - d = cap - } - logger.Instance.Writef(logger.LogInfo, "DelayForBackoffWithCap: sleeping for %s\n", d) - select { - case <-time.After(d): - return true - case <-cancel: - return false - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility.go b/vendor/github.com/Azure/go-autorest/autorest/utility.go deleted file mode 100644 index 3467b8f..0000000 --- a/vendor/github.com/Azure/go-autorest/autorest/utility.go +++ /dev/null @@ -1,232 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
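
The backoff helpers at the bottom of the sender.go deletion compute the delay as backoff * 2^attempt in whole seconds, clamp it to an optional cap, and race the timer against a cancellation channel, so attempt 0 sleeps exactly backoff and attempt 1 doubles it. The arithmetic in isolation (the original names its clamp parameter "cap", legally shadowing the builtin; renamed to limit here):

package sketch

import (
	"math"
	"time"
)

// delayForBackoff sleeps for backoff * 2^attempt seconds, clamped to limit
// when limit > 0, and returns false if cancel closes first.
func delayForBackoff(backoff, limit time.Duration, attempt int, cancel <-chan struct{}) bool {
	d := time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second
	if limit > 0 && d > limit {
		d = limit
	}
	select {
	case <-time.After(d):
		return true
	case <-cancel:
		return false
	}
}
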
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "encoding/json" - "encoding/xml" - "fmt" - "io" - "io/ioutil" - "net" - "net/http" - "net/url" - "reflect" - "strings" -) - -// EncodedAs is a series of constants specifying various data encodings -type EncodedAs string - -const ( - // EncodedAsJSON states that data is encoded as JSON - EncodedAsJSON EncodedAs = "JSON" - - // EncodedAsXML states that data is encoded as Xml - EncodedAsXML EncodedAs = "XML" -) - -// Decoder defines the decoding method json.Decoder and xml.Decoder share -type Decoder interface { - Decode(v interface{}) error -} - -// NewDecoder creates a new decoder appropriate to the passed encoding. -// encodedAs specifies the type of encoding and r supplies the io.Reader containing the -// encoded data. -func NewDecoder(encodedAs EncodedAs, r io.Reader) Decoder { - if encodedAs == EncodedAsJSON { - return json.NewDecoder(r) - } else if encodedAs == EncodedAsXML { - return xml.NewDecoder(r) - } - return nil -} - -// CopyAndDecode decodes the data from the passed io.Reader while making a copy. Having a copy -// is especially useful if there is a chance the data will fail to decode. -// encodedAs specifies the expected encoding, r provides the io.Reader to the data, and v -// is the decoding destination. -func CopyAndDecode(encodedAs EncodedAs, r io.Reader, v interface{}) (bytes.Buffer, error) { - b := bytes.Buffer{} - return b, NewDecoder(encodedAs, io.TeeReader(r, &b)).Decode(v) -} - -// TeeReadCloser returns a ReadCloser that writes to w what it reads from rc. -// It utilizes io.TeeReader to copy the data read and has the same behavior when reading. -// Further, when it is closed, it ensures that rc is closed as well. -func TeeReadCloser(rc io.ReadCloser, w io.Writer) io.ReadCloser { - return &teeReadCloser{rc, io.TeeReader(rc, w)} -} - -type teeReadCloser struct { - rc io.ReadCloser - r io.Reader -} - -func (t *teeReadCloser) Read(p []byte) (int, error) { - return t.r.Read(p) -} - -func (t *teeReadCloser) Close() error { - return t.rc.Close() -} - -func containsInt(ints []int, n int) bool { - for _, i := range ints { - if i == n { - return true - } - } - return false -} - -func escapeValueStrings(m map[string]string) map[string]string { - for key, value := range m { - m[key] = url.QueryEscape(value) - } - return m -} - -func ensureValueStrings(mapOfInterface map[string]interface{}) map[string]string { - mapOfStrings := make(map[string]string) - for key, value := range mapOfInterface { - mapOfStrings[key] = ensureValueString(value) - } - return mapOfStrings -} - -func ensureValueString(value interface{}) string { - if value == nil { - return "" - } - switch v := value.(type) { - case string: - return v - case []byte: - return string(v) - default: - return fmt.Sprintf("%v", v) - } -} - -// MapToValues method converts map[string]interface{} to url.Values. 
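
TeeReadCloser in the utility.go deletion above (the machinery behind responder.go's ByCopying) is a compact pattern worth keeping: io.TeeReader for the mirroring plus a preserved Close. Restated with a usage function showing why it exists:

package sketch

import (
	"bytes"
	"encoding/json"
	"io"
	"net/http"
)

// teeReadCloser mirrors every byte read from rc into w while Close still
// reaches the original body, the same shape as the removed helper.
type teeReadCloser struct {
	rc io.ReadCloser
	r  io.Reader
}

func TeeReadCloser(rc io.ReadCloser, w io.Writer) io.ReadCloser {
	return &teeReadCloser{rc: rc, r: io.TeeReader(rc, w)}
}

func (t *teeReadCloser) Read(p []byte) (int, error) { return t.r.Read(p) }
func (t *teeReadCloser) Close() error               { return t.rc.Close() }

// decodeKeepingRaw decodes JSON from a response while retaining the raw
// bytes for error reporting or debug logging.
func decodeKeepingRaw(resp *http.Response, v interface{}) (*bytes.Buffer, error) {
	var raw bytes.Buffer
	resp.Body = TeeReadCloser(resp.Body, &raw)
	defer resp.Body.Close()
	return &raw, json.NewDecoder(resp.Body).Decode(v)
}
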
-func MapToValues(m map[string]interface{}) url.Values { - v := url.Values{} - for key, value := range m { - x := reflect.ValueOf(value) - if x.Kind() == reflect.Array || x.Kind() == reflect.Slice { - for i := 0; i < x.Len(); i++ { - v.Add(key, ensureValueString(x.Index(i))) - } - } else { - v.Add(key, ensureValueString(value)) - } - } - return v -} - -// AsStringSlice method converts interface{} to []string. -// s must be of type slice or array or an error is returned. -// Each element of s will be converted to its string representation. -func AsStringSlice(s interface{}) ([]string, error) { - v := reflect.ValueOf(s) - if v.Kind() != reflect.Slice && v.Kind() != reflect.Array { - return nil, NewError("autorest", "AsStringSlice", "the value's type is not a slice or array.") - } - stringSlice := make([]string, 0, v.Len()) - - for i := 0; i < v.Len(); i++ { - stringSlice = append(stringSlice, fmt.Sprintf("%v", v.Index(i))) - } - return stringSlice, nil -} - -// String method converts interface v to string. If interface is a list, it -// joins list elements using the separator. Note that only sep[0] will be used for -// joining if any separator is specified. -func String(v interface{}, sep ...string) string { - if len(sep) == 0 { - return ensureValueString(v) - } - stringSlice, ok := v.([]string) - if ok == false { - var err error - stringSlice, err = AsStringSlice(v) - if err != nil { - panic(fmt.Sprintf("autorest: Couldn't convert value to a string %s.", err)) - } - } - return ensureValueString(strings.Join(stringSlice, sep[0])) -} - -// Encode method encodes url path and query parameters. -func Encode(location string, v interface{}, sep ...string) string { - s := String(v, sep...) - switch strings.ToLower(location) { - case "path": - return pathEscape(s) - case "query": - return queryEscape(s) - default: - return s - } -} - -func pathEscape(s string) string { - return strings.Replace(url.QueryEscape(s), "+", "%20", -1) -} - -func queryEscape(s string) string { - return url.QueryEscape(s) -} - -// ChangeToGet turns the specified http.Request into a GET (it assumes it wasn't). -// This is mainly useful for long-running operations that use the Azure-AsyncOperation -// header, so we change the initial PUT into a GET to retrieve the final result. -func ChangeToGet(req *http.Request) *http.Request { - req.Method = "GET" - req.Body = nil - req.ContentLength = 0 - req.Header.Del("Content-Length") - return req -} - -// IsTemporaryNetworkError returns true if the specified error is a temporary network error or false -// if it's not. If the error doesn't implement the net.Error interface the return value is true. -func IsTemporaryNetworkError(err error) bool { - if netErr, ok := err.(net.Error); !ok || (ok && netErr.Temporary()) { - return true - } - return false -} - -// DrainResponseBody reads the response body then closes it. 
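
Encode, pathEscape, and queryEscape above encode a subtle URL rule: "+" means space only in the query component, never in a path, which is why pathEscape rewrites QueryEscape's "+" into "%20" by hand. A runnable comparison, including the standard library's url.PathEscape, which is close to but not identical with the removed helper (it leaves a literal "+" unescaped):

package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	s := "a b+c"
	// Query component: space becomes "+", and a literal "+" must be escaped.
	fmt.Println(url.QueryEscape(s)) // a+b%2Bc
	// Path component, as the removed pathEscape computes it.
	fmt.Println(strings.ReplaceAll(url.QueryEscape(s), "+", "%20")) // a%20b%2Bc
	// Modern alternative; note the differing treatment of "+".
	fmt.Println(url.PathEscape(s)) // a%20b+c
}
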
-func DrainResponseBody(resp *http.Response) error { - if resp != nil && resp.Body != nil { - _, err := io.Copy(ioutil.Discard, resp.Body) - resp.Body.Close() - return err - } - return nil -} - -func setHeader(r *http.Request, key, value string) { - if r.Header == nil { - r.Header = make(http.Header) - } - r.Header.Set(key, value) -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility_1.13.go b/vendor/github.com/Azure/go-autorest/autorest/utility_1.13.go deleted file mode 100644 index 3133fcc..0000000 --- a/vendor/github.com/Azure/go-autorest/autorest/utility_1.13.go +++ /dev/null @@ -1,30 +0,0 @@ -//go:build go1.13 -// +build go1.13 - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package autorest - -import ( - "errors" - - "github.com/Azure/go-autorest/autorest/adal" -) - -// IsTokenRefreshError returns true if the specified error implements the TokenRefreshError interface. -func IsTokenRefreshError(err error) bool { - var tre adal.TokenRefreshError - return errors.As(err, &tre) -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility_legacy.go b/vendor/github.com/Azure/go-autorest/autorest/utility_legacy.go deleted file mode 100644 index 851e152..0000000 --- a/vendor/github.com/Azure/go-autorest/autorest/utility_legacy.go +++ /dev/null @@ -1,32 +0,0 @@ -//go:build !go1.13 -// +build !go1.13 - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package autorest - -import "github.com/Azure/go-autorest/autorest/adal" - -// IsTokenRefreshError returns true if the specified error implements the TokenRefreshError -// interface. If err is a DetailedError it will walk the chain of Original errors. -func IsTokenRefreshError(err error) bool { - if _, ok := err.(adal.TokenRefreshError); ok { - return true - } - if de, ok := err.(DetailedError); ok { - return IsTokenRefreshError(de.Original) - } - return false -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/version.go b/vendor/github.com/Azure/go-autorest/autorest/version.go deleted file mode 100644 index 713e235..0000000 --- a/vendor/github.com/Azure/go-autorest/autorest/version.go +++ /dev/null @@ -1,41 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
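
DrainResponseBody above is about connection reuse rather than correctness: net/http generally returns an HTTP/1.1 connection to the client's idle pool only when the previous response body was read to EOF before being closed. The same helper using io.Discard (the ioutil alias was deprecated in Go 1.16), with the Close error surfaced when the copy succeeds:

package sketch

import (
	"io"
	"net/http"
)

// drain consumes any leftover bytes, then closes the body so the underlying
// keep-alive connection can be reused instead of torn down.
func drain(resp *http.Response) error {
	if resp == nil || resp.Body == nil {
		return nil
	}
	_, err := io.Copy(io.Discard, resp.Body)
	if cerr := resp.Body.Close(); err == nil {
		err = cerr
	}
	return err
}
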
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "fmt" - "runtime" -) - -const number = "v14.2.1" - -var ( - userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s", - runtime.Version(), - runtime.GOARCH, - runtime.GOOS, - number, - ) -) - -// UserAgent returns a string containing the Go version, system architecture and OS, and the go-autorest version. -func UserAgent() string { - return userAgent -} - -// Version returns the semantic version (see http://semver.org). -func Version() string { - return number -} diff --git a/vendor/github.com/Azure/go-autorest/azure-pipelines.yml b/vendor/github.com/Azure/go-autorest/azure-pipelines.yml deleted file mode 100644 index 6fb8404..0000000 --- a/vendor/github.com/Azure/go-autorest/azure-pipelines.yml +++ /dev/null @@ -1,105 +0,0 @@ -variables: - GOPATH: '$(system.defaultWorkingDirectory)/work' - sdkPath: '$(GOPATH)/src/github.com/$(build.repository.name)' - -jobs: - - job: 'goautorest' - displayName: 'Run go-autorest CI Checks' - - strategy: - matrix: - Linux_Go113: - vm.image: 'ubuntu-18.04' - go.version: '1.13' - Linux_Go114: - vm.image: 'ubuntu-18.04' - go.version: '1.14' - - pool: - vmImage: '$(vm.image)' - - steps: - - task: GoTool@0 - inputs: - version: '$(go.version)' - displayName: "Select Go Version" - - - script: | - set -e - mkdir -p '$(GOPATH)/bin' - mkdir -p '$(sdkPath)' - shopt -s extglob - mv !(work) '$(sdkPath)' - echo '##vso[task.prependpath]$(GOPATH)/bin' - displayName: 'Create Go Workspace' - - - script: | - set -e - curl -sSL https://raw.githubusercontent.com/golang/dep/master/install.sh | sh - dep ensure -v - go install ./vendor/golang.org/x/lint/golint - go get github.com/jstemmer/go-junit-report - go get github.com/axw/gocov/gocov - go get github.com/AlekSi/gocov-xml - go get -u github.com/matm/gocov-html - workingDirectory: '$(sdkPath)' - displayName: 'Install Dependencies' - - - script: | - go vet ./autorest/... - go vet ./logger/... - go vet ./tracing/... - workingDirectory: '$(sdkPath)' - displayName: 'Vet' - - - script: | - go build -v ./autorest/... - go build -v ./logger/... - go build -v ./tracing/... - workingDirectory: '$(sdkPath)' - displayName: 'Build' - - - script: | - set -e - go test -race -v -coverprofile=coverage.txt -covermode atomic ./autorest/... ./logger/... ./tracing/... 2>&1 | go-junit-report > report.xml - gocov convert coverage.txt > coverage.json - gocov-xml < coverage.json > coverage.xml - gocov-html < coverage.json > coverage.html - workingDirectory: '$(sdkPath)' - displayName: 'Run Tests' - - - script: grep -L -r --include *.go --exclude-dir vendor -P "Copyright (\d{4}|\(c\)) Microsoft" ./ | tee >&2 - workingDirectory: '$(sdkPath)' - displayName: 'Copyright Header Check' - failOnStderr: true - condition: succeededOrFailed() - - - script: | - gofmt -s -l -w ./autorest/. >&2 - gofmt -s -l -w ./logger/. >&2 - gofmt -s -l -w ./tracing/. >&2 - workingDirectory: '$(sdkPath)' - displayName: 'Format Check' - failOnStderr: true - condition: succeededOrFailed() - - - script: | - golint ./autorest/... >&2 - golint ./logger/... >&2 - golint ./tracing/... 
>&2 - workingDirectory: '$(sdkPath)' - displayName: 'Linter Check' - failOnStderr: true - condition: succeededOrFailed() - - - task: PublishTestResults@2 - inputs: - testRunner: JUnit - testResultsFiles: $(sdkPath)/report.xml - failTaskOnFailedTests: true - - - task: PublishCodeCoverageResults@1 - inputs: - codeCoverageTool: Cobertura - summaryFileLocation: $(sdkPath)/coverage.xml - additionalCodeCoverageFiles: $(sdkPath)/coverage.html diff --git a/vendor/github.com/Azure/go-autorest/doc.go b/vendor/github.com/Azure/go-autorest/doc.go deleted file mode 100644 index 99ae6ca..0000000 --- a/vendor/github.com/Azure/go-autorest/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Package go-autorest provides an HTTP request client for use with Autorest-generated API client packages. -*/ -package go_autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/logger/LICENSE b/vendor/github.com/Azure/go-autorest/logger/LICENSE deleted file mode 100644 index b9d6a27..0000000 --- a/vendor/github.com/Azure/go-autorest/logger/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Microsoft Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go deleted file mode 100644 index 0aa2768..0000000 --- a/vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build modhack - -package logger - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// This file, and the github.com/Azure/go-autorest import, won't actually become part of -// the resultant binary. - -// Necessary for safely adding multi-module repo. 
-// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository -import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/logger/logger.go b/vendor/github.com/Azure/go-autorest/logger/logger.go deleted file mode 100644 index 2f5d8cc..0000000 --- a/vendor/github.com/Azure/go-autorest/logger/logger.go +++ /dev/null @@ -1,337 +0,0 @@ -package logger - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "os" - "strings" - "sync" - "time" -) - -// LevelType tells a logger the minimum level to log. When code reports a log entry, -// the LogLevel indicates the level of the log entry. The logger only records entries -// whose level is at least the level it was told to log. See the Log* constants. -// For example, if a logger is configured with LogError, then LogError, LogPanic, -// and LogFatal entries will be logged; lower level entries are ignored. -type LevelType uint32 - -const ( - // LogNone tells a logger not to log any entries passed to it. - LogNone LevelType = iota - - // LogFatal tells a logger to log all LogFatal entries passed to it. - LogFatal - - // LogPanic tells a logger to log all LogPanic and LogFatal entries passed to it. - LogPanic - - // LogError tells a logger to log all LogError, LogPanic and LogFatal entries passed to it. - LogError - - // LogWarning tells a logger to log all LogWarning, LogError, LogPanic and LogFatal entries passed to it. - LogWarning - - // LogInfo tells a logger to log all LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it. - LogInfo - - // LogDebug tells a logger to log all LogDebug, LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it. - LogDebug - - // LogAuth is a special case of LogDebug, it tells a logger to also log the body of an authentication request and response. - // NOTE: this can disclose sensitive information, use with care. - LogAuth -) - -const ( - logNone = "NONE" - logFatal = "FATAL" - logPanic = "PANIC" - logError = "ERROR" - logWarning = "WARNING" - logInfo = "INFO" - logDebug = "DEBUG" - logAuth = "AUTH" - logUnknown = "UNKNOWN" -) - -// ParseLevel converts the specified string into the corresponding LevelType. -func ParseLevel(s string) (lt LevelType, err error) { - switch strings.ToUpper(s) { - case logFatal: - lt = LogFatal - case logPanic: - lt = LogPanic - case logError: - lt = LogError - case logWarning: - lt = LogWarning - case logInfo: - lt = LogInfo - case logDebug: - lt = LogDebug - case logAuth: - lt = LogAuth - default: - err = fmt.Errorf("bad log level '%s'", s) - } - return -} - -// String implements the stringer interface for LevelType. 
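
The LevelType ordering in the logger deletion is doing real work: because the constants are declared in increasing verbosity (LogFatal < LogPanic < ... < LogDebug), deciding whether to record an entry is a single integer comparison against the configured level, and LogNone at zero naturally disables everything. The scheme in miniature (names shortened; not the package's own code):

package sketch

type Level uint32

const (
	None Level = iota // zero value: log nothing
	Fatal
	Panic
	Error
	Warning
	Info
	Debug
)

// shouldLog is the entire filtering mechanism: one ordered comparison.
func shouldLog(configured, entry Level) bool {
	return entry != None && configured >= entry
}
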
-func (lt LevelType) String() string { - switch lt { - case LogNone: - return logNone - case LogFatal: - return logFatal - case LogPanic: - return logPanic - case LogError: - return logError - case LogWarning: - return logWarning - case LogInfo: - return logInfo - case LogDebug: - return logDebug - case LogAuth: - return logAuth - default: - return logUnknown - } -} - -// Filter defines functions for filtering HTTP request/response content. -type Filter struct { - // URL returns a potentially modified string representation of a request URL. - URL func(u *url.URL) string - - // Header returns a potentially modified set of values for the specified key. - // To completely exclude the header key/values return false. - Header func(key string, val []string) (bool, []string) - - // Body returns a potentially modified request/response body. - Body func(b []byte) []byte -} - -func (f Filter) processURL(u *url.URL) string { - if f.URL == nil { - return u.String() - } - return f.URL(u) -} - -func (f Filter) processHeader(k string, val []string) (bool, []string) { - if f.Header == nil { - return true, val - } - return f.Header(k, val) -} - -func (f Filter) processBody(b []byte) []byte { - if f.Body == nil { - return b - } - return f.Body(b) -} - -// Writer defines methods for writing to a logging facility. -type Writer interface { - // Writeln writes the specified message with the standard log entry header and new-line character. - Writeln(level LevelType, message string) - - // Writef writes the specified format specifier with the standard log entry header and no new-line character. - Writef(level LevelType, format string, a ...interface{}) - - // WriteRequest writes the specified HTTP request to the logger if the log level is greater than - // or equal to LogInfo. The request body, if set, is logged at level LogDebug or higher. - // Custom filters can be specified to exclude URL, header, and/or body content from the log. - // By default no request content is excluded. - WriteRequest(req *http.Request, filter Filter) - - // WriteResponse writes the specified HTTP response to the logger if the log level is greater than - // or equal to LogInfo. The response body, if set, is logged at level LogDebug or higher. - // Custom filters can be specified to exclude URL, header, and/or body content from the log. - // By default no response content is excluded. - WriteResponse(resp *http.Response, filter Filter) -} - -// Instance is the default log writer initialized during package init. -// This can be replaced with a custom implementation as required. -var Instance Writer - -// default log level -var logLevel = LogNone - -// Level returns the value specified in AZURE_GO_AUTOREST_LOG_LEVEL. -// If no value was specified the default value is LogNone. -// Custom loggers can call this to retrieve the configured log level. 
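Since this entire vendored logger package is removed in this hunk, here is a hedged reference sketch of how its surface was meant to be consumed: levels parse via ParseLevel, configuration comes from AZURE_GO_SDK_LOG_LEVEL / AZURE_GO_SDK_LOG_FILE at package init, and Instance is the process-wide Writer.

```go
// Reference sketch only; this vendored package is deleted by the diff.
package main

import (
	"fmt"

	"github.com/Azure/go-autorest/logger"
)

func main() {
	lt, err := logger.ParseLevel("debug")
	if err != nil {
		fmt.Println(err) // ParseLevel reports e.g. "bad log level 'verbose'"
		return
	}
	// An entry is recorded when the configured level is at least the entry's
	// level, so a LogDebug configuration also records LogInfo and LogError.
	if logger.Level() >= lt {
		logger.Instance.Writeln(logger.LogInfo, "example entry")
	}
}
```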
-func Level() LevelType { - return logLevel -} - -func init() { - // separated for testing purposes - initDefaultLogger() -} - -func initDefaultLogger() { - // init with nilLogger so callers don't have to do a nil check on Default - Instance = nilLogger{} - llStr := strings.ToLower(os.Getenv("AZURE_GO_SDK_LOG_LEVEL")) - if llStr == "" { - return - } - var err error - logLevel, err = ParseLevel(llStr) - if err != nil { - fmt.Fprintf(os.Stderr, "go-autorest: failed to parse log level: %s\n", err.Error()) - return - } - if logLevel == LogNone { - return - } - // default to stderr - dest := os.Stderr - lfStr := os.Getenv("AZURE_GO_SDK_LOG_FILE") - if strings.EqualFold(lfStr, "stdout") { - dest = os.Stdout - } else if lfStr != "" { - lf, err := os.Create(lfStr) - if err == nil { - dest = lf - } else { - fmt.Fprintf(os.Stderr, "go-autorest: failed to create log file, using stderr: %s\n", err.Error()) - } - } - Instance = fileLogger{ - logLevel: logLevel, - mu: &sync.Mutex{}, - logFile: dest, - } -} - -// the nil logger does nothing -type nilLogger struct{} - -func (nilLogger) Writeln(LevelType, string) {} - -func (nilLogger) Writef(LevelType, string, ...interface{}) {} - -func (nilLogger) WriteRequest(*http.Request, Filter) {} - -func (nilLogger) WriteResponse(*http.Response, Filter) {} - -// A File is used instead of a Logger so the stream can be flushed after every write. -type fileLogger struct { - logLevel LevelType - mu *sync.Mutex // for synchronizing writes to logFile - logFile *os.File -} - -func (fl fileLogger) Writeln(level LevelType, message string) { - fl.Writef(level, "%s\n", message) -} - -func (fl fileLogger) Writef(level LevelType, format string, a ...interface{}) { - if fl.logLevel >= level { - fl.mu.Lock() - defer fl.mu.Unlock() - fmt.Fprintf(fl.logFile, "%s %s", entryHeader(level), fmt.Sprintf(format, a...)) - fl.logFile.Sync() - } -} - -func (fl fileLogger) WriteRequest(req *http.Request, filter Filter) { - if req == nil || fl.logLevel < LogInfo { - return - } - b := &bytes.Buffer{} - fmt.Fprintf(b, "%s REQUEST: %s %s\n", entryHeader(LogInfo), req.Method, filter.processURL(req.URL)) - // dump headers - for k, v := range req.Header { - if ok, mv := filter.processHeader(k, v); ok { - fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ",")) - } - } - if fl.shouldLogBody(req.Header, req.Body) { - // dump body - body, err := ioutil.ReadAll(req.Body) - if err == nil { - fmt.Fprintln(b, string(filter.processBody(body))) - if nc, ok := req.Body.(io.Seeker); ok { - // rewind to the beginning - nc.Seek(0, io.SeekStart) - } else { - // recreate the body - req.Body = ioutil.NopCloser(bytes.NewReader(body)) - } - } else { - fmt.Fprintf(b, "failed to read body: %v\n", err) - } - } - fl.mu.Lock() - defer fl.mu.Unlock() - fmt.Fprint(fl.logFile, b.String()) - fl.logFile.Sync() -} - -func (fl fileLogger) WriteResponse(resp *http.Response, filter Filter) { - if resp == nil || fl.logLevel < LogInfo { - return - } - b := &bytes.Buffer{} - fmt.Fprintf(b, "%s RESPONSE: %d %s\n", entryHeader(LogInfo), resp.StatusCode, filter.processURL(resp.Request.URL)) - // dump headers - for k, v := range resp.Header { - if ok, mv := filter.processHeader(k, v); ok { - fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ",")) - } - } - if fl.shouldLogBody(resp.Header, resp.Body) { - // dump body - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - if err == nil { - fmt.Fprintln(b, string(filter.processBody(body))) - resp.Body = ioutil.NopCloser(bytes.NewReader(body)) - } else { - fmt.Fprintf(b, "failed to 
read body: %v\n", err) - } - } - fl.mu.Lock() - defer fl.mu.Unlock() - fmt.Fprint(fl.logFile, b.String()) - fl.logFile.Sync() -} - -// returns true if the provided body should be included in the log -func (fl fileLogger) shouldLogBody(header http.Header, body io.ReadCloser) bool { - ct := header.Get("Content-Type") - return fl.logLevel >= LogDebug && body != nil && !strings.Contains(ct, "application/octet-stream") -} - -// creates standard header for log entries, it contains a timestamp and the log level -func entryHeader(level LevelType) string { - // this format provides a fixed number of digits so the size of the timestamp is constant - return fmt.Sprintf("(%s) %s:", time.Now().Format("2006-01-02T15:04:05.0000000Z07:00"), level.String()) -} diff --git a/vendor/github.com/Azure/go-autorest/tracing/LICENSE b/vendor/github.com/Azure/go-autorest/tracing/LICENSE deleted file mode 100644 index b9d6a27..0000000 --- a/vendor/github.com/Azure/go-autorest/tracing/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - Copyright 2015 Microsoft Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go deleted file mode 100644 index e163975..0000000 --- a/vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build modhack - -package tracing - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// This file, and the github.com/Azure/go-autorest import, won't actually become part of -// the resultant binary. - -// Necessary for safely adding multi-module repo. -// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository -import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/tracing/tracing.go b/vendor/github.com/Azure/go-autorest/tracing/tracing.go deleted file mode 100644 index 0e7a6e9..0000000 --- a/vendor/github.com/Azure/go-autorest/tracing/tracing.go +++ /dev/null @@ -1,67 +0,0 @@ -package tracing - -// Copyright 2018 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "context" - "net/http" -) - -// Tracer represents an HTTP tracing facility. -type Tracer interface { - NewTransport(base *http.Transport) http.RoundTripper - StartSpan(ctx context.Context, name string) context.Context - EndSpan(ctx context.Context, httpStatusCode int, err error) -} - -var ( - tracer Tracer -) - -// Register will register the provided Tracer. Pass nil to unregister a Tracer. -func Register(t Tracer) { - tracer = t -} - -// IsEnabled returns true if a Tracer has been registered. -func IsEnabled() bool { - return tracer != nil -} - -// NewTransport creates a new instrumenting http.RoundTripper for the -// registered Tracer. If no Tracer has been registered it returns nil. 
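The tracing package deleted next is tiny: a package-global Tracer that stays nil until Register is called, with free functions that no-op while it is unset. A sketch of a conforming no-op implementation (the type name is illustrative):

```go
// Sketch only: satisfying the Tracer interface shown in the deleted file.
package main

import (
	"context"
	"net/http"

	"github.com/Azure/go-autorest/tracing"
)

type noopTracer struct{}

func (noopTracer) NewTransport(base *http.Transport) http.RoundTripper        { return base }
func (noopTracer) StartSpan(ctx context.Context, name string) context.Context { return ctx }
func (noopTracer) EndSpan(ctx context.Context, httpStatusCode int, err error) {}

func main() {
	tracing.Register(noopTracer{}) // Register(nil) would unregister again
	if tracing.IsEnabled() {
		ctx := tracing.StartSpan(context.Background(), "example-span")
		// ... perform the traced work ...
		tracing.EndSpan(ctx, http.StatusOK, nil)
	}
}
```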
-func NewTransport(base *http.Transport) http.RoundTripper { - if tracer != nil { - return tracer.NewTransport(base) - } - return nil -} - -// StartSpan starts a trace span with the specified name, associating it with the -// provided context. Has no effect if a Tracer has not been registered. -func StartSpan(ctx context.Context, name string) context.Context { - if tracer != nil { - return tracer.StartSpan(ctx, name) - } - return ctx -} - -// EndSpan ends a previously started span stored in the context. -// Has no effect if a Tracer has not been registered. -func EndSpan(ctx context.Context, httpStatusCode int, err error) { - if tracer != nil { - tracer.EndSpan(ctx, httpStatusCode, err) - } -} diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md index 792b4a6..8bf0e5b 100644 --- a/vendor/github.com/cespare/xxhash/v2/README.md +++ b/vendor/github.com/cespare/xxhash/v2/README.md @@ -3,8 +3,7 @@ [![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2) [![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml) -xxhash is a Go implementation of the 64-bit -[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a +xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a high-quality hashing algorithm that is much faster than anything in the Go standard library. @@ -25,8 +24,11 @@ func (*Digest) WriteString(string) (int, error) func (*Digest) Sum64() uint64 ``` -This implementation provides a fast pure-Go implementation and an even faster -assembly implementation for amd64. +The package is written with optimized pure Go and also contains even faster +assembly implementations for amd64 and arm64. If desired, the `purego` build tag +opts into using the Go code even on those architectures. + +[xxHash]: http://cyan4973.github.io/xxHash/ ## Compatibility @@ -45,19 +47,20 @@ I recommend using the latest release of Go. Here are some quick benchmarks comparing the pure-Go and assembly implementations of Sum64. -| input size | purego | asm | -| --- | --- | --- | -| 5 B | 979.66 MB/s | 1291.17 MB/s | -| 100 B | 7475.26 MB/s | 7973.40 MB/s | -| 4 KB | 17573.46 MB/s | 17602.65 MB/s | -| 10 MB | 17131.46 MB/s | 17142.16 MB/s | +| input size | purego | asm | +| ---------- | --------- | --------- | +| 4 B | 1.3 GB/s | 1.2 GB/s | +| 16 B | 2.9 GB/s | 3.5 GB/s | +| 100 B | 6.9 GB/s | 8.1 GB/s | +| 4 KB | 11.7 GB/s | 16.7 GB/s | +| 10 MB | 12.0 GB/s | 17.3 GB/s | -These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using -the following commands under Go 1.11.2: +These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C +CPU using the following commands under Go 1.19.2: ``` -$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' -$ go test -benchtime 10s -bench '/xxhash,direct,bytes' +benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$') +benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') ``` ## Projects using this package diff --git a/vendor/github.com/cespare/xxhash/v2/testall.sh b/vendor/github.com/cespare/xxhash/v2/testall.sh new file mode 100644 index 0000000..94b9c44 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/testall.sh @@ -0,0 +1,10 @@ +#!/bin/bash +set -eu -o pipefail + +# Small convenience script for running the tests with various combinations of +# arch/tags. 
This assumes we're running on amd64 and have qemu available. + +go test ./... +go test -tags purego ./... +GOARCH=arm64 go test +GOARCH=arm64 go test -tags purego diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go index 15c835d..a9e0d45 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go @@ -16,19 +16,11 @@ const ( prime5 uint64 = 2870177450012600261 ) -// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where -// possible in the Go code is worth a small (but measurable) performance boost -// by avoiding some MOVQs. Vars are needed for the asm and also are useful for -// convenience in the Go code in a few places where we need to intentionally -// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the -// result overflows a uint64). -var ( - prime1v = prime1 - prime2v = prime2 - prime3v = prime3 - prime4v = prime4 - prime5v = prime5 -) +// Store the primes in an array as well. +// +// The consts are used when possible in Go code to avoid MOVs but we need a +// contiguous array of the assembly code. +var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} // Digest implements hash.Hash64. type Digest struct { @@ -50,10 +42,10 @@ func New() *Digest { // Reset clears the Digest's state so that it can be reused. func (d *Digest) Reset() { - d.v1 = prime1v + prime2 + d.v1 = primes[0] + prime2 d.v2 = prime2 d.v3 = 0 - d.v4 = -prime1v + d.v4 = -primes[0] d.total = 0 d.n = 0 } @@ -69,21 +61,23 @@ func (d *Digest) Write(b []byte) (n int, err error) { n = len(b) d.total += uint64(n) + memleft := d.mem[d.n&(len(d.mem)-1):] + if d.n+n < 32 { // This new data doesn't even fill the current block. - copy(d.mem[d.n:], b) + copy(memleft, b) d.n += n return } if d.n > 0 { // Finish off the partial block. 
- copy(d.mem[d.n:], b) + c := copy(memleft, b) d.v1 = round(d.v1, u64(d.mem[0:8])) d.v2 = round(d.v2, u64(d.mem[8:16])) d.v3 = round(d.v3, u64(d.mem[16:24])) d.v4 = round(d.v4, u64(d.mem[24:32])) - b = b[32-d.n:] + b = b[c:] d.n = 0 } @@ -133,21 +127,20 @@ func (d *Digest) Sum64() uint64 { h += d.total - i, end := 0, d.n - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(d.mem[i:i+8])) + b := d.mem[:d.n&(len(d.mem)-1)] + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) h ^= k1 h = rol27(h)*prime1 + prime4 } - if i+4 <= end { - h ^= uint64(u32(d.mem[i:i+4])) * prime1 + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 h = rol23(h)*prime2 + prime3 - i += 4 + b = b[4:] } - for i < end { - h ^= uint64(d.mem[i]) * prime5 + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 h = rol11(h) * prime1 - i++ } h ^= h >> 33 diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s index be8db5b..3e8b132 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s @@ -1,215 +1,209 @@ +//go:build !appengine && gc && !purego // +build !appengine // +build gc // +build !purego #include "textflag.h" -// Register allocation: -// AX h -// SI pointer to advance through b -// DX n -// BX loop end -// R8 v1, k1 -// R9 v2 -// R10 v3 -// R11 v4 -// R12 tmp -// R13 prime1v -// R14 prime2v -// DI prime4v +// Registers: +#define h AX +#define d AX +#define p SI // pointer to advance through b +#define n DX +#define end BX // loop end +#define v1 R8 +#define v2 R9 +#define v3 R10 +#define v4 R11 +#define x R12 +#define prime1 R13 +#define prime2 R14 +#define prime4 DI -// round reads from and advances the buffer pointer in SI. -// It assumes that R13 has prime1v and R14 has prime2v. -#define round(r) \ - MOVQ (SI), R12 \ - ADDQ $8, SI \ - IMULQ R14, R12 \ - ADDQ R12, r \ - ROLQ $31, r \ - IMULQ R13, r +#define round(acc, x) \ + IMULQ prime2, x \ + ADDQ x, acc \ + ROLQ $31, acc \ + IMULQ prime1, acc -// mergeRound applies a merge round on the two registers acc and val. -// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v. -#define mergeRound(acc, val) \ - IMULQ R14, val \ - ROLQ $31, val \ - IMULQ R13, val \ - XORQ val, acc \ - IMULQ R13, acc \ - ADDQ DI, acc +// round0 performs the operation x = round(0, x). +#define round0(x) \ + IMULQ prime2, x \ + ROLQ $31, x \ + IMULQ prime1, x + +// mergeRound applies a merge round on the two registers acc and x. +// It assumes that prime1, prime2, and prime4 have been loaded. +#define mergeRound(acc, x) \ + round0(x) \ + XORQ x, acc \ + IMULQ prime1, acc \ + ADDQ prime4, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that there is at least one block +// to process. +#define blockLoop() \ +loop: \ + MOVQ +0(p), x \ + round(v1, x) \ + MOVQ +8(p), x \ + round(v2, x) \ + MOVQ +16(p), x \ + round(v3, x) \ + MOVQ +24(p), x \ + round(v4, x) \ + ADDQ $32, p \ + CMPQ p, end \ + JLE loop // func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT, $0-32 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 // Load fixed primes. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - MOVQ ·prime4v(SB), DI + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + MOVQ ·primes+24(SB), prime4 // Load slice. - MOVQ b_base+0(FP), SI - MOVQ b_len+8(FP), DX - LEAQ (SI)(DX*1), BX + MOVQ b_base+0(FP), p + MOVQ b_len+8(FP), n + LEAQ (p)(n*1), end // The first loop limit will be len(b)-32. 
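Before the diff descends further into the amd64 assembly, it may help to recall the exported API all of this serves, as listed in the README hunk above; a minimal sketch:

```go
// Sketch only: the public xxhash API these internals implement.
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	oneShot := xxhash.Sum64([]byte("hello, xxhash"))

	d := xxhash.New() // *Digest, implements hash.Hash64
	d.WriteString("hello, ")
	d.WriteString("xxhash") // chunked writes exercise the partial-block path in Write

	fmt.Println(oneShot == d.Sum64()) // true: streaming matches one-shot
}
```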
- SUBQ $32, BX + SUBQ $32, end // Check whether we have at least one block. - CMPQ DX, $32 + CMPQ n, $32 JLT noBlocks // Set up initial state (v1, v2, v3, v4). - MOVQ R13, R8 - ADDQ R14, R8 - MOVQ R14, R9 - XORQ R10, R10 - XORQ R11, R11 - SUBQ R13, R11 + MOVQ prime1, v1 + ADDQ prime2, v1 + MOVQ prime2, v2 + XORQ v3, v3 + XORQ v4, v4 + SUBQ prime1, v4 - // Loop until SI > BX. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) + blockLoop() - CMPQ SI, BX - JLE blockLoop + MOVQ v1, h + ROLQ $1, h + MOVQ v2, x + ROLQ $7, x + ADDQ x, h + MOVQ v3, x + ROLQ $12, x + ADDQ x, h + MOVQ v4, x + ROLQ $18, x + ADDQ x, h - MOVQ R8, AX - ROLQ $1, AX - MOVQ R9, R12 - ROLQ $7, R12 - ADDQ R12, AX - MOVQ R10, R12 - ROLQ $12, R12 - ADDQ R12, AX - MOVQ R11, R12 - ROLQ $18, R12 - ADDQ R12, AX - - mergeRound(AX, R8) - mergeRound(AX, R9) - mergeRound(AX, R10) - mergeRound(AX, R11) + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) JMP afterBlocks noBlocks: - MOVQ ·prime5v(SB), AX + MOVQ ·primes+32(SB), h afterBlocks: - ADDQ DX, AX + ADDQ n, h - // Right now BX has len(b)-32, and we want to loop until SI > len(b)-8. - ADDQ $24, BX + ADDQ $24, end + CMPQ p, end + JG try4 - CMPQ SI, BX - JG fourByte +loop8: + MOVQ (p), x + ADDQ $8, p + round0(x) + XORQ x, h + ROLQ $27, h + IMULQ prime1, h + ADDQ prime4, h -wordLoop: - // Calculate k1. - MOVQ (SI), R8 - ADDQ $8, SI - IMULQ R14, R8 - ROLQ $31, R8 - IMULQ R13, R8 + CMPQ p, end + JLE loop8 - XORQ R8, AX - ROLQ $27, AX - IMULQ R13, AX - ADDQ DI, AX +try4: + ADDQ $4, end + CMPQ p, end + JG try1 - CMPQ SI, BX - JLE wordLoop + MOVL (p), x + ADDQ $4, p + IMULQ prime1, x + XORQ x, h -fourByte: - ADDQ $4, BX - CMPQ SI, BX - JG singles + ROLQ $23, h + IMULQ prime2, h + ADDQ ·primes+16(SB), h - MOVL (SI), R8 - ADDQ $4, SI - IMULQ R13, R8 - XORQ R8, AX - - ROLQ $23, AX - IMULQ R14, AX - ADDQ ·prime3v(SB), AX - -singles: - ADDQ $4, BX - CMPQ SI, BX +try1: + ADDQ $4, end + CMPQ p, end JGE finalize -singlesLoop: - MOVBQZX (SI), R12 - ADDQ $1, SI - IMULQ ·prime5v(SB), R12 - XORQ R12, AX +loop1: + MOVBQZX (p), x + ADDQ $1, p + IMULQ ·primes+32(SB), x + XORQ x, h + ROLQ $11, h + IMULQ prime1, h - ROLQ $11, AX - IMULQ R13, AX - - CMPQ SI, BX - JL singlesLoop + CMPQ p, end + JL loop1 finalize: - MOVQ AX, R12 - SHRQ $33, R12 - XORQ R12, AX - IMULQ R14, AX - MOVQ AX, R12 - SHRQ $29, R12 - XORQ R12, AX - IMULQ ·prime3v(SB), AX - MOVQ AX, R12 - SHRQ $32, R12 - XORQ R12, AX + MOVQ h, x + SHRQ $33, x + XORQ x, h + IMULQ prime2, h + MOVQ h, x + SHRQ $29, x + XORQ x, h + IMULQ ·primes+16(SB), h + MOVQ h, x + SHRQ $32, x + XORQ x, h - MOVQ AX, ret+24(FP) + MOVQ h, ret+24(FP) RET -// writeBlocks uses the same registers as above except that it uses AX to store -// the d pointer. - // func writeBlocks(d *Digest, b []byte) int -TEXT ·writeBlocks(SB), NOSPLIT, $0-40 +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 // Load fixed primes needed for round. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 // Load slice. - MOVQ b_base+8(FP), SI - MOVQ b_len+16(FP), DX - LEAQ (SI)(DX*1), BX - SUBQ $32, BX + MOVQ b_base+8(FP), p + MOVQ b_len+16(FP), n + LEAQ (p)(n*1), end + SUBQ $32, end // Load vN from d. 
- MOVQ d+0(FP), AX - MOVQ 0(AX), R8 // v1 - MOVQ 8(AX), R9 // v2 - MOVQ 16(AX), R10 // v3 - MOVQ 24(AX), R11 // v4 + MOVQ s+0(FP), d + MOVQ 0(d), v1 + MOVQ 8(d), v2 + MOVQ 16(d), v3 + MOVQ 24(d), v4 // We don't need to check the loop condition here; this function is // always called with at least one block of data to process. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ SI, BX - JLE blockLoop + blockLoop() // Copy vN back to d. - MOVQ R8, 0(AX) - MOVQ R9, 8(AX) - MOVQ R10, 16(AX) - MOVQ R11, 24(AX) + MOVQ v1, 0(d) + MOVQ v2, 8(d) + MOVQ v3, 16(d) + MOVQ v4, 24(d) - // The number of bytes written is SI minus the old base pointer. - SUBQ b_base+8(FP), SI - MOVQ SI, ret+32(FP) + // The number of bytes written is p minus the old base pointer. + SUBQ b_base+8(FP), p + MOVQ p, ret+32(FP) RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s new file mode 100644 index 0000000..7e3145a --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s @@ -0,0 +1,183 @@ +//go:build !appengine && gc && !purego +// +build !appengine +// +build gc +// +build !purego + +#include "textflag.h" + +// Registers: +#define digest R1 +#define h R2 // return value +#define p R3 // input pointer +#define n R4 // input length +#define nblocks R5 // n / 32 +#define prime1 R7 +#define prime2 R8 +#define prime3 R9 +#define prime4 R10 +#define prime5 R11 +#define v1 R12 +#define v2 R13 +#define v3 R14 +#define v4 R15 +#define x1 R20 +#define x2 R21 +#define x3 R22 +#define x4 R23 + +#define round(acc, x) \ + MADD prime2, acc, x, acc \ + ROR $64-31, acc \ + MUL prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + MUL prime2, x \ + ROR $64-31, x \ + MUL prime1, x + +#define mergeRound(acc, x) \ + round0(x) \ + EOR x, acc \ + MADD acc, prime4, prime1, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that n >= 32. +#define blockLoop() \ + LSR $5, n, nblocks \ + PCALIGN $16 \ + loop: \ + LDP.P 16(p), (x1, x2) \ + LDP.P 16(p), (x3, x4) \ + round(v1, x1) \ + round(v2, x2) \ + round(v3, x3) \ + round(v4, x4) \ + SUB $1, nblocks \ + CBNZ nblocks, loop + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + LDP b_base+0(FP), (p, n) + + LDP ·primes+0(SB), (prime1, prime2) + LDP ·primes+16(SB), (prime3, prime4) + MOVD ·primes+32(SB), prime5 + + CMP $32, n + CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 } + BLT afterLoop + + ADD prime1, prime2, v1 + MOVD prime2, v2 + MOVD $0, v3 + NEG prime1, v4 + + blockLoop() + + ROR $64-1, v1, x1 + ROR $64-7, v2, x2 + ADD x1, x2 + ROR $64-12, v3, x3 + ROR $64-18, v4, x4 + ADD x3, x4 + ADD x2, x4, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) + +afterLoop: + ADD n, h + + TBZ $4, n, try8 + LDP.P 16(p), (x1, x2) + + round0(x1) + + // NOTE: here and below, sequencing the EOR after the ROR (using a + // rotated register) is worth a small but measurable speedup for small + // inputs. 
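A note on the constraint headers visible in the new and renamed files in this region: each carries both the Go 1.17+ `//go:build` expression and the equivalent legacy `// +build` lines, and the `purego` tag deselects the assembly entirely. A sketch of the header, with the opt-out commands testall.sh uses:

```go
// Sketch of the dual build-constraint header used by xxhash_asm.go and the
// .s files above. Go 1.17+ evaluates the //go:build expression; the // +build
// lines keep older toolchains selecting the same files. Opting out:
//
//	go test -tags purego ./...
//	GOARCH=arm64 go test

//go:build (amd64 || arm64) && !appengine && gc && !purego
// +build amd64 arm64
// +build !appengine
// +build gc
// +build !purego

package xxhash
```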
+ ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + + round0(x2) + ROR $64-27, h + EOR x2 @> 64-27, h, h + MADD h, prime4, prime1, h + +try8: + TBZ $3, n, try4 + MOVD.P 8(p), x1 + + round0(x1) + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + +try4: + TBZ $2, n, try2 + MOVWU.P 4(p), x2 + + MUL prime1, x2 + ROR $64-23, h + EOR x2 @> 64-23, h, h + MADD h, prime3, prime2, h + +try2: + TBZ $1, n, try1 + MOVHU.P 2(p), x3 + AND $255, x3, x1 + LSR $8, x3, x2 + + MUL prime5, x1 + ROR $64-11, h + EOR x1 @> 64-11, h, h + MUL prime1, h + + MUL prime5, x2 + ROR $64-11, h + EOR x2 @> 64-11, h, h + MUL prime1, h + +try1: + TBZ $0, n, finalize + MOVBU (p), x4 + + MUL prime5, x4 + ROR $64-11, h + EOR x4 @> 64-11, h, h + MUL prime1, h + +finalize: + EOR h >> 33, h + MUL prime2, h + EOR h >> 29, h + MUL prime3, h + EOR h >> 32, h + + MOVD h, ret+24(FP) + RET + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + LDP ·primes+0(SB), (prime1, prime2) + + // Load state. Assume v[1-4] are stored contiguously. + MOVD d+0(FP), digest + LDP 0(digest), (v1, v2) + LDP 16(digest), (v3, v4) + + LDP b_base+8(FP), (p, n) + + blockLoop() + + // Store updated state. + STP (v1, v2), 0(digest) + STP (v3, v4), 16(digest) + + BIC $31, n + MOVD n, ret+32(FP) + RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go similarity index 73% rename from vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go rename to vendor/github.com/cespare/xxhash/v2/xxhash_asm.go index ad14b80..9216e0a 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go @@ -1,3 +1,5 @@ +//go:build (amd64 || arm64) && !appengine && gc && !purego +// +build amd64 arm64 // +build !appengine // +build gc // +build !purego diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go index 4a5a821..26df13b 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go @@ -1,4 +1,5 @@ -// +build !amd64 appengine !gc purego +//go:build (!amd64 && !arm64) || appengine || !gc || purego +// +build !amd64,!arm64 appengine !gc purego package xxhash @@ -14,10 +15,10 @@ func Sum64(b []byte) uint64 { var h uint64 if n >= 32 { - v1 := prime1v + prime2 + v1 := primes[0] + prime2 v2 := prime2 v3 := uint64(0) - v4 := -prime1v + v4 := -primes[0] for len(b) >= 32 { v1 = round(v1, u64(b[0:8:len(b)])) v2 = round(v2, u64(b[8:16:len(b)])) @@ -36,19 +37,18 @@ func Sum64(b []byte) uint64 { h += uint64(n) - i, end := 0, len(b) - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(b[i:i+8:len(b)])) + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) h ^= k1 h = rol27(h)*prime1 + prime4 } - if i+4 <= end { - h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 h = rol23(h)*prime2 + prime3 - i += 4 + b = b[4:] } - for ; i < end; i++ { - h ^= uint64(b[i]) * prime5 + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 h = rol11(h) * prime1 } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go index fc9bea7..e86f1b5 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go @@ -1,3 +1,4 @@ +//go:build appengine // +build appengine // This file contains the safe implementations of otherwise 
unsafe-using code. diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go index 376e0ca..1c1638f 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go @@ -1,3 +1,4 @@ +//go:build !appengine // +build !appengine // This file encapsulates usage of unsafe. @@ -11,7 +12,7 @@ import ( // In the future it's possible that compiler optimizations will make these // XxxString functions unnecessary by realizing that calls such as -// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205. +// Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205. // If that happens, even if we keep these functions they can be replaced with // the trivial safe code. diff --git a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md index 74a3781..352018e 100644 --- a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md +++ b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md @@ -1,10 +1,26 @@ # Change history of go-restful -## [v3.9.0] - 20221-07-21 +## [v3.10.2] - 2023-03-09 + +- introduced MergePathStrategy to be able to revert behaviour of path concatenation to 3.9.0 + see comment in Readme how to customize this behaviour. + +## [v3.10.1] - 2022-11-19 + +- fix broken 3.10.0 by using path package for joining paths + +## [v3.10.0] - 2022-10-11 - BROKEN + +- changed tokenizer to match std route match behavior; do not trimright the path (#511) +- Add MIME_ZIP (#512) +- Add MIME_ZIP and HEADER_ContentDisposition (#513) +- Changed how to get query parameter issue #510 + +## [v3.9.0] - 2022-07-21 - add support for http.Handler implementations to work as FilterFunction, issue #504 (thanks to https://github.com/ggicci) -## [v3.8.0] - 20221-06-06 +## [v3.8.0] - 2022-06-06 - use exact matching of allowed domain entries, issue #489 (#493) - this changes fixes [security] Authorization Bypass Through User-Controlled Key diff --git a/vendor/github.com/emicklei/go-restful/v3/README.md b/vendor/github.com/emicklei/go-restful/v3/README.md index 0625359..85da901 100644 --- a/vendor/github.com/emicklei/go-restful/v3/README.md +++ b/vendor/github.com/emicklei/go-restful/v3/README.md @@ -96,6 +96,10 @@ There are several hooks to customize the behavior of the go-restful package. - Compression - Encoders for other serializers - Use [jsoniter](https://github.com/json-iterator/go) by building this package using a build tag, e.g. `go build -tags=jsoniter .` +- Use the variable `MergePathStrategy` to change the behaviour of composing the Route path given a root path and a local route path + - versions >= 3.10.1 has set the value to `PathJoinStrategy` that fixes a reported [security issue](https://github.com/advisories/GHSA-r48q-9g5r-8q2h) but may cause your services not to work correctly anymore. + - versions <= 3.9 had the behaviour that can be restored in newer versions by setting the value to `TrimSlashStrategy`. 
+ - you can set value to a custom implementation (must implement MergePathStrategyFunc) ## Resources diff --git a/vendor/github.com/emicklei/go-restful/v3/constants.go b/vendor/github.com/emicklei/go-restful/v3/constants.go index 203439c..2328bde 100644 --- a/vendor/github.com/emicklei/go-restful/v3/constants.go +++ b/vendor/github.com/emicklei/go-restful/v3/constants.go @@ -7,12 +7,14 @@ package restful const ( MIME_XML = "application/xml" // Accept or Content-Type used in Consumes() and/or Produces() MIME_JSON = "application/json" // Accept or Content-Type used in Consumes() and/or Produces() + MIME_ZIP = "application/zip" // Accept or Content-Type used in Consumes() and/or Produces() MIME_OCTET = "application/octet-stream" // If Content-Type is not present in request, use the default HEADER_Allow = "Allow" HEADER_Accept = "Accept" HEADER_Origin = "Origin" HEADER_ContentType = "Content-Type" + HEADER_ContentDisposition = "Content-Disposition" HEADER_LastModified = "Last-Modified" HEADER_AcceptEncoding = "Accept-Encoding" HEADER_ContentEncoding = "Content-Encoding" diff --git a/vendor/github.com/emicklei/go-restful/v3/request.go b/vendor/github.com/emicklei/go-restful/v3/request.go index 5725a07..0020095 100644 --- a/vendor/github.com/emicklei/go-restful/v3/request.go +++ b/vendor/github.com/emicklei/go-restful/v3/request.go @@ -31,7 +31,8 @@ func NewRequest(httpRequest *http.Request) *Request { // a "Unable to unmarshal content of type:" response is returned. // Valid values are restful.MIME_JSON and restful.MIME_XML // Example: -// restful.DefaultRequestContentType(restful.MIME_JSON) +// +// restful.DefaultRequestContentType(restful.MIME_JSON) func DefaultRequestContentType(mime string) { defaultRequestContentType = mime } @@ -48,7 +49,7 @@ func (r *Request) PathParameters() map[string]string { // QueryParameter returns the (first) Query parameter value by its name func (r *Request) QueryParameter(name string) string { - return r.Request.FormValue(name) + return r.Request.URL.Query().Get(name) } // QueryParameters returns the all the query parameters values by name diff --git a/vendor/github.com/emicklei/go-restful/v3/response.go b/vendor/github.com/emicklei/go-restful/v3/response.go index 8f0b56a..a41a92c 100644 --- a/vendor/github.com/emicklei/go-restful/v3/response.go +++ b/vendor/github.com/emicklei/go-restful/v3/response.go @@ -109,6 +109,9 @@ func (r *Response) EntityWriter() (EntityReaderWriter, bool) { if DefaultResponseMimeType == MIME_XML { return entityAccessRegistry.accessorAt(MIME_XML) } + if DefaultResponseMimeType == MIME_ZIP { + return entityAccessRegistry.accessorAt(MIME_ZIP) + } // Fallback to whatever the route says it can produce. 
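Tying the README bullets above to the route_builder.go change further down: the merge strategy is just a package-level variable, so reverting to the pre-3.10 concatenation is a single assignment. A sketch, with the security caveat from the README still applying:

```go
// Sketch only: opting back into the <= 3.9 path-merge behaviour.
package main

import (
	restful "github.com/emicklei/go-restful/v3"
)

func init() {
	// Default is PathJoinStrategy (path.Join semantics, the PRISMA-2022-0227
	// fix). TrimSlashStrategy restores the old trim-and-join behaviour; a
	// custom MergePathStrategyFunc could be assigned here instead.
	restful.MergePathStrategy = restful.TrimSlashStrategy
}
```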
// https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html for _, each := range r.routeProduces { diff --git a/vendor/github.com/emicklei/go-restful/v3/route.go b/vendor/github.com/emicklei/go-restful/v3/route.go index 193f4a6..ea05b3d 100644 --- a/vendor/github.com/emicklei/go-restful/v3/route.go +++ b/vendor/github.com/emicklei/go-restful/v3/route.go @@ -164,7 +164,7 @@ func tokenizePath(path string) []string { if "/" == path { return nil } - return strings.Split(strings.Trim(path, "/"), "/") + return strings.Split(strings.TrimLeft(path, "/"), "/") } // for debugging @@ -176,3 +176,5 @@ func (r *Route) String() string { func (r *Route) EnableContentEncoding(enabled bool) { r.contentEncodingEnabled = &enabled } + +var TrimRightSlashEnabled = false diff --git a/vendor/github.com/emicklei/go-restful/v3/route_builder.go b/vendor/github.com/emicklei/go-restful/v3/route_builder.go index 23641b6..827f471 100644 --- a/vendor/github.com/emicklei/go-restful/v3/route_builder.go +++ b/vendor/github.com/emicklei/go-restful/v3/route_builder.go @@ -7,6 +7,7 @@ package restful import ( "fmt" "os" + "path" "reflect" "runtime" "strings" @@ -46,11 +47,12 @@ type RouteBuilder struct { // Do evaluates each argument with the RouteBuilder itself. // This allows you to follow DRY principles without breaking the fluent programming style. // Example: -// ws.Route(ws.DELETE("/{name}").To(t.deletePerson).Do(Returns200, Returns500)) // -// func Returns500(b *RouteBuilder) { -// b.Returns(500, "Internal Server Error", restful.ServiceError{}) -// } +// ws.Route(ws.DELETE("/{name}").To(t.deletePerson).Do(Returns200, Returns500)) +// +// func Returns500(b *RouteBuilder) { +// b.Returns(500, "Internal Server Error", restful.ServiceError{}) +// } func (b *RouteBuilder) Do(oneArgBlocks ...func(*RouteBuilder)) *RouteBuilder { for _, each := range oneArgBlocks { each(b) @@ -351,8 +353,28 @@ func (b *RouteBuilder) Build() Route { return route } -func concatPath(path1, path2 string) string { - return strings.TrimRight(path1, "/") + "/" + strings.TrimLeft(path2, "/") +type MergePathStrategyFunc func(rootPath, routePath string) string + +var ( + // behavior >= 3.10 + PathJoinStrategy = func(rootPath, routePath string) string { + return path.Join(rootPath, routePath) + } + + // behavior <= 3.9 + TrimSlashStrategy = func(rootPath, routePath string) string { + return strings.TrimRight(rootPath, "/") + "/" + strings.TrimLeft(routePath, "/") + } + + // MergePathStrategy is the active strategy for merging a Route path when building the routing of all WebServices. + // The value is set to PathJoinStrategy + // PathJoinStrategy is a strategy that is more strict [Security - PRISMA-2022-0227] + MergePathStrategy = PathJoinStrategy +) + +// merge two paths using the current (package global) merge path strategy. +func concatPath(rootPath, routePath string) string { + return MergePathStrategy(rootPath, routePath) } var anonymousFuncCount int32 diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go new file mode 100644 index 0000000..7accdb0 --- /dev/null +++ b/vendor/github.com/go-logr/logr/funcr/funcr.go @@ -0,0 +1,787 @@ +/* +Copyright 2021 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package funcr implements formatting of structured log messages and +// optionally captures the call site and timestamp. +// +// The simplest way to use it is via its implementation of a +// github.com/go-logr/logr.LogSink with output through an arbitrary +// "write" function. See New and NewJSON for details. +// +// Custom LogSinks +// +// For users who need more control, a funcr.Formatter can be embedded inside +// your own custom LogSink implementation. This is useful when the LogSink +// needs to implement additional methods, for example. +// +// Formatting +// +// This will respect logr.Marshaler, fmt.Stringer, and error interfaces for +// values which are being logged. When rendering a struct, funcr will use Go's +// standard JSON tags (all except "string"). +package funcr + +import ( + "bytes" + "encoding" + "fmt" + "path/filepath" + "reflect" + "runtime" + "strconv" + "strings" + "time" + + "github.com/go-logr/logr" +) + +// New returns a logr.Logger which is implemented by an arbitrary function. +func New(fn func(prefix, args string), opts Options) logr.Logger { + return logr.New(newSink(fn, NewFormatter(opts))) +} + +// NewJSON returns a logr.Logger which is implemented by an arbitrary function +// and produces JSON output. +func NewJSON(fn func(obj string), opts Options) logr.Logger { + fnWrapper := func(_, obj string) { + fn(obj) + } + return logr.New(newSink(fnWrapper, NewFormatterJSON(opts))) +} + +// Underlier exposes access to the underlying logging function. Since +// callers only have a logr.Logger, they have to know which +// implementation is in use, so this interface is less of an +// abstraction and more of a way to test type conversion. +type Underlier interface { + GetUnderlying() func(prefix, args string) +} + +func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink { + l := &fnlogger{ + Formatter: formatter, + write: fn, + } + // For skipping fnlogger.Info and fnlogger.Error. + l.Formatter.AddCallDepth(1) + return l +} + +// Options carries parameters which influence the way logs are generated. +type Options struct { + // LogCaller tells funcr to add a "caller" key to some or all log lines. + // This has some overhead, so some users might not want it. + LogCaller MessageClass + + // LogCallerFunc tells funcr to also log the calling function name. This + // has no effect if caller logging is not enabled (see Options.LogCaller). + LogCallerFunc bool + + // LogTimestamp tells funcr to add a "ts" key to log lines. This has some + // overhead, so some users might not want it. + LogTimestamp bool + + // TimestampFormat tells funcr how to render timestamps when LogTimestamp + // is enabled. If not specified, a default format will be used. For more + // details, see docs for Go's time.Layout. + TimestampFormat string + + // Verbosity tells funcr which V logs to produce. Higher values enable + // more logs. Info logs at or below this level will be written, while logs + // above this level will be discarded. + Verbosity int + + // RenderBuiltinsHook allows users to mutate the list of key-value pairs + // while a log line is being rendered. 
The kvList argument follows logr + // conventions - each pair of slice elements is comprised of a string key + // and an arbitrary value (verified and sanitized before calling this + // hook). The value returned must follow the same conventions. This hook + // can be used to audit or modify logged data. For example, you might want + // to prefix all of funcr's built-in keys with some string. This hook is + // only called for built-in (provided by funcr itself) key-value pairs. + // Equivalent hooks are offered for key-value pairs saved via + // logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and + // for user-provided pairs (see RenderArgsHook). + RenderBuiltinsHook func(kvList []interface{}) []interface{} + + // RenderValuesHook is the same as RenderBuiltinsHook, except that it is + // only called for key-value pairs saved via logr.Logger.WithValues. See + // RenderBuiltinsHook for more details. + RenderValuesHook func(kvList []interface{}) []interface{} + + // RenderArgsHook is the same as RenderBuiltinsHook, except that it is only + // called for key-value pairs passed directly to Info and Error. See + // RenderBuiltinsHook for more details. + RenderArgsHook func(kvList []interface{}) []interface{} + + // MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct + // that contains a struct, etc.) it may log. Every time it finds a struct, + // slice, array, or map the depth is increased by one. When the maximum is + // reached, the value will be converted to a string indicating that the max + // depth has been exceeded. If this field is not specified, a default + // value will be used. + MaxLogDepth int +} + +// MessageClass indicates which category or categories of messages to consider. +type MessageClass int + +const ( + // None ignores all message classes. + None MessageClass = iota + // All considers all message classes. + All + // Info only considers info messages. + Info + // Error only considers error messages. + Error +) + +// fnlogger inherits some of its LogSink implementation from Formatter +// and just needs to add some glue code. +type fnlogger struct { + Formatter + write func(prefix, args string) +} + +func (l fnlogger) WithName(name string) logr.LogSink { + l.Formatter.AddName(name) + return &l +} + +func (l fnlogger) WithValues(kvList ...interface{}) logr.LogSink { + l.Formatter.AddValues(kvList) + return &l +} + +func (l fnlogger) WithCallDepth(depth int) logr.LogSink { + l.Formatter.AddCallDepth(depth) + return &l +} + +func (l fnlogger) Info(level int, msg string, kvList ...interface{}) { + prefix, args := l.FormatInfo(level, msg, kvList) + l.write(prefix, args) +} + +func (l fnlogger) Error(err error, msg string, kvList ...interface{}) { + prefix, args := l.FormatError(err, msg, kvList) + l.write(prefix, args) +} + +func (l fnlogger) GetUnderlying() func(prefix, args string) { + return l.write +} + +// Assert conformance to the interfaces. +var _ logr.LogSink = &fnlogger{} +var _ logr.CallDepthLogSink = &fnlogger{} +var _ Underlier = &fnlogger{} + +// NewFormatter constructs a Formatter which emits a JSON-like key=value format. +func NewFormatter(opts Options) Formatter { + return newFormatter(opts, outputKeyValue) +} + +// NewFormatterJSON constructs a Formatter which emits strict JSON. +func NewFormatterJSON(opts Options) Formatter { + return newFormatter(opts, outputJSON) +} + +// Defaults for Options. 
+const defaultTimestampFormat = "2006-01-02 15:04:05.000000" +const defaultMaxLogDepth = 16 + +func newFormatter(opts Options, outfmt outputFormat) Formatter { + if opts.TimestampFormat == "" { + opts.TimestampFormat = defaultTimestampFormat + } + if opts.MaxLogDepth == 0 { + opts.MaxLogDepth = defaultMaxLogDepth + } + f := Formatter{ + outputFormat: outfmt, + prefix: "", + values: nil, + depth: 0, + opts: opts, + } + return f +} + +// Formatter is an opaque struct which can be embedded in a LogSink +// implementation. It should be constructed with NewFormatter. Some of +// its methods directly implement logr.LogSink. +type Formatter struct { + outputFormat outputFormat + prefix string + values []interface{} + valuesStr string + depth int + opts Options +} + +// outputFormat indicates which outputFormat to use. +type outputFormat int + +const ( + // outputKeyValue emits a JSON-like key=value format, but not strict JSON. + outputKeyValue outputFormat = iota + // outputJSON emits strict JSON. + outputJSON +) + +// PseudoStruct is a list of key-value pairs that gets logged as a struct. +type PseudoStruct []interface{} + +// render produces a log line, ready to use. +func (f Formatter) render(builtins, args []interface{}) string { + // Empirically bytes.Buffer is faster than strings.Builder for this. + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + if f.outputFormat == outputJSON { + buf.WriteByte('{') + } + vals := builtins + if hook := f.opts.RenderBuiltinsHook; hook != nil { + vals = hook(f.sanitize(vals)) + } + f.flatten(buf, vals, false, false) // keys are ours, no need to escape + continuing := len(builtins) > 0 + if len(f.valuesStr) > 0 { + if continuing { + if f.outputFormat == outputJSON { + buf.WriteByte(',') + } else { + buf.WriteByte(' ') + } + } + continuing = true + buf.WriteString(f.valuesStr) + } + vals = args + if hook := f.opts.RenderArgsHook; hook != nil { + vals = hook(f.sanitize(vals)) + } + f.flatten(buf, vals, continuing, true) // escape user-provided keys + if f.outputFormat == outputJSON { + buf.WriteByte('}') + } + return buf.String() +} + +// flatten renders a list of key-value pairs into a buffer. If continuing is +// true, it assumes that the buffer has previous values and will emit a +// separator (which depends on the output format) before the first pair it +// writes. If escapeKeys is true, the keys are assumed to have +// non-JSON-compatible characters in them and must be evaluated for escapes. +// +// This function returns a potentially modified version of kvList, which +// ensures that there is a value for every key (adding a value if needed) and +// that each key is a string (substituting a key if needed). +func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing bool, escapeKeys bool) []interface{} { + // This logic overlaps with sanitize() but saves one type-cast per key, + // which can be measurable. + if len(kvList)%2 != 0 { + kvList = append(kvList, noValue) + } + for i := 0; i < len(kvList); i += 2 { + k, ok := kvList[i].(string) + if !ok { + k = f.nonStringKey(kvList[i]) + kvList[i] = k + } + v := kvList[i+1] + + if i > 0 || continuing { + if f.outputFormat == outputJSON { + buf.WriteByte(',') + } else { + // In theory the format could be something we don't understand. In + // practice, we control it, so it won't be. 
+				buf.WriteByte(' ')
+			}
+		}
+
+		if escapeKeys {
+			buf.WriteString(prettyString(k))
+		} else {
+			// this is faster
+			buf.WriteByte('"')
+			buf.WriteString(k)
+			buf.WriteByte('"')
+		}
+		if f.outputFormat == outputJSON {
+			buf.WriteByte(':')
+		} else {
+			buf.WriteByte('=')
+		}
+		buf.WriteString(f.pretty(v))
+	}
+	return kvList
+}
+
+func (f Formatter) pretty(value interface{}) string {
+	return f.prettyWithFlags(value, 0, 0)
+}
+
+const (
+	flagRawStruct = 0x1 // do not print braces on structs
+)
+
+// TODO: This is not fast. Most of the overhead goes here.
+func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) string {
+	if depth > f.opts.MaxLogDepth {
+		return `"<max-log-depth-exceeded>"`
+	}
+
+	// Handle types that take full control of logging.
+	if v, ok := value.(logr.Marshaler); ok {
+		// Replace the value with what the type wants to get logged.
+		// That then gets handled below via reflection.
+		value = invokeMarshaler(v)
+	}
+
+	// Handle types that want to format themselves.
+	switch v := value.(type) {
+	case fmt.Stringer:
+		value = invokeStringer(v)
+	case error:
+		value = invokeError(v)
+	}
+
+	// Handling the most common types without reflect is a small perf win.
+	switch v := value.(type) {
+	case bool:
+		return strconv.FormatBool(v)
+	case string:
+		return prettyString(v)
+	case int:
+		return strconv.FormatInt(int64(v), 10)
+	case int8:
+		return strconv.FormatInt(int64(v), 10)
+	case int16:
+		return strconv.FormatInt(int64(v), 10)
+	case int32:
+		return strconv.FormatInt(int64(v), 10)
+	case int64:
+		return strconv.FormatInt(int64(v), 10)
+	case uint:
+		return strconv.FormatUint(uint64(v), 10)
+	case uint8:
+		return strconv.FormatUint(uint64(v), 10)
+	case uint16:
+		return strconv.FormatUint(uint64(v), 10)
+	case uint32:
+		return strconv.FormatUint(uint64(v), 10)
+	case uint64:
+		return strconv.FormatUint(v, 10)
+	case uintptr:
+		return strconv.FormatUint(uint64(v), 10)
+	case float32:
+		return strconv.FormatFloat(float64(v), 'f', -1, 32)
+	case float64:
+		return strconv.FormatFloat(v, 'f', -1, 64)
+	case complex64:
+		return `"` + strconv.FormatComplex(complex128(v), 'f', -1, 64) + `"`
+	case complex128:
+		return `"` + strconv.FormatComplex(v, 'f', -1, 128) + `"`
+	case PseudoStruct:
+		buf := bytes.NewBuffer(make([]byte, 0, 1024))
+		v = f.sanitize(v)
+		if flags&flagRawStruct == 0 {
+			buf.WriteByte('{')
+		}
+		for i := 0; i < len(v); i += 2 {
+			if i > 0 {
+				buf.WriteByte(',')
+			}
+			k, _ := v[i].(string) // sanitize() above means no need to check success
+			// arbitrary keys might need escaping
+			buf.WriteString(prettyString(k))
+			buf.WriteByte(':')
+			buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1))
+		}
+		if flags&flagRawStruct == 0 {
+			buf.WriteByte('}')
+		}
+		return buf.String()
+	}
+
+	buf := bytes.NewBuffer(make([]byte, 0, 256))
+	t := reflect.TypeOf(value)
+	if t == nil {
+		return "null"
+	}
+	v := reflect.ValueOf(value)
+	switch t.Kind() {
+	case reflect.Bool:
+		return strconv.FormatBool(v.Bool())
+	case reflect.String:
+		return prettyString(v.String())
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return strconv.FormatInt(int64(v.Int()), 10)
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return strconv.FormatUint(uint64(v.Uint()), 10)
+	case reflect.Float32:
+		return strconv.FormatFloat(float64(v.Float()), 'f', -1, 32)
+	case reflect.Float64:
+		return strconv.FormatFloat(v.Float(), 'f', -1, 64)
+	case reflect.Complex64:
+		return `"` + strconv.FormatComplex(complex128(v.Complex()), 'f', -1, 64) + `"`
+	case reflect.Complex128:
+		return `"` + strconv.FormatComplex(v.Complex(), 'f', -1, 128) + `"`
+	case reflect.Struct:
+		if flags&flagRawStruct == 0 {
+			buf.WriteByte('{')
+		}
+		for i := 0; i < t.NumField(); i++ {
+			fld := t.Field(i)
+			if fld.PkgPath != "" {
+				// reflect says this field is only defined for non-exported fields.
+				continue
+			}
+			if !v.Field(i).CanInterface() {
+				// reflect isn't clear exactly what this means, but we can't use it.
+				continue
+			}
+			name := ""
+			omitempty := false
+			if tag, found := fld.Tag.Lookup("json"); found {
+				if tag == "-" {
+					continue
+				}
+				if comma := strings.Index(tag, ","); comma != -1 {
+					if n := tag[:comma]; n != "" {
+						name = n
+					}
+					rest := tag[comma:]
+					if strings.Contains(rest, ",omitempty,") || strings.HasSuffix(rest, ",omitempty") {
+						omitempty = true
+					}
+				} else {
+					name = tag
+				}
+			}
+			if omitempty && isEmpty(v.Field(i)) {
+				continue
+			}
+			if i > 0 {
+				buf.WriteByte(',')
+			}
+			if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" {
+				buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1))
+				continue
+			}
+			if name == "" {
+				name = fld.Name
+			}
+			// field names can't contain characters which need escaping
+			buf.WriteByte('"')
+			buf.WriteString(name)
+			buf.WriteByte('"')
+			buf.WriteByte(':')
+			buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1))
+		}
+		if flags&flagRawStruct == 0 {
+			buf.WriteByte('}')
+		}
+		return buf.String()
+	case reflect.Slice, reflect.Array:
+		buf.WriteByte('[')
+		for i := 0; i < v.Len(); i++ {
+			if i > 0 {
+				buf.WriteByte(',')
+			}
+			e := v.Index(i)
+			buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1))
+		}
+		buf.WriteByte(']')
+		return buf.String()
+	case reflect.Map:
+		buf.WriteByte('{')
+		// This does not sort the map keys, for best perf.
+		it := v.MapRange()
+		i := 0
+		for it.Next() {
+			if i > 0 {
+				buf.WriteByte(',')
+			}
+			// If a map key supports TextMarshaler, use it.
+			keystr := ""
+			if m, ok := it.Key().Interface().(encoding.TextMarshaler); ok {
+				txt, err := m.MarshalText()
+				if err != nil {
+					keystr = fmt.Sprintf("<error-MarshalText: %s>", err.Error())
+				} else {
+					keystr = string(txt)
+				}
+				keystr = prettyString(keystr)
+			} else {
+				// prettyWithFlags will produce already-escaped values
+				keystr = f.prettyWithFlags(it.Key().Interface(), 0, depth+1)
+				if t.Key().Kind() != reflect.String {
+					// JSON only does string keys. Unlike Go's standard JSON, we'll
+					// convert just about anything to a string.
+					keystr = prettyString(keystr)
+				}
+			}
+			buf.WriteString(keystr)
+			buf.WriteByte(':')
+			buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1))
+			i++
+		}
+		buf.WriteByte('}')
+		return buf.String()
+	case reflect.Ptr, reflect.Interface:
+		if v.IsNil() {
+			return "null"
+		}
+		return f.prettyWithFlags(v.Elem().Interface(), 0, depth)
+	}
+	return fmt.Sprintf(`"<unhandled-%s>"`, t.Kind().String())
+}
+
+func prettyString(s string) string {
+	// Avoid escaping (which does allocations) if we can.
+	if needsEscape(s) {
+		return strconv.Quote(s)
+	}
+	b := bytes.NewBuffer(make([]byte, 0, 1024))
+	b.WriteByte('"')
+	b.WriteString(s)
+	b.WriteByte('"')
+	return b.String()
+}
+
+// needsEscape determines whether the input string needs to be escaped or not,
+// without doing any allocations.
+func needsEscape(s string) bool {
+	for _, r := range s {
+		if !strconv.IsPrint(r) || r == '\\' || r == '"' {
+			return true
+		}
+	}
+	return false
+}
+
+func isEmpty(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+		return v.Len() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Complex64, reflect.Complex128:
+		return v.Complex() == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	}
+	return false
+}
+
+func invokeMarshaler(m logr.Marshaler) (ret interface{}) {
+	defer func() {
+		if r := recover(); r != nil {
+			ret = fmt.Sprintf("<panic: %s>", r)
+		}
+	}()
+	return m.MarshalLog()
+}
+
+func invokeStringer(s fmt.Stringer) (ret string) {
+	defer func() {
+		if r := recover(); r != nil {
+			ret = fmt.Sprintf("<panic: %s>", r)
+		}
+	}()
+	return s.String()
+}
+
+func invokeError(e error) (ret string) {
+	defer func() {
+		if r := recover(); r != nil {
+			ret = fmt.Sprintf("<panic: %s>", r)
+		}
+	}()
+	return e.Error()
+}
+
+// Caller represents the original call site for a log line, after considering
+// logr.Logger.WithCallDepth and logr.Logger.WithCallStackHelper. The File and
+// Line fields will always be provided, while the Func field is optional.
+// Users can set the render hook fields in Options to examine logged key-value
+// pairs, one of which will be {"caller", Caller} if the Options.LogCaller
+// field is enabled for the given MessageClass.
+type Caller struct {
+	// File is the basename of the file for this call site.
+	File string `json:"file"`
+	// Line is the line number in the file for this call site.
+	Line int `json:"line"`
+	// Func is the function name for this call site, or empty if
+	// Options.LogCallerFunc is not enabled.
+	Func string `json:"function,omitempty"`
+}
+
+func (f Formatter) caller() Caller {
+	// +1 for this frame, +1 for Info/Error.
+	pc, file, line, ok := runtime.Caller(f.depth + 2)
+	if !ok {
+		return Caller{"<unknown>", 0, ""}
+	}
+	fn := ""
+	if f.opts.LogCallerFunc {
+		if fp := runtime.FuncForPC(pc); fp != nil {
+			fn = fp.Name()
+		}
+	}
+
+	return Caller{filepath.Base(file), line, fn}
+}
+
+const noValue = "<no-value>"
+
+func (f Formatter) nonStringKey(v interface{}) string {
+	return fmt.Sprintf("<non-string-key: %s>", f.snippet(v))
+}
+
+// snippet produces a short snippet string of an arbitrary value.
+func (f Formatter) snippet(v interface{}) string {
+	const snipLen = 16
+
+	snip := f.pretty(v)
+	if len(snip) > snipLen {
+		snip = snip[:snipLen]
+	}
+	return snip
+}
+
+// sanitize ensures that a list of key-value pairs has a value for every key
+// (adding a value if needed) and that each key is a string (substituting a key
+// if needed).
+func (f Formatter) sanitize(kvList []interface{}) []interface{} {
+	if len(kvList)%2 != 0 {
+		kvList = append(kvList, noValue)
+	}
+	for i := 0; i < len(kvList); i += 2 {
+		_, ok := kvList[i].(string)
+		if !ok {
+			kvList[i] = f.nonStringKey(kvList[i])
+		}
+	}
+	return kvList
+}
+
+// Init configures this Formatter from runtime info, such as the call depth
+// imposed by logr itself.
+// Note that this receiver is a pointer, so depth can be saved.
+func (f *Formatter) Init(info logr.RuntimeInfo) {
+	f.depth += info.CallDepth
+}
+
+// Enabled checks whether an info message at the given level should be logged.
+func (f Formatter) Enabled(level int) bool { + return level <= f.opts.Verbosity +} + +// GetDepth returns the current depth of this Formatter. This is useful for +// implementations which do their own caller attribution. +func (f Formatter) GetDepth() int { + return f.depth +} + +// FormatInfo renders an Info log message into strings. The prefix will be +// empty when no names were set (via AddNames), or when the output is +// configured for JSON. +func (f Formatter) FormatInfo(level int, msg string, kvList []interface{}) (prefix, argsStr string) { + args := make([]interface{}, 0, 64) // using a constant here impacts perf + prefix = f.prefix + if f.outputFormat == outputJSON { + args = append(args, "logger", prefix) + prefix = "" + } + if f.opts.LogTimestamp { + args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat)) + } + if policy := f.opts.LogCaller; policy == All || policy == Info { + args = append(args, "caller", f.caller()) + } + args = append(args, "level", level, "msg", msg) + return prefix, f.render(args, kvList) +} + +// FormatError renders an Error log message into strings. The prefix will be +// empty when no names were set (via AddNames), or when the output is +// configured for JSON. +func (f Formatter) FormatError(err error, msg string, kvList []interface{}) (prefix, argsStr string) { + args := make([]interface{}, 0, 64) // using a constant here impacts perf + prefix = f.prefix + if f.outputFormat == outputJSON { + args = append(args, "logger", prefix) + prefix = "" + } + if f.opts.LogTimestamp { + args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat)) + } + if policy := f.opts.LogCaller; policy == All || policy == Error { + args = append(args, "caller", f.caller()) + } + args = append(args, "msg", msg) + var loggableErr interface{} + if err != nil { + loggableErr = err.Error() + } + args = append(args, "error", loggableErr) + return f.prefix, f.render(args, kvList) +} + +// AddName appends the specified name. funcr uses '/' characters to separate +// name elements. Callers should not pass '/' in the provided name string, but +// this library does not actually enforce that. +func (f *Formatter) AddName(name string) { + if len(f.prefix) > 0 { + f.prefix += "/" + } + f.prefix += name +} + +// AddValues adds key-value pairs to the set of saved values to be logged with +// each log line. +func (f *Formatter) AddValues(kvList []interface{}) { + // Three slice args forces a copy. + n := len(f.values) + f.values = append(f.values[:n:n], kvList...) + + vals := f.values + if hook := f.opts.RenderValuesHook; hook != nil { + vals = hook(f.sanitize(vals)) + } + + // Pre-render values, so we don't have to do it on each Info/Error call. + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + f.flatten(buf, vals, false, true) // escape user-provided keys + f.valuesStr = buf.String() +} + +// AddCallDepth increases the number of stack-frames to skip when attributing +// the log line to a file and line. 
+func (f *Formatter) AddCallDepth(depth int) { + f.depth += depth +} diff --git a/vendor/github.com/go-openapi/jsonpointer/.travis.yml b/vendor/github.com/go-openapi/jsonpointer/.travis.yml deleted file mode 100644 index 03a22fe..0000000 --- a/vendor/github.com/go-openapi/jsonpointer/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- 1.14.x -- 1.15.x -install: -- GO111MODULE=off go get -u gotest.tools/gotestsum -env: -- GO111MODULE=on -language: go -notifications: - slack: - secure: a5VgoiwB1G/AZqzmephPZIhEB9avMlsWSlVnM1dSAtYAwdrQHGTQxAmpOxYIoSPDhWNN5bfZmjd29++UlTwLcHSR+e0kJhH6IfDlsHj/HplNCJ9tyI0zYc7XchtdKgeMxMzBKCzgwFXGSbQGydXTliDNBo0HOzmY3cou/daMFTP60K+offcjS+3LRAYb1EroSRXZqrk1nuF/xDL3792DZUdPMiFR/L/Df6y74D6/QP4sTkTDFQitz4Wy/7jbsfj8dG6qK2zivgV6/l+w4OVjFkxVpPXogDWY10vVXNVynqxfJ7to2d1I9lNCHE2ilBCkWMIPdyJF7hjF8pKW+82yP4EzRh0vu8Xn0HT5MZpQxdRY/YMxNrWaG7SxsoEaO4q5uhgdzAqLYY3TRa7MjIK+7Ur+aqOeTXn6OKwVi0CjvZ6mIU3WUKSwiwkFZMbjRAkSb5CYwMEfGFO/z964xz83qGt6WAtBXNotqCQpTIiKtDHQeLOMfksHImCg6JLhQcWBVxamVgu0G3Pdh8Y6DyPnxraXY95+QDavbjqv7TeYT9T/FNnrkXaTTK0s4iWE5H4ACU0Qvz0wUYgfQrZv0/Hp7V17+rabUwnzYySHCy9SWX/7OV9Cfh31iMp9ZIffr76xmmThtOEqs8TrTtU6BWI3rWwvA9cXQipZTVtL0oswrGw= -script: -- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/jsonreference/.golangci.yml b/vendor/github.com/go-openapi/jsonreference/.golangci.yml index f9381ae..013fc19 100644 --- a/vendor/github.com/go-openapi/jsonreference/.golangci.yml +++ b/vendor/github.com/go-openapi/jsonreference/.golangci.yml @@ -1,8 +1,6 @@ linters-settings: govet: check-shadowing: true - golint: - min-confidence: 0 gocyclo: min-complexity: 30 maligned: @@ -12,6 +10,8 @@ linters-settings: goconst: min-len: 2 min-occurrences: 4 + paralleltest: + ignore-missing: true linters: enable-all: true disable: @@ -39,3 +39,12 @@ linters: - nestif - godot - errorlint + - varcheck + - interfacer + - deadcode + - golint + - ifshort + - structcheck + - nosnakecase + - varnamelen + - exhaustruct diff --git a/vendor/github.com/go-openapi/jsonreference/.travis.yml b/vendor/github.com/go-openapi/jsonreference/.travis.yml deleted file mode 100644 index 05482f4..0000000 --- a/vendor/github.com/go-openapi/jsonreference/.travis.yml +++ /dev/null @@ -1,24 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- 1.14.x -- 1.x -install: -- go get gotest.tools/gotestsum -jobs: - include: - # include linting job, but only for latest go version and amd64 arch - - go: 1.x - arch: amd64 - install: - go get github.com/golangci/golangci-lint/cmd/golangci-lint - script: - - golangci-lint run --new-from-rev master -env: -- GO111MODULE=on -language: go -notifications: - slack: - secure: OpQG/36F7DSF00HLm9WZMhyqFCYYyYTsVDObW226cWiR8PWYiNfLZiSEvIzT1Gx4dDjhigKTIqcLhG34CkL5iNXDjm9Yyo2RYhQPlK8NErNqUEXuBqn4RqYHW48VGhEhOyDd4Ei0E2FN5ZbgpvHgtpkdZ6XDi64r3Ac89isP9aPHXQTuv2Jog6b4/OKKiUTftLcTIst0p4Cp3gqOJWf1wnoj+IadWiECNVQT6zb47IYjtyw6+uV8iUjTzdKcRB6Zc6b4Dq7JAg1Zd7Jfxkql3hlKp4PNlRf9Cy7y5iA3G7MLyg3FcPX5z2kmcyPt2jOTRMBWUJ5zIQpOxizAcN8WsT3WWBL5KbuYK6k0PzujrIDLqdxGpNmjkkMfDBT9cKmZpm2FdW+oZgPFJP+oKmAo4u4KJz/vjiPTXgQlN5bmrLuRMCp+AwC5wkIohTqWZVPE2TK6ZSnMYcg/W39s+RP/9mJoyryAvPSpBOLTI+biCgaUCTOAZxNTWpMFc3tPYntc41WWkdKcooZ9JA5DwfcaVFyTGQ3YXz+HvX6G1z/gW0Q/A4dBi9mj2iE1xm7tRTT+4VQ2AXFvSEI1HJpfPgYnwAtwOD1v3Qm2EUHk9sCdtEDR4wVGEPIVn44GnwFMnGKx9JWppMPYwFu3SVDdHt+E+LOlhZUply11Aa+IVrT2KUQ= -script: -- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt 
-covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go b/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go index 8956c30..f0610cf 100644 --- a/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go +++ b/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go @@ -7,8 +7,8 @@ import ( ) const ( - defaultHttpPort = ":80" - defaultHttpsPort = ":443" + defaultHTTPPort = ":80" + defaultHTTPSPort = ":443" ) // Regular expressions used by the normalizations @@ -18,18 +18,24 @@ var rxDupSlashes = regexp.MustCompile(`/{2,}`) // NormalizeURL will normalize the specified URL // This was added to replace a previous call to the no longer maintained purell library: // The call that was used looked like the following: -// url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes)) +// +// url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes)) // // To explain all that was included in the call above, purell.FlagsSafe was really just the following: -// - FlagLowercaseScheme -// - FlagLowercaseHost -// - FlagRemoveDefaultPort -// - FlagRemoveDuplicateSlashes (and this was mixed in with the |) +// - FlagLowercaseScheme +// - FlagLowercaseHost +// - FlagRemoveDefaultPort +// - FlagRemoveDuplicateSlashes (and this was mixed in with the |) +// +// This also normalizes the URL into its urlencoded form by removing RawPath and RawFragment. func NormalizeURL(u *url.URL) { lowercaseScheme(u) lowercaseHost(u) removeDefaultPort(u) removeDuplicateSlashes(u) + + u.RawPath = "" + u.RawFragment = "" } func lowercaseScheme(u *url.URL) { @@ -48,7 +54,7 @@ func removeDefaultPort(u *url.URL) { if len(u.Host) > 0 { scheme := strings.ToLower(u.Scheme) u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string { - if (scheme == "http" && val == defaultHttpPort) || (scheme == "https" && val == defaultHttpsPort) { + if (scheme == "http" && val == defaultHTTPPort) || (scheme == "https" && val == defaultHTTPSPort) { return "" } return val diff --git a/vendor/github.com/go-task/slim-sprig/.editorconfig b/vendor/github.com/go-task/slim-sprig/.editorconfig new file mode 100644 index 0000000..b0c9536 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/.editorconfig @@ -0,0 +1,14 @@ +# editorconfig.org + +root = true + +[*] +insert_final_newline = true +charset = utf-8 +trim_trailing_whitespace = true +indent_style = tab +indent_size = 8 + +[*.{md,yml,yaml,json}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-task/slim-sprig/.gitattributes b/vendor/github.com/go-task/slim-sprig/.gitattributes new file mode 100644 index 0000000..176a458 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/.gitattributes @@ -0,0 +1 @@ +* text=auto diff --git a/vendor/github.com/go-task/slim-sprig/.gitignore b/vendor/github.com/go-task/slim-sprig/.gitignore new file mode 100644 index 0000000..5e3002f --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/.gitignore @@ -0,0 +1,2 @@ +vendor/ +/.glide diff --git a/vendor/github.com/go-task/slim-sprig/CHANGELOG.md b/vendor/github.com/go-task/slim-sprig/CHANGELOG.md new file mode 100644 index 0000000..61d8ebf --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/CHANGELOG.md @@ -0,0 +1,364 @@ +# Changelog + +## Release 3.2.0 (2020-12-14) + +### Added + +- #211: Added randInt function (thanks @kochurovro) +- #223: Added fromJson and mustFromJson functions (thanks @mholt) +- #242: Added a bcrypt function 
(thanks @robbiet480) +- #253: Added randBytes function (thanks @MikaelSmith) +- #254: Added dig function for dicts (thanks @nyarly) +- #257: Added regexQuoteMeta for quoting regex metadata (thanks @rheaton) +- #261: Added filepath functions osBase, osDir, osExt, osClean, osIsAbs (thanks @zugl) +- #268: Added and and all functions for testing conditions (thanks @phuslu) +- #181: Added float64 arithmetic addf, add1f, subf, divf, mulf, maxf, and minf + (thanks @andrewmostello) +- #265: Added chunk function to split array into smaller arrays (thanks @karelbilek) +- #270: Extend certificate functions to handle non-RSA keys + add support for + ed25519 keys (thanks @misberner) + +### Changed + +- Removed testing and support for Go 1.12. ed25519 support requires Go 1.13 or newer +- Using semver 3.1.1 and mergo 0.3.11 + +### Fixed + +- #249: Fix htmlDateInZone example (thanks @spawnia) + +NOTE: The dependency github.com/imdario/mergo reverted the breaking change in +0.3.9 via 0.3.10 release. + +## Release 3.1.0 (2020-04-16) + +NOTE: The dependency github.com/imdario/mergo made a behavior change in 0.3.9 +that impacts sprig functionality. Do not use sprig with a version newer than 0.3.8. + +### Added + +- #225: Added support for generating htpasswd hash (thanks @rustycl0ck) +- #224: Added duration filter (thanks @frebib) +- #205: Added `seq` function (thanks @thadc23) + +### Changed + +- #203: Unlambda functions with correct signature (thanks @muesli) +- #236: Updated the license formatting for GitHub display purposes +- #238: Updated package dependency versions. Note, mergo not updated to 0.3.9 + as it causes a breaking change for sprig. That issue is tracked at + https://github.com/imdario/mergo/issues/139 + +### Fixed + +- #229: Fix `seq` example in docs (thanks @kalmant) + +## Release 3.0.2 (2019-12-13) + +### Fixed + +- #220: Updating to semver v3.0.3 to fix issue with <= ranges +- #218: fix typo elyptical->elliptic in ecdsa key description (thanks @laverya) + +## Release 3.0.1 (2019-12-08) + +### Fixed + +- #212: Updated semver fixing broken constraint checking with ^0.0 + +## Release 3.0.0 (2019-10-02) + +### Added + +- #187: Added durationRound function (thanks @yjp20) +- #189: Added numerous template functions that return errors rather than panic (thanks @nrvnrvn) +- #193: Added toRawJson support (thanks @Dean-Coakley) +- #197: Added get support to dicts (thanks @Dean-Coakley) + +### Changed + +- #186: Moving dependency management to Go modules +- #186: Updated semver to v3. This has changes in the way ^ is handled +- #194: Updated documentation on merging and how it copies. 
Added example using deepCopy +- #196: trunc now supports negative values (thanks @Dean-Coakley) + +## Release 2.22.0 (2019-10-02) + +### Added + +- #173: Added getHostByName function to resolve dns names to ips (thanks @fcgravalos) +- #195: Added deepCopy function for use with dicts + +### Changed + +- Updated merge and mergeOverwrite documentation to explain copying and how to + use deepCopy with it + +## Release 2.21.0 (2019-09-18) + +### Added + +- #122: Added encryptAES/decryptAES functions (thanks @n0madic) +- #128: Added toDecimal support (thanks @Dean-Coakley) +- #169: Added list contcat (thanks @astorath) +- #174: Added deepEqual function (thanks @bonifaido) +- #170: Added url parse and join functions (thanks @astorath) + +### Changed + +- #171: Updated glide config for Google UUID to v1 and to add ranges to semver and testify + +### Fixed + +- #172: Fix semver wildcard example (thanks @piepmatz) +- #175: Fix dateInZone doc example (thanks @s3than) + +## Release 2.20.0 (2019-06-18) + +### Added + +- #164: Adding function to get unix epoch for a time (@mattfarina) +- #166: Adding tests for date_in_zone (@mattfarina) + +### Changed + +- #144: Fix function comments based on best practices from Effective Go (@CodeLingoTeam) +- #150: Handles pointer type for time.Time in "htmlDate" (@mapreal19) +- #161, #157, #160, #153, #158, #156, #155, #159, #152 documentation updates (@badeadan) + +### Fixed + +## Release 2.19.0 (2019-03-02) + +IMPORTANT: This release reverts a change from 2.18.0 + +In the previous release (2.18), we prematurely merged a partial change to the crypto functions that led to creating two sets of crypto functions (I blame @technosophos -- since that's me). This release rolls back that change, and does what was originally intended: It alters the existing crypto functions to use secure random. + +We debated whether this classifies as a change worthy of major revision, but given the proximity to the last release, we have decided that treating 2.18 as a faulty release is the correct course of action. We apologize for any inconvenience. + +### Changed + +- Fix substr panic 35fb796 (Alexey igrychev) +- Remove extra period 1eb7729 (Matthew Lorimor) +- Make random string functions use crypto by default 6ceff26 (Matthew Lorimor) +- README edits/fixes/suggestions 08fe136 (Lauri Apple) + + +## Release 2.18.0 (2019-02-12) + +### Added + +- Added mergeOverwrite function +- cryptographic functions that use secure random (see fe1de12) + +### Changed + +- Improve documentation of regexMatch function, resolves #139 90b89ce (Jan Tagscherer) +- Handle has for nil list 9c10885 (Daniel Cohen) +- Document behaviour of mergeOverwrite fe0dbe9 (Lukas Rieder) +- doc: adds missing documentation. 4b871e6 (Fernandez Ludovic) +- Replace outdated goutils imports 01893d2 (Matthew Lorimor) +- Surface crypto secure random strings from goutils fe1de12 (Matthew Lorimor) +- Handle untyped nil values as paramters to string functions 2b2ec8f (Morten Torkildsen) + +### Fixed + +- Fix dict merge issue and provide mergeOverwrite .dst .src1 to overwrite from src -> dst 4c59c12 (Lukas Rieder) +- Fix substr var names and comments d581f80 (Dean Coakley) +- Fix substr documentation 2737203 (Dean Coakley) + +## Release 2.17.1 (2019-01-03) + +### Fixed + +The 2.17.0 release did not have a version pinned for xstrings, which caused compilation failures when xstrings < 1.2 was used. This adds the correct version string to glide.yaml. 
+ +## Release 2.17.0 (2019-01-03) + +### Added + +- adds alder32sum function and test 6908fc2 (marshallford) +- Added kebabcase function ca331a1 (Ilyes512) + +### Changed + +- Update goutils to 1.1.0 4e1125d (Matt Butcher) + +### Fixed + +- Fix 'has' documentation e3f2a85 (dean-coakley) +- docs(dict): fix typo in pick example dc424f9 (Dustin Specker) +- fixes spelling errors... not sure how that happened 4cf188a (marshallford) + +## Release 2.16.0 (2018-08-13) + +### Added + +- add splitn function fccb0b0 (Helgi Þorbjörnsson) +- Add slice func df28ca7 (gongdo) +- Generate serial number a3bdffd (Cody Coons) +- Extract values of dict with values function df39312 (Lawrence Jones) + +### Changed + +- Modify panic message for list.slice ae38335 (gongdo) +- Minor improvement in code quality - Removed an unreachable piece of code at defaults.go#L26:6 - Resolve formatting issues. 5834241 (Abhishek Kashyap) +- Remove duplicated documentation 1d97af1 (Matthew Fisher) +- Test on go 1.11 49df809 (Helgi Þormar Þorbjörnsson) + +### Fixed + +- Fix file permissions c5f40b5 (gongdo) +- Fix example for buildCustomCert 7779e0d (Tin Lam) + +## Release 2.15.0 (2018-04-02) + +### Added + +- #68 and #69: Add json helpers to docs (thanks @arunvelsriram) +- #66: Add ternary function (thanks @binoculars) +- #67: Allow keys function to take multiple dicts (thanks @binoculars) +- #89: Added sha1sum to crypto function (thanks @benkeil) +- #81: Allow customizing Root CA that used by genSignedCert (thanks @chenzhiwei) +- #92: Add travis testing for go 1.10 +- #93: Adding appveyor config for windows testing + +### Changed + +- #90: Updating to more recent dependencies +- #73: replace satori/go.uuid with google/uuid (thanks @petterw) + +### Fixed + +- #76: Fixed documentation typos (thanks @Thiht) +- Fixed rounding issue on the `ago` function. Note, the removes support for Go 1.8 and older + +## Release 2.14.1 (2017-12-01) + +### Fixed + +- #60: Fix typo in function name documentation (thanks @neil-ca-moore) +- #61: Removing line with {{ due to blocking github pages genertion +- #64: Update the list functions to handle int, string, and other slices for compatibility + +## Release 2.14.0 (2017-10-06) + +This new version of Sprig adds a set of functions for generating and working with SSL certificates. 
+ +- `genCA` generates an SSL Certificate Authority +- `genSelfSignedCert` generates an SSL self-signed certificate +- `genSignedCert` generates an SSL certificate and key based on a given CA + +## Release 2.13.0 (2017-09-18) + +This release adds new functions, including: + +- `regexMatch`, `regexFindAll`, `regexFind`, `regexReplaceAll`, `regexReplaceAllLiteral`, and `regexSplit` to work with regular expressions +- `floor`, `ceil`, and `round` math functions +- `toDate` converts a string to a date +- `nindent` is just like `indent` but also prepends a new line +- `ago` returns the time from `time.Now` + +### Added + +- #40: Added basic regex functionality (thanks @alanquillin) +- #41: Added ceil floor and round functions (thanks @alanquillin) +- #48: Added toDate function (thanks @andreynering) +- #50: Added nindent function (thanks @binoculars) +- #46: Added ago function (thanks @slayer) + +### Changed + +- #51: Updated godocs to include new string functions (thanks @curtisallen) +- #49: Added ability to merge multiple dicts (thanks @binoculars) + +## Release 2.12.0 (2017-05-17) + +- `snakecase`, `camelcase`, and `shuffle` are three new string functions +- `fail` allows you to bail out of a template render when conditions are not met + +## Release 2.11.0 (2017-05-02) + +- Added `toJson` and `toPrettyJson` +- Added `merge` +- Refactored documentation + +## Release 2.10.0 (2017-03-15) + +- Added `semver` and `semverCompare` for Semantic Versions +- `list` replaces `tuple` +- Fixed issue with `join` +- Added `first`, `last`, `intial`, `rest`, `prepend`, `append`, `toString`, `toStrings`, `sortAlpha`, `reverse`, `coalesce`, `pluck`, `pick`, `compact`, `keys`, `omit`, `uniq`, `has`, `without` + +## Release 2.9.0 (2017-02-23) + +- Added `splitList` to split a list +- Added crypto functions of `genPrivateKey` and `derivePassword` + +## Release 2.8.0 (2016-12-21) + +- Added access to several path functions (`base`, `dir`, `clean`, `ext`, and `abs`) +- Added functions for _mutating_ dictionaries (`set`, `unset`, `hasKey`) + +## Release 2.7.0 (2016-12-01) + +- Added `sha256sum` to generate a hash of an input +- Added functions to convert a numeric or string to `int`, `int64`, `float64` + +## Release 2.6.0 (2016-10-03) + +- Added a `uuidv4` template function for generating UUIDs inside of a template. + +## Release 2.5.0 (2016-08-19) + +- New `trimSuffix`, `trimPrefix`, `hasSuffix`, and `hasPrefix` functions +- New aliases have been added for a few functions that didn't follow the naming conventions (`trimAll` and `abbrevBoth`) +- `trimall` and `abbrevboth` (notice the case) are deprecated and will be removed in 3.0.0 + +## Release 2.4.0 (2016-08-16) + +- Adds two functions: `until` and `untilStep` + +## Release 2.3.0 (2016-06-21) + +- cat: Concatenate strings with whitespace separators. +- replace: Replace parts of a string: `replace " " "-" "Me First"` renders "Me-First" +- plural: Format plurals: `len "foo" | plural "one foo" "many foos"` renders "many foos" +- indent: Indent blocks of text in a way that is sensitive to "\n" characters. + +## Release 2.2.0 (2016-04-21) + +- Added a `genPrivateKey` function (Thanks @bacongobbler) + +## Release 2.1.0 (2016-03-30) + +- `default` now prints the default value when it does not receive a value down the pipeline. It is much safer now to do `{{.Foo | default "bar"}}`. +- Added accessors for "hermetic" functions. These return only functions that, when given the same input, produce the same output. 
+ +## Release 2.0.0 (2016-03-29) + +Because we switched from `int` to `int64` as the return value for all integer math functions, the library's major version number has been incremented. + +- `min` complements `max` (formerly `biggest`) +- `empty` indicates that a value is the empty value for its type +- `tuple` creates a tuple inside of a template: `{{$t := tuple "a", "b" "c"}}` +- `dict` creates a dictionary inside of a template `{{$d := dict "key1" "val1" "key2" "val2"}}` +- Date formatters have been added for HTML dates (as used in `date` input fields) +- Integer math functions can convert from a number of types, including `string` (via `strconv.ParseInt`). + +## Release 1.2.0 (2016-02-01) + +- Added quote and squote +- Added b32enc and b32dec +- add now takes varargs +- biggest now takes varargs + +## Release 1.1.0 (2015-12-29) + +- Added #4: Added contains function. strings.Contains, but with the arguments + switched to simplify common pipelines. (thanks krancour) +- Added Travis-CI testing support + +## Release 1.0.0 (2015-12-23) + +- Initial release diff --git a/vendor/github.com/go-task/slim-sprig/LICENSE.txt b/vendor/github.com/go-task/slim-sprig/LICENSE.txt new file mode 100644 index 0000000..f311b1e --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (C) 2013-2020 Masterminds + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/go-task/slim-sprig/README.md b/vendor/github.com/go-task/slim-sprig/README.md new file mode 100644 index 0000000..7257947 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/README.md @@ -0,0 +1,73 @@ +# Slim-Sprig: Template functions for Go templates [![GoDoc](https://godoc.org/github.com/go-task/slim-sprig?status.svg)](https://godoc.org/github.com/go-task/slim-sprig) [![Go Report Card](https://goreportcard.com/badge/github.com/go-task/slim-sprig)](https://goreportcard.com/report/github.com/go-task/slim-sprig) + +Slim-Sprig is a fork of [Sprig](https://github.com/Masterminds/sprig), but with +all functions that depend on external (non standard library) or crypto packages +removed. +The reason for this is to make this library more lightweight. Most of these +functions (specially crypto ones) are not needed on most apps, but costs a lot +in terms of binary size and compilation time. + +## Usage + +**Template developers**: Please use Slim-Sprig's [function documentation](https://go-task.github.io/slim-sprig/) for +detailed instructions and code snippets for the >100 template functions available. 
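For quick orientation on the template-developer workflow the vendored README describes, here is a minimal, self-contained sketch. It is not part of the vendored files; it assumes only functions visible in the vendored `functions.go` (`upper`, `repeat`, `list`, `join`) and the exported `TxtFuncMap`:

```go
package main

import (
	"os"
	"text/template"

	sprig "github.com/go-task/slim-sprig"
)

func main() {
	// TxtFuncMap wires slim-sprig's functions into text/template.
	tpl := template.Must(
		template.New("demo").
			Funcs(sprig.TxtFuncMap()).
			Parse(`{{ .Name | upper | repeat 2 }} {{ list 1 2 3 | join "-" }}`),
	)
	// Should print: GOPHERGOPHER 1-2-3
	if err := tpl.Execute(os.Stdout, map[string]string{"Name": "gopher"}); err != nil {
		panic(err)
	}
}
```

Note how piped arguments arrive last (`repeat 2` receives the string from the pipeline); that argument-order convention is one slim-sprig inherits from Sprig to keep pipelines readable.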
+ +**Go developers**: If you'd like to include Slim-Sprig as a library in your program, +our API documentation is available [at GoDoc.org](http://godoc.org/github.com/go-task/slim-sprig). + +For standard usage, read on. + +### Load the Slim-Sprig library + +To load the Slim-Sprig `FuncMap`: + +```go + +import ( + "html/template" + + "github.com/go-task/slim-sprig" +) + +// This example illustrates that the FuncMap *must* be set before the +// templates themselves are loaded. +tpl := template.Must( + template.New("base").Funcs(sprig.FuncMap()).ParseGlob("*.html") +) +``` + +### Calling the functions inside of templates + +By convention, all functions are lowercase. This seems to follow the Go +idiom for template functions (as opposed to template methods, which are +TitleCase). For example, this: + +``` +{{ "hello!" | upper | repeat 5 }} +``` + +produces this: + +``` +HELLO!HELLO!HELLO!HELLO!HELLO! +``` + +## Principles Driving Our Function Selection + +We followed these principles to decide which functions to add and how to implement them: + +- Use template functions to build layout. The following + types of operations are within the domain of template functions: + - Formatting + - Layout + - Simple type conversions + - Utilities that assist in handling common formatting and layout needs (e.g. arithmetic) +- Template functions should not return errors unless there is no way to print + a sensible value. For example, converting a string to an integer should not + produce an error if conversion fails. Instead, it should display a default + value. +- Simple math is necessary for grid layouts, pagers, and so on. Complex math + (anything other than arithmetic) should be done outside of templates. +- Template functions only deal with the data passed into them. They never retrieve + data from a source. +- Finally, do not override core Go template functions. diff --git a/vendor/github.com/go-task/slim-sprig/Taskfile.yml b/vendor/github.com/go-task/slim-sprig/Taskfile.yml new file mode 100644 index 0000000..cdcfd22 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/Taskfile.yml @@ -0,0 +1,12 @@ +# https://taskfile.dev + +version: '2' + +tasks: + default: + cmds: + - task: test + + test: + cmds: + - go test -v . diff --git a/vendor/github.com/go-task/slim-sprig/crypto.go b/vendor/github.com/go-task/slim-sprig/crypto.go new file mode 100644 index 0000000..d06e516 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/crypto.go @@ -0,0 +1,24 @@ +package sprig + +import ( + "crypto/sha1" + "crypto/sha256" + "encoding/hex" + "fmt" + "hash/adler32" +) + +func sha256sum(input string) string { + hash := sha256.Sum256([]byte(input)) + return hex.EncodeToString(hash[:]) +} + +func sha1sum(input string) string { + hash := sha1.Sum([]byte(input)) + return hex.EncodeToString(hash[:]) +} + +func adler32sum(input string) string { + hash := adler32.Checksum([]byte(input)) + return fmt.Sprintf("%d", hash) +} diff --git a/vendor/github.com/go-task/slim-sprig/date.go b/vendor/github.com/go-task/slim-sprig/date.go new file mode 100644 index 0000000..ed022dd --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/date.go @@ -0,0 +1,152 @@ +package sprig + +import ( + "strconv" + "time" +) + +// Given a format and a date, format the date string. +// +// Date can be a `time.Time` or an `int, int32, int64`. +// In the later case, it is treated as seconds since UNIX +// epoch. 
+func date(fmt string, date interface{}) string { + return dateInZone(fmt, date, "Local") +} + +func htmlDate(date interface{}) string { + return dateInZone("2006-01-02", date, "Local") +} + +func htmlDateInZone(date interface{}, zone string) string { + return dateInZone("2006-01-02", date, zone) +} + +func dateInZone(fmt string, date interface{}, zone string) string { + var t time.Time + switch date := date.(type) { + default: + t = time.Now() + case time.Time: + t = date + case *time.Time: + t = *date + case int64: + t = time.Unix(date, 0) + case int: + t = time.Unix(int64(date), 0) + case int32: + t = time.Unix(int64(date), 0) + } + + loc, err := time.LoadLocation(zone) + if err != nil { + loc, _ = time.LoadLocation("UTC") + } + + return t.In(loc).Format(fmt) +} + +func dateModify(fmt string, date time.Time) time.Time { + d, err := time.ParseDuration(fmt) + if err != nil { + return date + } + return date.Add(d) +} + +func mustDateModify(fmt string, date time.Time) (time.Time, error) { + d, err := time.ParseDuration(fmt) + if err != nil { + return time.Time{}, err + } + return date.Add(d), nil +} + +func dateAgo(date interface{}) string { + var t time.Time + + switch date := date.(type) { + default: + t = time.Now() + case time.Time: + t = date + case int64: + t = time.Unix(date, 0) + case int: + t = time.Unix(int64(date), 0) + } + // Drop resolution to seconds + duration := time.Since(t).Round(time.Second) + return duration.String() +} + +func duration(sec interface{}) string { + var n int64 + switch value := sec.(type) { + default: + n = 0 + case string: + n, _ = strconv.ParseInt(value, 10, 64) + case int64: + n = value + } + return (time.Duration(n) * time.Second).String() +} + +func durationRound(duration interface{}) string { + var d time.Duration + switch duration := duration.(type) { + default: + d = 0 + case string: + d, _ = time.ParseDuration(duration) + case int64: + d = time.Duration(duration) + case time.Time: + d = time.Since(duration) + } + + u := uint64(d) + neg := d < 0 + if neg { + u = -u + } + + var ( + year = uint64(time.Hour) * 24 * 365 + month = uint64(time.Hour) * 24 * 30 + day = uint64(time.Hour) * 24 + hour = uint64(time.Hour) + minute = uint64(time.Minute) + second = uint64(time.Second) + ) + switch { + case u > year: + return strconv.FormatUint(u/year, 10) + "y" + case u > month: + return strconv.FormatUint(u/month, 10) + "mo" + case u > day: + return strconv.FormatUint(u/day, 10) + "d" + case u > hour: + return strconv.FormatUint(u/hour, 10) + "h" + case u > minute: + return strconv.FormatUint(u/minute, 10) + "m" + case u > second: + return strconv.FormatUint(u/second, 10) + "s" + } + return "0s" +} + +func toDate(fmt, str string) time.Time { + t, _ := time.ParseInLocation(fmt, str, time.Local) + return t +} + +func mustToDate(fmt, str string) (time.Time, error) { + return time.ParseInLocation(fmt, str, time.Local) +} + +func unixEpoch(date time.Time) string { + return strconv.FormatInt(date.Unix(), 10) +} diff --git a/vendor/github.com/go-task/slim-sprig/defaults.go b/vendor/github.com/go-task/slim-sprig/defaults.go new file mode 100644 index 0000000..b9f9796 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/defaults.go @@ -0,0 +1,163 @@ +package sprig + +import ( + "bytes" + "encoding/json" + "math/rand" + "reflect" + "strings" + "time" +) + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +// dfault checks whether `given` is set, and returns default if not set. +// +// This returns `d` if `given` appears not to be set, and `given` otherwise. 
+// +// For numeric types 0 is unset. +// For strings, maps, arrays, and slices, len() = 0 is considered unset. +// For bool, false is unset. +// Structs are never considered unset. +// +// For everything else, including pointers, a nil value is unset. +func dfault(d interface{}, given ...interface{}) interface{} { + + if empty(given) || empty(given[0]) { + return d + } + return given[0] +} + +// empty returns true if the given value has the zero value for its type. +func empty(given interface{}) bool { + g := reflect.ValueOf(given) + if !g.IsValid() { + return true + } + + // Basically adapted from text/template.isTrue + switch g.Kind() { + default: + return g.IsNil() + case reflect.Array, reflect.Slice, reflect.Map, reflect.String: + return g.Len() == 0 + case reflect.Bool: + return !g.Bool() + case reflect.Complex64, reflect.Complex128: + return g.Complex() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return g.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return g.Uint() == 0 + case reflect.Float32, reflect.Float64: + return g.Float() == 0 + case reflect.Struct: + return false + } +} + +// coalesce returns the first non-empty value. +func coalesce(v ...interface{}) interface{} { + for _, val := range v { + if !empty(val) { + return val + } + } + return nil +} + +// all returns true if empty(x) is false for all values x in the list. +// If the list is empty, return true. +func all(v ...interface{}) bool { + for _, val := range v { + if empty(val) { + return false + } + } + return true +} + +// any returns true if empty(x) is false for any x in the list. +// If the list is empty, return false. +func any(v ...interface{}) bool { + for _, val := range v { + if !empty(val) { + return true + } + } + return false +} + +// fromJson decodes JSON into a structured value, ignoring errors. +func fromJson(v string) interface{} { + output, _ := mustFromJson(v) + return output +} + +// mustFromJson decodes JSON into a structured value, returning errors. +func mustFromJson(v string) (interface{}, error) { + var output interface{} + err := json.Unmarshal([]byte(v), &output) + return output, err +} + +// toJson encodes an item into a JSON string +func toJson(v interface{}) string { + output, _ := json.Marshal(v) + return string(output) +} + +func mustToJson(v interface{}) (string, error) { + output, err := json.Marshal(v) + if err != nil { + return "", err + } + return string(output), nil +} + +// toPrettyJson encodes an item into a pretty (indented) JSON string +func toPrettyJson(v interface{}) string { + output, _ := json.MarshalIndent(v, "", " ") + return string(output) +} + +func mustToPrettyJson(v interface{}) (string, error) { + output, err := json.MarshalIndent(v, "", " ") + if err != nil { + return "", err + } + return string(output), nil +} + +// toRawJson encodes an item into a JSON string with no escaping of HTML characters. +func toRawJson(v interface{}) string { + output, err := mustToRawJson(v) + if err != nil { + panic(err) + } + return string(output) +} + +// mustToRawJson encodes an item into a JSON string with no escaping of HTML characters. 
+func mustToRawJson(v interface{}) (string, error) { + buf := new(bytes.Buffer) + enc := json.NewEncoder(buf) + enc.SetEscapeHTML(false) + err := enc.Encode(&v) + if err != nil { + return "", err + } + return strings.TrimSuffix(buf.String(), "\n"), nil +} + +// ternary returns the first value if the last value is true, otherwise returns the second value. +func ternary(vt interface{}, vf interface{}, v bool) interface{} { + if v { + return vt + } + + return vf +} diff --git a/vendor/github.com/go-task/slim-sprig/dict.go b/vendor/github.com/go-task/slim-sprig/dict.go new file mode 100644 index 0000000..77ebc61 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/dict.go @@ -0,0 +1,118 @@ +package sprig + +func get(d map[string]interface{}, key string) interface{} { + if val, ok := d[key]; ok { + return val + } + return "" +} + +func set(d map[string]interface{}, key string, value interface{}) map[string]interface{} { + d[key] = value + return d +} + +func unset(d map[string]interface{}, key string) map[string]interface{} { + delete(d, key) + return d +} + +func hasKey(d map[string]interface{}, key string) bool { + _, ok := d[key] + return ok +} + +func pluck(key string, d ...map[string]interface{}) []interface{} { + res := []interface{}{} + for _, dict := range d { + if val, ok := dict[key]; ok { + res = append(res, val) + } + } + return res +} + +func keys(dicts ...map[string]interface{}) []string { + k := []string{} + for _, dict := range dicts { + for key := range dict { + k = append(k, key) + } + } + return k +} + +func pick(dict map[string]interface{}, keys ...string) map[string]interface{} { + res := map[string]interface{}{} + for _, k := range keys { + if v, ok := dict[k]; ok { + res[k] = v + } + } + return res +} + +func omit(dict map[string]interface{}, keys ...string) map[string]interface{} { + res := map[string]interface{}{} + + omit := make(map[string]bool, len(keys)) + for _, k := range keys { + omit[k] = true + } + + for k, v := range dict { + if _, ok := omit[k]; !ok { + res[k] = v + } + } + return res +} + +func dict(v ...interface{}) map[string]interface{} { + dict := map[string]interface{}{} + lenv := len(v) + for i := 0; i < lenv; i += 2 { + key := strval(v[i]) + if i+1 >= lenv { + dict[key] = "" + continue + } + dict[key] = v[i+1] + } + return dict +} + +func values(dict map[string]interface{}) []interface{} { + values := []interface{}{} + for _, value := range dict { + values = append(values, value) + } + + return values +} + +func dig(ps ...interface{}) (interface{}, error) { + if len(ps) < 3 { + panic("dig needs at least three arguments") + } + dict := ps[len(ps)-1].(map[string]interface{}) + def := ps[len(ps)-2] + ks := make([]string, len(ps)-2) + for i := 0; i < len(ks); i++ { + ks[i] = ps[i].(string) + } + + return digFromDict(dict, def, ks) +} + +func digFromDict(dict map[string]interface{}, d interface{}, ks []string) (interface{}, error) { + k, ns := ks[0], ks[1:len(ks)] + step, has := dict[k] + if !has { + return d, nil + } + if len(ns) == 0 { + return step, nil + } + return digFromDict(step.(map[string]interface{}), d, ns) +} diff --git a/vendor/github.com/go-task/slim-sprig/doc.go b/vendor/github.com/go-task/slim-sprig/doc.go new file mode 100644 index 0000000..aabb9d4 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/doc.go @@ -0,0 +1,19 @@ +/* +Package sprig provides template functions for Go. + +This package contains a number of utility functions for working with data +inside of Go `html/template` and `text/template` files. 
+ +To add these functions, use the `template.Funcs()` method: + + t := templates.New("foo").Funcs(sprig.FuncMap()) + +Note that you should add the function map before you parse any template files. + + In several cases, Sprig reverses the order of arguments from the way they + appear in the standard library. This is to make it easier to pipe + arguments into functions. + +See http://masterminds.github.io/sprig/ for more detailed documentation on each of the available functions. +*/ +package sprig diff --git a/vendor/github.com/go-task/slim-sprig/functions.go b/vendor/github.com/go-task/slim-sprig/functions.go new file mode 100644 index 0000000..5ea74f8 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/functions.go @@ -0,0 +1,317 @@ +package sprig + +import ( + "errors" + "html/template" + "math/rand" + "os" + "path" + "path/filepath" + "reflect" + "strconv" + "strings" + ttemplate "text/template" + "time" +) + +// FuncMap produces the function map. +// +// Use this to pass the functions into the template engine: +// +// tpl := template.New("foo").Funcs(sprig.FuncMap())) +// +func FuncMap() template.FuncMap { + return HtmlFuncMap() +} + +// HermeticTxtFuncMap returns a 'text/template'.FuncMap with only repeatable functions. +func HermeticTxtFuncMap() ttemplate.FuncMap { + r := TxtFuncMap() + for _, name := range nonhermeticFunctions { + delete(r, name) + } + return r +} + +// HermeticHtmlFuncMap returns an 'html/template'.Funcmap with only repeatable functions. +func HermeticHtmlFuncMap() template.FuncMap { + r := HtmlFuncMap() + for _, name := range nonhermeticFunctions { + delete(r, name) + } + return r +} + +// TxtFuncMap returns a 'text/template'.FuncMap +func TxtFuncMap() ttemplate.FuncMap { + return ttemplate.FuncMap(GenericFuncMap()) +} + +// HtmlFuncMap returns an 'html/template'.Funcmap +func HtmlFuncMap() template.FuncMap { + return template.FuncMap(GenericFuncMap()) +} + +// GenericFuncMap returns a copy of the basic function map as a map[string]interface{}. +func GenericFuncMap() map[string]interface{} { + gfm := make(map[string]interface{}, len(genericMap)) + for k, v := range genericMap { + gfm[k] = v + } + return gfm +} + +// These functions are not guaranteed to evaluate to the same result for given input, because they +// refer to the environment or global state. +var nonhermeticFunctions = []string{ + // Date functions + "date", + "date_in_zone", + "date_modify", + "now", + "htmlDate", + "htmlDateInZone", + "dateInZone", + "dateModify", + + // Strings + "randAlphaNum", + "randAlpha", + "randAscii", + "randNumeric", + "randBytes", + "uuidv4", + + // OS + "env", + "expandenv", + + // Network + "getHostByName", +} + +var genericMap = map[string]interface{}{ + "hello": func() string { return "Hello!" }, + + // Date functions + "ago": dateAgo, + "date": date, + "date_in_zone": dateInZone, + "date_modify": dateModify, + "dateInZone": dateInZone, + "dateModify": dateModify, + "duration": duration, + "durationRound": durationRound, + "htmlDate": htmlDate, + "htmlDateInZone": htmlDateInZone, + "must_date_modify": mustDateModify, + "mustDateModify": mustDateModify, + "mustToDate": mustToDate, + "now": time.Now, + "toDate": toDate, + "unixEpoch": unixEpoch, + + // Strings + "trunc": trunc, + "trim": strings.TrimSpace, + "upper": strings.ToUpper, + "lower": strings.ToLower, + "title": strings.Title, + "substr": substring, + // Switch order so that "foo" | repeat 5 + "repeat": func(count int, str string) string { return strings.Repeat(str, count) }, + // Deprecated: Use trimAll. 
+ "trimall": func(a, b string) string { return strings.Trim(b, a) }, + // Switch order so that "$foo" | trimall "$" + "trimAll": func(a, b string) string { return strings.Trim(b, a) }, + "trimSuffix": func(a, b string) string { return strings.TrimSuffix(b, a) }, + "trimPrefix": func(a, b string) string { return strings.TrimPrefix(b, a) }, + // Switch order so that "foobar" | contains "foo" + "contains": func(substr string, str string) bool { return strings.Contains(str, substr) }, + "hasPrefix": func(substr string, str string) bool { return strings.HasPrefix(str, substr) }, + "hasSuffix": func(substr string, str string) bool { return strings.HasSuffix(str, substr) }, + "quote": quote, + "squote": squote, + "cat": cat, + "indent": indent, + "nindent": nindent, + "replace": replace, + "plural": plural, + "sha1sum": sha1sum, + "sha256sum": sha256sum, + "adler32sum": adler32sum, + "toString": strval, + + // Wrap Atoi to stop errors. + "atoi": func(a string) int { i, _ := strconv.Atoi(a); return i }, + "int64": toInt64, + "int": toInt, + "float64": toFloat64, + "seq": seq, + "toDecimal": toDecimal, + + //"gt": func(a, b int) bool {return a > b}, + //"gte": func(a, b int) bool {return a >= b}, + //"lt": func(a, b int) bool {return a < b}, + //"lte": func(a, b int) bool {return a <= b}, + + // split "/" foo/bar returns map[int]string{0: foo, 1: bar} + "split": split, + "splitList": func(sep, orig string) []string { return strings.Split(orig, sep) }, + // splitn "/" foo/bar/fuu returns map[int]string{0: foo, 1: bar/fuu} + "splitn": splitn, + "toStrings": strslice, + + "until": until, + "untilStep": untilStep, + + // VERY basic arithmetic. + "add1": func(i interface{}) int64 { return toInt64(i) + 1 }, + "add": func(i ...interface{}) int64 { + var a int64 = 0 + for _, b := range i { + a += toInt64(b) + } + return a + }, + "sub": func(a, b interface{}) int64 { return toInt64(a) - toInt64(b) }, + "div": func(a, b interface{}) int64 { return toInt64(a) / toInt64(b) }, + "mod": func(a, b interface{}) int64 { return toInt64(a) % toInt64(b) }, + "mul": func(a interface{}, v ...interface{}) int64 { + val := toInt64(a) + for _, b := range v { + val = val * toInt64(b) + } + return val + }, + "randInt": func(min, max int) int { return rand.Intn(max-min) + min }, + "biggest": max, + "max": max, + "min": min, + "maxf": maxf, + "minf": minf, + "ceil": ceil, + "floor": floor, + "round": round, + + // string slices. Note that we reverse the order b/c that's better + // for template processing. 
+ "join": join, + "sortAlpha": sortAlpha, + + // Defaults + "default": dfault, + "empty": empty, + "coalesce": coalesce, + "all": all, + "any": any, + "compact": compact, + "mustCompact": mustCompact, + "fromJson": fromJson, + "toJson": toJson, + "toPrettyJson": toPrettyJson, + "toRawJson": toRawJson, + "mustFromJson": mustFromJson, + "mustToJson": mustToJson, + "mustToPrettyJson": mustToPrettyJson, + "mustToRawJson": mustToRawJson, + "ternary": ternary, + + // Reflection + "typeOf": typeOf, + "typeIs": typeIs, + "typeIsLike": typeIsLike, + "kindOf": kindOf, + "kindIs": kindIs, + "deepEqual": reflect.DeepEqual, + + // OS: + "env": os.Getenv, + "expandenv": os.ExpandEnv, + + // Network: + "getHostByName": getHostByName, + + // Paths: + "base": path.Base, + "dir": path.Dir, + "clean": path.Clean, + "ext": path.Ext, + "isAbs": path.IsAbs, + + // Filepaths: + "osBase": filepath.Base, + "osClean": filepath.Clean, + "osDir": filepath.Dir, + "osExt": filepath.Ext, + "osIsAbs": filepath.IsAbs, + + // Encoding: + "b64enc": base64encode, + "b64dec": base64decode, + "b32enc": base32encode, + "b32dec": base32decode, + + // Data Structures: + "tuple": list, // FIXME: with the addition of append/prepend these are no longer immutable. + "list": list, + "dict": dict, + "get": get, + "set": set, + "unset": unset, + "hasKey": hasKey, + "pluck": pluck, + "keys": keys, + "pick": pick, + "omit": omit, + "values": values, + + "append": push, "push": push, + "mustAppend": mustPush, "mustPush": mustPush, + "prepend": prepend, + "mustPrepend": mustPrepend, + "first": first, + "mustFirst": mustFirst, + "rest": rest, + "mustRest": mustRest, + "last": last, + "mustLast": mustLast, + "initial": initial, + "mustInitial": mustInitial, + "reverse": reverse, + "mustReverse": mustReverse, + "uniq": uniq, + "mustUniq": mustUniq, + "without": without, + "mustWithout": mustWithout, + "has": has, + "mustHas": mustHas, + "slice": slice, + "mustSlice": mustSlice, + "concat": concat, + "dig": dig, + "chunk": chunk, + "mustChunk": mustChunk, + + // Flow Control: + "fail": func(msg string) (string, error) { return "", errors.New(msg) }, + + // Regex + "regexMatch": regexMatch, + "mustRegexMatch": mustRegexMatch, + "regexFindAll": regexFindAll, + "mustRegexFindAll": mustRegexFindAll, + "regexFind": regexFind, + "mustRegexFind": mustRegexFind, + "regexReplaceAll": regexReplaceAll, + "mustRegexReplaceAll": mustRegexReplaceAll, + "regexReplaceAllLiteral": regexReplaceAllLiteral, + "mustRegexReplaceAllLiteral": mustRegexReplaceAllLiteral, + "regexSplit": regexSplit, + "mustRegexSplit": mustRegexSplit, + "regexQuoteMeta": regexQuoteMeta, + + // URLs: + "urlParse": urlParse, + "urlJoin": urlJoin, +} diff --git a/vendor/github.com/go-task/slim-sprig/list.go b/vendor/github.com/go-task/slim-sprig/list.go new file mode 100644 index 0000000..ca0fbb7 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/list.go @@ -0,0 +1,464 @@ +package sprig + +import ( + "fmt" + "math" + "reflect" + "sort" +) + +// Reflection is used in these functions so that slices and arrays of strings, +// ints, and other types not implementing []interface{} can be worked with. +// For example, this is useful if you need to work on the output of regexs. 
+ +func list(v ...interface{}) []interface{} { + return v +} + +func push(list interface{}, v interface{}) []interface{} { + l, err := mustPush(list, v) + if err != nil { + panic(err) + } + + return l +} + +func mustPush(list interface{}, v interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[i] = l2.Index(i).Interface() + } + + return append(nl, v), nil + + default: + return nil, fmt.Errorf("Cannot push on type %s", tp) + } +} + +func prepend(list interface{}, v interface{}) []interface{} { + l, err := mustPrepend(list, v) + if err != nil { + panic(err) + } + + return l +} + +func mustPrepend(list interface{}, v interface{}) ([]interface{}, error) { + //return append([]interface{}{v}, list...) + + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[i] = l2.Index(i).Interface() + } + + return append([]interface{}{v}, nl...), nil + + default: + return nil, fmt.Errorf("Cannot prepend on type %s", tp) + } +} + +func chunk(size int, list interface{}) [][]interface{} { + l, err := mustChunk(size, list) + if err != nil { + panic(err) + } + + return l +} + +func mustChunk(size int, list interface{}) ([][]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + + cs := int(math.Floor(float64(l-1)/float64(size)) + 1) + nl := make([][]interface{}, cs) + + for i := 0; i < cs; i++ { + clen := size + if i == cs-1 { + clen = int(math.Floor(math.Mod(float64(l), float64(size)))) + if clen == 0 { + clen = size + } + } + + nl[i] = make([]interface{}, clen) + + for j := 0; j < clen; j++ { + ix := i*size + j + nl[i][j] = l2.Index(ix).Interface() + } + } + + return nl, nil + + default: + return nil, fmt.Errorf("Cannot chunk type %s", tp) + } +} + +func last(list interface{}) interface{} { + l, err := mustLast(list) + if err != nil { + panic(err) + } + + return l +} + +func mustLast(list interface{}) (interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + return l2.Index(l - 1).Interface(), nil + default: + return nil, fmt.Errorf("Cannot find last on type %s", tp) + } +} + +func first(list interface{}) interface{} { + l, err := mustFirst(list) + if err != nil { + panic(err) + } + + return l +} + +func mustFirst(list interface{}) (interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + return l2.Index(0).Interface(), nil + default: + return nil, fmt.Errorf("Cannot find first on type %s", tp) + } +} + +func rest(list interface{}) []interface{} { + l, err := mustRest(list) + if err != nil { + panic(err) + } + + return l +} + +func mustRest(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + nl := make([]interface{}, l-1) + for i := 1; i < l; i++ { + nl[i-1] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find 
rest on type %s", tp) + } +} + +func initial(list interface{}) []interface{} { + l, err := mustInitial(list) + if err != nil { + panic(err) + } + + return l +} + +func mustInitial(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + nl := make([]interface{}, l-1) + for i := 0; i < l-1; i++ { + nl[i] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find initial on type %s", tp) + } +} + +func sortAlpha(list interface{}) []string { + k := reflect.Indirect(reflect.ValueOf(list)).Kind() + switch k { + case reflect.Slice, reflect.Array: + a := strslice(list) + s := sort.StringSlice(a) + s.Sort() + return s + } + return []string{strval(list)} +} + +func reverse(v interface{}) []interface{} { + l, err := mustReverse(v) + if err != nil { + panic(err) + } + + return l +} + +func mustReverse(v interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(v).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(v) + + l := l2.Len() + // We do not sort in place because the incoming array should not be altered. + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[l-i-1] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find reverse on type %s", tp) + } +} + +func compact(list interface{}) []interface{} { + l, err := mustCompact(list) + if err != nil { + panic(err) + } + + return l +} + +func mustCompact(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !empty(item) { + nl = append(nl, item) + } + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot compact on type %s", tp) + } +} + +func uniq(list interface{}) []interface{} { + l, err := mustUniq(list) + if err != nil { + panic(err) + } + + return l +} + +func mustUniq(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + dest := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !inList(dest, item) { + dest = append(dest, item) + } + } + + return dest, nil + default: + return nil, fmt.Errorf("Cannot find uniq on type %s", tp) + } +} + +func inList(haystack []interface{}, needle interface{}) bool { + for _, h := range haystack { + if reflect.DeepEqual(needle, h) { + return true + } + } + return false +} + +func without(list interface{}, omit ...interface{}) []interface{} { + l, err := mustWithout(list, omit...) 
+ if err != nil { + panic(err) + } + + return l +} + +func mustWithout(list interface{}, omit ...interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + res := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !inList(omit, item) { + res = append(res, item) + } + } + + return res, nil + default: + return nil, fmt.Errorf("Cannot find without on type %s", tp) + } +} + +func has(needle interface{}, haystack interface{}) bool { + l, err := mustHas(needle, haystack) + if err != nil { + panic(err) + } + + return l +} + +func mustHas(needle interface{}, haystack interface{}) (bool, error) { + if haystack == nil { + return false, nil + } + tp := reflect.TypeOf(haystack).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(haystack) + var item interface{} + l := l2.Len() + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if reflect.DeepEqual(needle, item) { + return true, nil + } + } + + return false, nil + default: + return false, fmt.Errorf("Cannot find has on type %s", tp) + } +} + +// $list := [1, 2, 3, 4, 5] +// slice $list -> list[0:5] = list[:] +// slice $list 0 3 -> list[0:3] = list[:3] +// slice $list 3 5 -> list[3:5] +// slice $list 3 -> list[3:5] = list[3:] +func slice(list interface{}, indices ...interface{}) interface{} { + l, err := mustSlice(list, indices...) + if err != nil { + panic(err) + } + + return l +} + +func mustSlice(list interface{}, indices ...interface{}) (interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + var start, end int + if len(indices) > 0 { + start = toInt(indices[0]) + } + if len(indices) < 2 { + end = l + } else { + end = toInt(indices[1]) + } + + return l2.Slice(start, end).Interface(), nil + default: + return nil, fmt.Errorf("list should be type of slice or array but %s", tp) + } +} + +func concat(lists ...interface{}) interface{} { + var res []interface{} + for _, list := range lists { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + for i := 0; i < l2.Len(); i++ { + res = append(res, l2.Index(i).Interface()) + } + default: + panic(fmt.Sprintf("Cannot concat type %s as list", tp)) + } + } + return res +}
diff --git a/vendor/github.com/go-task/slim-sprig/network.go b/vendor/github.com/go-task/slim-sprig/network.go new file mode 100644 index 0000000..108d78a --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/network.go @@ -0,0 +1,12 @@ +package sprig + +import ( + "math/rand" + "net" +) + +func getHostByName(name string) string { + addrs, _ := net.LookupHost(name) + //TODO: add error handling when release v3 comes out + return addrs[rand.Intn(len(addrs))] +}
diff --git a/vendor/github.com/go-task/slim-sprig/numeric.go b/vendor/github.com/go-task/slim-sprig/numeric.go new file mode 100644 index 0000000..98cbb37 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/numeric.go @@ -0,0 +1,228 @@ +package sprig + +import ( + "fmt" + "math" + "reflect" + "strconv" + "strings" +) + +// toFloat64 converts 64-bit floats +func toFloat64(v interface{}) float64 { + if str, ok := v.(string); ok { + iv, err := strconv.ParseFloat(str, 64) + if err != nil { + return 0 + } + return iv + } + + val := reflect.Indirect(reflect.ValueOf(v)) + switch val.Kind() { + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return float64(val.Int()) + case reflect.Uint8, reflect.Uint16, reflect.Uint32: + return float64(val.Uint()) + case reflect.Uint, reflect.Uint64: + return float64(val.Uint()) + case reflect.Float32, reflect.Float64: + return val.Float() + case reflect.Bool: + if val.Bool() { + return 1 + } + return 0 + default: + return 0 + } +} + +func toInt(v interface{}) int { + //It's not optimal. But I don't want to duplicate toInt64 code. + return int(toInt64(v)) +} + +// toInt64 converts integer types to 64-bit integers +func toInt64(v interface{}) int64 { + if str, ok := v.(string); ok { + iv, err := strconv.ParseInt(str, 10, 64) + if err != nil { + return 0 + } + return iv + } + + val := reflect.Indirect(reflect.ValueOf(v)) + switch val.Kind() { + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return val.Int() + case reflect.Uint8, reflect.Uint16, reflect.Uint32: + return int64(val.Uint()) + case reflect.Uint, reflect.Uint64: + tv := val.Uint() + if tv <= math.MaxInt64 { + return int64(tv) + } + // TODO: What is the sensible thing to do here? + return math.MaxInt64 + case reflect.Float32, reflect.Float64: + return int64(val.Float()) + case reflect.Bool: + if val.Bool() { + return 1 + } + return 0 + default: + return 0 + } +} + +func max(a interface{}, i ...interface{}) int64 { + aa := toInt64(a) + for _, b := range i { + bb := toInt64(b) + if bb > aa { + aa = bb + } + } + return aa +} + +func maxf(a interface{}, i ...interface{}) float64 { + aa := toFloat64(a) + for _, b := range i { + bb := toFloat64(b) + aa = math.Max(aa, bb) + } + return aa +} + +func min(a interface{}, i ...interface{}) int64 { + aa := toInt64(a) + for _, b := range i { + bb := toInt64(b) + if bb < aa { + aa = bb + } + } + return aa +} + +func minf(a interface{}, i ...interface{}) float64 { + aa := toFloat64(a) + for _, b := range i { + bb := toFloat64(b) + aa = math.Min(aa, bb) + } + return aa +} + +func until(count int) []int { + step := 1 + if count < 0 { + step = -1 + } + return untilStep(0, count, step) +} + +func untilStep(start, stop, step int) []int { + v := []int{} + + if stop < start { + if step >= 0 { + return v + } + for i := start; i > stop; i += step { + v = append(v, i) + } + return v + } + + if step <= 0 { + return v + } + for i := start; i < stop; i += step { + v = append(v, i) + } + return v +} + +func floor(a interface{}) float64 { + aa := toFloat64(a) + return math.Floor(aa) +} + +func ceil(a interface{}) float64 { + aa := toFloat64(a) + return math.Ceil(aa) +} + +func round(a interface{}, p int, rOpt ...float64) float64 { + roundOn := .5 + if len(rOpt) > 0 { + roundOn = rOpt[0] + } + val := toFloat64(a) + places := toFloat64(p) + + var round float64 + pow := math.Pow(10, places) + digit := pow * val + _, div := math.Modf(digit) + if div >= roundOn { + round = math.Ceil(digit) + } else { + round = math.Floor(digit) + } + return round / pow +} + +// converts Unix octal to decimal
+func toDecimal(v interface{}) int64 { + result, err := strconv.ParseInt(fmt.Sprint(v), 8, 64) + if err != nil { + return 0 + } + return result +} + +func seq(params ...int) string { + increment := 1 + switch len(params) { + case 0: + return "" + case 1: + start := 1 + end := params[0] + if end < start { + increment = -1 + } + return intArrayToString(untilStep(start, end+increment, increment), " ") + case 3: + start := params[0] + end := params[2] + step := params[1] + if end < start { + increment = -1 + if step > 0 { + return "" + } + } + return intArrayToString(untilStep(start, end+increment, step), " ") + case 2: + start := params[0] + end := params[1] + step := 1 + if end < start { + step = -1 + } + return intArrayToString(untilStep(start, end+step, step), " ") + default: + return "" + } +} + +func intArrayToString(slice []int, delimiter string) string { + return strings.Trim(strings.Join(strings.Fields(fmt.Sprint(slice)), delimiter), "[]") +}
diff --git a/vendor/github.com/go-task/slim-sprig/reflect.go b/vendor/github.com/go-task/slim-sprig/reflect.go new file mode 100644 index 0000000..8a65c13 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/reflect.go @@ -0,0 +1,28 @@ +package sprig + +import ( + "fmt" + "reflect" +) + +// typeIs returns true if the src is the type named in target. +func typeIs(target string, src interface{}) bool { + return target == typeOf(src) +} + +func typeIsLike(target string, src interface{}) bool { + t := typeOf(src) + return target == t || "*"+target == t +} + +func typeOf(src interface{}) string { + return fmt.Sprintf("%T", src) +} + +func kindIs(target string, src interface{}) bool { + return target == kindOf(src) +} + +func kindOf(src interface{}) string { + return reflect.ValueOf(src).Kind().String() +}
diff --git a/vendor/github.com/go-task/slim-sprig/regex.go b/vendor/github.com/go-task/slim-sprig/regex.go new file mode 100644 index 0000000..fab5510 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/regex.go @@ -0,0 +1,83 @@ +package sprig + +import ( + "regexp" +) + +func regexMatch(regex string, s string) bool { + match, _ := regexp.MatchString(regex, s) + return match +} + +func mustRegexMatch(regex string, s string) (bool, error) { + return regexp.MatchString(regex, s) +} + +func regexFindAll(regex string, s string, n int) []string { + r := regexp.MustCompile(regex) + return r.FindAllString(s, n) +} + +func mustRegexFindAll(regex string, s string, n int) ([]string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return []string{}, err + } + return r.FindAllString(s, n), nil +} + +func regexFind(regex string, s string) string { + r := regexp.MustCompile(regex) + return r.FindString(s) +} + +func mustRegexFind(regex string, s string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.FindString(s), nil +} + +func regexReplaceAll(regex string, s string, repl string) string { + r := regexp.MustCompile(regex) + return r.ReplaceAllString(s, repl) +} + +func mustRegexReplaceAll(regex string, s string, repl string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.ReplaceAllString(s, repl), nil +} + +func regexReplaceAllLiteral(regex string, s string, repl string) string { + r := regexp.MustCompile(regex) + return r.ReplaceAllLiteralString(s, repl) +} + +func mustRegexReplaceAllLiteral(regex string, s string, repl string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.ReplaceAllLiteralString(s, repl), nil +} + +func regexSplit(regex string, s string, n int) []string { + r := regexp.MustCompile(regex) + return r.Split(s, n) +} + +func mustRegexSplit(regex string, s string, n int) ([]string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return []string{}, err + } + return r.Split(s, n), nil +} + +func regexQuoteMeta(s string) string { + return regexp.QuoteMeta(s) +}
diff --git a/vendor/github.com/go-task/slim-sprig/strings.go
b/vendor/github.com/go-task/slim-sprig/strings.go new file mode 100644 index 0000000..3c62d6b --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/strings.go @@ -0,0 +1,189 @@ +package sprig + +import ( + "encoding/base32" + "encoding/base64" + "fmt" + "reflect" + "strconv" + "strings" +) + +func base64encode(v string) string { + return base64.StdEncoding.EncodeToString([]byte(v)) +} + +func base64decode(v string) string { + data, err := base64.StdEncoding.DecodeString(v) + if err != nil { + return err.Error() + } + return string(data) +} + +func base32encode(v string) string { + return base32.StdEncoding.EncodeToString([]byte(v)) +} + +func base32decode(v string) string { + data, err := base32.StdEncoding.DecodeString(v) + if err != nil { + return err.Error() + } + return string(data) +} + +func quote(str ...interface{}) string { + out := make([]string, 0, len(str)) + for _, s := range str { + if s != nil { + out = append(out, fmt.Sprintf("%q", strval(s))) + } + } + return strings.Join(out, " ") +} + +func squote(str ...interface{}) string { + out := make([]string, 0, len(str)) + for _, s := range str { + if s != nil { + out = append(out, fmt.Sprintf("'%v'", s)) + } + } + return strings.Join(out, " ") +} + +func cat(v ...interface{}) string { + v = removeNilElements(v) + r := strings.TrimSpace(strings.Repeat("%v ", len(v))) + return fmt.Sprintf(r, v...) +} + +func indent(spaces int, v string) string { + pad := strings.Repeat(" ", spaces) + return pad + strings.Replace(v, "\n", "\n"+pad, -1) +} + +func nindent(spaces int, v string) string { + return "\n" + indent(spaces, v) +} + +func replace(old, new, src string) string { + return strings.Replace(src, old, new, -1) +} + +func plural(one, many string, count int) string { + if count == 1 { + return one + } + return many +} + +func strslice(v interface{}) []string { + switch v := v.(type) { + case []string: + return v + case []interface{}: + b := make([]string, 0, len(v)) + for _, s := range v { + if s != nil { + b = append(b, strval(s)) + } + } + return b + default: + val := reflect.ValueOf(v) + switch val.Kind() { + case reflect.Array, reflect.Slice: + l := val.Len() + b := make([]string, 0, l) + for i := 0; i < l; i++ { + value := val.Index(i).Interface() + if value != nil { + b = append(b, strval(value)) + } + } + return b + default: + if v == nil { + return []string{} + } + + return []string{strval(v)} + } + } +} + +func removeNilElements(v []interface{}) []interface{} { + newSlice := make([]interface{}, 0, len(v)) + for _, i := range v { + if i != nil { + newSlice = append(newSlice, i) + } + } + return newSlice +} + +func strval(v interface{}) string { + switch v := v.(type) { + case string: + return v + case []byte: + return string(v) + case error: + return v.Error() + case fmt.Stringer: + return v.String() + default: + return fmt.Sprintf("%v", v) + } +} + +func trunc(c int, s string) string { + if c < 0 && len(s)+c > 0 { + return s[len(s)+c:] + } + if c >= 0 && len(s) > c { + return s[:c] + } + return s +} + +func join(sep string, v interface{}) string { + return strings.Join(strslice(v), sep) +} + +func split(sep, orig string) map[string]string { + parts := strings.Split(orig, sep) + res := make(map[string]string, len(parts)) + for i, v := range parts { + res["_"+strconv.Itoa(i)] = v + } + return res +} + +func splitn(sep string, n int, orig string) map[string]string { + parts := strings.SplitN(orig, sep, n) + res := make(map[string]string, len(parts)) + for i, v := range parts { + res["_"+strconv.Itoa(i)] = v + } + return res 
+} + +// substring creates a substring of the given string. +// +// If start is < 0, this calls string[:end]. +// +// If start is >= 0 and end is < 0 or greater than the length of s, this calls string[start:]. +// +// Otherwise, this calls string[start:end]. +func substring(start, end int, s string) string { + if start < 0 { + return s[:end] + } + if end < 0 || end > len(s) { + return s[start:] + } + return s[start:end] +}
diff --git a/vendor/github.com/go-task/slim-sprig/url.go b/vendor/github.com/go-task/slim-sprig/url.go new file mode 100644 index 0000000..b8e120e --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/url.go @@ -0,0 +1,66 @@ +package sprig + +import ( + "fmt" + "net/url" + "reflect" +) + +func dictGetOrEmpty(dict map[string]interface{}, key string) string { + value, ok := dict[key] + if !ok { + return "" + } + tp := reflect.TypeOf(value).Kind() + if tp != reflect.String { + panic(fmt.Sprintf("unable to parse %s key, must be of type string, but %s found", key, tp.String())) + } + return reflect.ValueOf(value).String() +} + +// parses given URL to return dict object +func urlParse(v string) map[string]interface{} { + dict := map[string]interface{}{} + parsedURL, err := url.Parse(v) + if err != nil { + panic(fmt.Sprintf("unable to parse url: %s", err)) + } + dict["scheme"] = parsedURL.Scheme + dict["host"] = parsedURL.Host + dict["hostname"] = parsedURL.Hostname() + dict["path"] = parsedURL.Path + dict["query"] = parsedURL.RawQuery + dict["opaque"] = parsedURL.Opaque + dict["fragment"] = parsedURL.Fragment + if parsedURL.User != nil { + dict["userinfo"] = parsedURL.User.String() + } else { + dict["userinfo"] = "" + } + + return dict +} + +// join given dict to URL string +func urlJoin(d map[string]interface{}) string { + resURL := url.URL{ + Scheme: dictGetOrEmpty(d, "scheme"), + Host: dictGetOrEmpty(d, "host"), + Path: dictGetOrEmpty(d, "path"), + RawQuery: dictGetOrEmpty(d, "query"), + Opaque: dictGetOrEmpty(d, "opaque"), + Fragment: dictGetOrEmpty(d, "fragment"), + } + userinfo := dictGetOrEmpty(d, "userinfo") + var user *url.Userinfo + if userinfo != "" { + tempURL, err := url.Parse(fmt.Sprintf("proto://%s@host", userinfo)) + if err != nil { + panic(fmt.Sprintf("unable to parse userinfo in dict: %s", err)) + } + user = tempURL.User + } + + resURL.User = user + return resURL.String() +}
diff --git a/vendor/github.com/golang-jwt/jwt/v4/.gitignore b/vendor/github.com/golang-jwt/jwt/v4/.gitignore deleted file mode 100644 index 09573e0..0000000 --- a/vendor/github.com/golang-jwt/jwt/v4/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -.DS_Store -bin -.idea/ -
diff --git a/vendor/github.com/golang-jwt/jwt/v4/LICENSE b/vendor/github.com/golang-jwt/jwt/v4/LICENSE deleted file mode 100644 index 35dbc25..0000000 --- a/vendor/github.com/golang-jwt/jwt/v4/LICENSE +++ /dev/null @@ -1,9 +0,0 @@ -Copyright (c) 2012 Dave Grijalva -Copyright (c) 2021 golang-jwt maintainers - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - diff --git a/vendor/github.com/golang-jwt/jwt/v4/MIGRATION_GUIDE.md b/vendor/github.com/golang-jwt/jwt/v4/MIGRATION_GUIDE.md deleted file mode 100644 index 32966f5..0000000 --- a/vendor/github.com/golang-jwt/jwt/v4/MIGRATION_GUIDE.md +++ /dev/null @@ -1,22 +0,0 @@ -## Migration Guide (v4.0.0) - -Starting from [v4.0.0](https://github.com/golang-jwt/jwt/releases/tag/v4.0.0), the import path will be: - - "github.com/golang-jwt/jwt/v4" - -The `/v4` version will be backwards compatible with existing `v3.x.y` tags in this repo, as well as -`github.com/dgrijalva/jwt-go`. For most users this should be a drop-in replacement, if you're having -troubles migrating, please open an issue. - -You can replace all occurrences of `github.com/dgrijalva/jwt-go` or `github.com/golang-jwt/jwt` with `github.com/golang-jwt/jwt/v4`, either manually or by using tools such as `sed` or `gofmt`. - -And then you'd typically run: - -``` -go get github.com/golang-jwt/jwt/v4 -go mod tidy -``` - -## Older releases (before v3.2.0) - -The original migration guide for older releases can be found at https://github.com/dgrijalva/jwt-go/blob/master/MIGRATION_GUIDE.md. diff --git a/vendor/github.com/golang-jwt/jwt/v4/README.md b/vendor/github.com/golang-jwt/jwt/v4/README.md deleted file mode 100644 index f5d551c..0000000 --- a/vendor/github.com/golang-jwt/jwt/v4/README.md +++ /dev/null @@ -1,138 +0,0 @@ -# jwt-go - -[![build](https://github.com/golang-jwt/jwt/actions/workflows/build.yml/badge.svg)](https://github.com/golang-jwt/jwt/actions/workflows/build.yml) -[![Go Reference](https://pkg.go.dev/badge/github.com/golang-jwt/jwt/v4.svg)](https://pkg.go.dev/github.com/golang-jwt/jwt/v4) - -A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](https://datatracker.ietf.org/doc/html/rfc7519). - -Starting with [v4.0.0](https://github.com/golang-jwt/jwt/releases/tag/v4.0.0) this project adds Go module support, but maintains backwards compatibility with older `v3.x.y` tags and upstream `github.com/dgrijalva/jwt-go`. -See the [`MIGRATION_GUIDE.md`](./MIGRATION_GUIDE.md) for more information. - -> After the original author of the library suggested migrating the maintenance of `jwt-go`, a dedicated team of open source maintainers decided to clone the existing library into this repository. See [dgrijalva/jwt-go#462](https://github.com/dgrijalva/jwt-go/issues/462) for a detailed discussion on this topic. - - -**SECURITY NOTICE:** Some older versions of Go have a security issue in the crypto/elliptic. Recommendation is to upgrade to at least 1.15 See issue [dgrijalva/jwt-go#216](https://github.com/dgrijalva/jwt-go/issues/216) for more detail. - -**SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided. 
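Editor's note, not part of the vendored diff: the security notice above (from the README this diff removes) is worth illustrating. A hedged sketch of the `alg` check it describes, using only v4 API that appears elsewhere in this diff (`jwt.Parse`, the `Keyfunc` callback, and `*jwt.SigningMethodHMAC`); `parseHMAC` is an illustrative name, not a helper from this repo:

```go
package demo

import (
	"fmt"

	"github.com/golang-jwt/jwt/v4"
)

// parseHMAC (illustrative helper) refuses any token whose alg header is
// outside the HMAC family before handing the secret to the verifier, which
// closes the alg-confusion hole the notice above describes.
func parseHMAC(tokenString string, secret []byte) (*jwt.Token, error) {
	return jwt.Parse(tokenString, func(t *jwt.Token) (interface{}, error) {
		if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
		}
		return secret, nil
	})
}
```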
- -### Supported Go versions - -Our support of Go versions is aligned with Go's [version release policy](https://golang.org/doc/devel/release#policy). -So we will support a major version of Go until there are two newer major releases. -We no longer support building jwt-go with unsupported Go versions, as these contain security vulnerabilities -which will not be fixed. - -## What the heck is a JWT? - -JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens. - -In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in Oauth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects, that have been [base64url](https://datatracker.ietf.org/doc/html/rfc4648) encoded. The last part is the signature, encoded the same way. - -The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which encryption method was used for signing and what key was used. - -The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [RFC 7519](https://datatracker.ietf.org/doc/html/rfc7519) for information about reserved keys and the proper way to add your own. - -## What's in the box? - -This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own. - -## Installation Guidelines - -1. To install the jwt package, you first need to have [Go](https://go.dev/doc/install) installed, then you can use the command below to add `jwt-go` as a dependency in your Go program. - -```sh -go get -u github.com/golang-jwt/jwt/v4 -``` - -2. Import it in your code: - -```go -import "github.com/golang-jwt/jwt/v4" -``` - -## Examples - -See [the project documentation](https://pkg.go.dev/github.com/golang-jwt/jwt/v4) for examples of usage: - -* [Simple example of parsing and validating a token](https://pkg.go.dev/github.com/golang-jwt/jwt#example-Parse-Hmac) -* [Simple example of building and signing a token](https://pkg.go.dev/github.com/golang-jwt/jwt#example-New-Hmac) -* [Directory of Examples](https://pkg.go.dev/github.com/golang-jwt/jwt#pkg-examples) - -## Extensions - -This library publishes all the necessary components for adding your own signing methods or key functions. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod` or provide a `jwt.Keyfunc`. - -A common use case would be integrating with different 3rd party signature providers, like key management services from various cloud providers or Hardware Security Modules (HSMs) or to implement additional standards. 
- -| Extension | Purpose | Repo | -| --------- | -------------------------------------------------------------------------------------------------------- | ------------------------------------------ | -| GCP | Integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS) | https://github.com/someone1/gcp-jwt-go | -| AWS | Integrates with AWS Key Management Service, KMS | https://github.com/matelang/jwt-go-aws-kms | -| JWKS | Provides support for JWKS ([RFC 7517](https://datatracker.ietf.org/doc/html/rfc7517)) as a `jwt.Keyfunc` | https://github.com/MicahParks/keyfunc | - -*Disclaimer*: Unless otherwise specified, these integrations are maintained by third parties and should not be considered as a primary offer by any of the mentioned cloud providers - -## Compliance - -This library was last reviewed to comply with [RFC 7519](https://datatracker.ietf.org/doc/html/rfc7519) dated May 2015 with a few notable differences: - -* In order to protect against accidental use of [Unsecured JWTs](https://datatracker.ietf.org/doc/html/rfc7519#section-6), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key. - -## Project Status & Versioning - -This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason). - -This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `main`. Periodically, versions will be tagged from `main`. You can find all the releases on [the project releases page](https://github.com/golang-jwt/jwt/releases). - -**BREAKING CHANGES:*** -A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code. - -## Usage Tips - -### Signing vs Encryption - -A token is simply a JSON object that is signed by its author. this tells you exactly two things about the data: - -* The author of the token was in the possession of the signing secret -* The data has not been modified since it was signed - -It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library. - -### Choosing a Signing Method - -There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric. - -Symmetric signing methods, such as HSA, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly computationally faster to use, though this rarely is enough to matter. Symmetric signing methods work the best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation. - -Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification. 
- -### Signing Methods and Key Types - -Each signing method expects a different object type for its signing keys. See the package documentation for details. Here are the most common ones: - -* The [HMAC signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expect `[]byte` values for signing and validation -* The [RSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expect `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation -* The [ECDSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expect `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation -* The [EdDSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodEd25519) (`Ed25519`) expect `ed25519.PrivateKey` for signing and `ed25519.PublicKey` for validation - -### JWT and OAuth - -It's worth mentioning that OAuth and JWT are not the same thing. A JWT token is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication. - -Without going too far down the rabbit hole, here's a description of the interaction of these technologies: - -* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth. -* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token. -* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL. - -### Troubleshooting - -This library uses descriptive error messages whenever possible. If you are not getting the expected result, have a look at the errors. The most common place people get stuck is providing the correct type of key to the parser. See the above section on signing methods and key types. - -## More - -Documentation can be found [on pkg.go.dev](https://pkg.go.dev/github.com/golang-jwt/jwt). - -The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation. - -[golang-jwt](https://github.com/orgs/golang-jwt) incorporates a modified version of the JWT logo, which is distributed under the terms of the [MIT License](https://github.com/jsonwebtoken/jsonwebtoken.github.io/blob/master/LICENSE.txt). diff --git a/vendor/github.com/golang-jwt/jwt/v4/SECURITY.md b/vendor/github.com/golang-jwt/jwt/v4/SECURITY.md deleted file mode 100644 index b08402c..0000000 --- a/vendor/github.com/golang-jwt/jwt/v4/SECURITY.md +++ /dev/null @@ -1,19 +0,0 @@ -# Security Policy - -## Supported Versions - -As of February 2022 (and until this document is updated), the latest version `v4` is supported. 
- -## Reporting a Vulnerability - -If you think you found a vulnerability, and even if you are not sure, please report it to jwt-go-security@googlegroups.com or one of the other [golang-jwt maintainers](https://github.com/orgs/golang-jwt/people). Please try be explicit, describe steps to reproduce the security issue with code example(s). - -You will receive a response within a timely manner. If the issue is confirmed, we will do our best to release a patch as soon as possible given the complexity of the problem. - -## Public Discussions - -Please avoid publicly discussing a potential security vulnerability. - -Let's take this offline and find a solution first, this limits the potential impact as much as possible. - -We appreciate your help! diff --git a/vendor/github.com/golang-jwt/jwt/v4/VERSION_HISTORY.md b/vendor/github.com/golang-jwt/jwt/v4/VERSION_HISTORY.md deleted file mode 100644 index afbfc4e..0000000 --- a/vendor/github.com/golang-jwt/jwt/v4/VERSION_HISTORY.md +++ /dev/null @@ -1,135 +0,0 @@ -## `jwt-go` Version History - -#### 4.0.0 - -* Introduces support for Go modules. The `v4` version will be backwards compatible with `v3.x.y`. - -#### 3.2.2 - -* Starting from this release, we are adopting the policy to support the most 2 recent versions of Go currently available. By the time of this release, this is Go 1.15 and 1.16 ([#28](https://github.com/golang-jwt/jwt/pull/28)). -* Fixed a potential issue that could occur when the verification of `exp`, `iat` or `nbf` was not required and contained invalid contents, i.e. non-numeric/date. Thanks for @thaJeztah for making us aware of that and @giorgos-f3 for originally reporting it to the formtech fork ([#40](https://github.com/golang-jwt/jwt/pull/40)). -* Added support for EdDSA / ED25519 ([#36](https://github.com/golang-jwt/jwt/pull/36)). -* Optimized allocations ([#33](https://github.com/golang-jwt/jwt/pull/33)). - -#### 3.2.1 - -* **Import Path Change**: See MIGRATION_GUIDE.md for tips on updating your code - * Changed the import path from `github.com/dgrijalva/jwt-go` to `github.com/golang-jwt/jwt` -* Fixed type confusing issue between `string` and `[]string` in `VerifyAudience` ([#12](https://github.com/golang-jwt/jwt/pull/12)). This fixes CVE-2020-26160 - -#### 3.2.0 - -* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation -* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate -* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. Initial set include `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before. -* Deprecated `ParseFromRequestWithClaims` to simplify API in the future. - -#### 3.1.0 - -* Improvements to `jwt` command line tool -* Added `SkipClaimsValidation` option to `Parser` -* Documentation updates - -#### 3.0.0 - -* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code - * Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods. - * `ParseFromRequest` has been moved to `request` subpackage and usage has changed - * The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias to `map[string]interface{}`. This makes it possible to use a custom type when decoding claims. 
-* Other Additions and Changes - * Added `Claims` interface type to allow users to decode the claims into a custom type - * Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into. - * Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage - * Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims` - * Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`. - * Added several new, more specific, validation errors to error type bitmask - * Moved examples from README to executable example files - * Signing method registry is now thread safe - * Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser) - -#### 2.7.0 - -This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes. - -* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying -* Error text for expired tokens includes how long it's been expired -* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM` -* Documentation updates - -#### 2.6.0 - -* Exposed inner error within ValidationError -* Fixed validation errors when using UseJSONNumber flag -* Added several unit tests - -#### 2.5.0 - -* Added support for signing method none. You shouldn't use this. The API tries to make this clear. -* Updated/fixed some documentation -* Added more helpful error message when trying to parse tokens that begin with `BEARER ` - -#### 2.4.0 - -* Added new type, Parser, to allow for configuration of various parsing parameters - * You can now specify a list of valid signing methods. Anything outside this set will be rejected. - * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON -* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go) -* Fixed some bugs with ECDSA parsing - -#### 2.3.0 - -* Added support for ECDSA signing methods -* Added support for RSA PSS signing methods (requires go v1.4) - -#### 2.2.0 - -* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic. - -#### 2.1.0 - -Backwards compatible API change that was missed in 2.0.0. - -* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte` - -#### 2.0.0 - -There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change. - -The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibilty has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`. 
- -It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`. - -* **Compatibility Breaking Changes** - * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct` - * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct` - * `KeyFunc` now returns `interface{}` instead of `[]byte` - * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key - * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key -* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type. - * Added public package global `SigningMethodHS256` - * Added public package global `SigningMethodHS384` - * Added public package global `SigningMethodHS512` -* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type. - * Added public package global `SigningMethodRS256` - * Added public package global `SigningMethodRS384` - * Added public package global `SigningMethodRS512` -* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged. -* Refactored the RSA implementation to be easier to read -* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM` - -#### 1.0.2 - -* Fixed bug in parsing public keys from certificates -* Added more tests around the parsing of keys for RS256 -* Code refactoring in RS256 implementation. No functional changes - -#### 1.0.1 - -* Fixed panic if RS256 signing method was passed an invalid key - -#### 1.0.0 - -* First versioned release -* API stabilized -* Supports creating, signing, parsing, and validating JWT tokens -* Supports RS256 and HS256 signing methods diff --git a/vendor/github.com/golang-jwt/jwt/v4/claims.go b/vendor/github.com/golang-jwt/jwt/v4/claims.go deleted file mode 100644 index 9d95cad..0000000 --- a/vendor/github.com/golang-jwt/jwt/v4/claims.go +++ /dev/null @@ -1,273 +0,0 @@ -package jwt - -import ( - "crypto/subtle" - "fmt" - "time" -) - -// Claims must just have a Valid method that determines -// if the token is invalid for any supported reason -type Claims interface { - Valid() error -} - -// RegisteredClaims are a structured version of the JWT Claims Set, -// restricted to Registered Claim Names, as referenced at -// https://datatracker.ietf.org/doc/html/rfc7519#section-4.1 -// -// This type can be used on its own, but then additional private and -// public claims embedded in the JWT will not be parsed. The typical usecase -// therefore is to embedded this in a user-defined claim type. -// -// See examples for how to use this with your own claim types. -type RegisteredClaims struct { - // the `iss` (Issuer) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.1 - Issuer string `json:"iss,omitempty"` - - // the `sub` (Subject) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.2 - Subject string `json:"sub,omitempty"` - - // the `aud` (Audience) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.3 - Audience ClaimStrings `json:"aud,omitempty"` - - // the `exp` (Expiration Time) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.4 - ExpiresAt *NumericDate `json:"exp,omitempty"` - - // the `nbf` (Not Before) claim. 
See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.5 - NotBefore *NumericDate `json:"nbf,omitempty"` - - // the `iat` (Issued At) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.6 - IssuedAt *NumericDate `json:"iat,omitempty"` - - // the `jti` (JWT ID) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.7 - ID string `json:"jti,omitempty"` -} - -// Valid validates time based claims "exp, iat, nbf". -// There is no accounting for clock skew. -// As well, if any of the above claims are not in the token, it will still -// be considered a valid claim. -func (c RegisteredClaims) Valid() error { - vErr := new(ValidationError) - now := TimeFunc() - - // The claims below are optional, by default, so if they are set to the - // default value in Go, let's not fail the verification for them. - if !c.VerifyExpiresAt(now, false) { - delta := now.Sub(c.ExpiresAt.Time) - vErr.Inner = fmt.Errorf("%s by %s", ErrTokenExpired, delta) - vErr.Errors |= ValidationErrorExpired - } - - if !c.VerifyIssuedAt(now, false) { - vErr.Inner = ErrTokenUsedBeforeIssued - vErr.Errors |= ValidationErrorIssuedAt - } - - if !c.VerifyNotBefore(now, false) { - vErr.Inner = ErrTokenNotValidYet - vErr.Errors |= ValidationErrorNotValidYet - } - - if vErr.valid() { - return nil - } - - return vErr -} - -// VerifyAudience compares the aud claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (c *RegisteredClaims) VerifyAudience(cmp string, req bool) bool { - return verifyAud(c.Audience, cmp, req) -} - -// VerifyExpiresAt compares the exp claim against cmp (cmp < exp). -// If req is false, it will return true, if exp is unset. -func (c *RegisteredClaims) VerifyExpiresAt(cmp time.Time, req bool) bool { - if c.ExpiresAt == nil { - return verifyExp(nil, cmp, req) - } - - return verifyExp(&c.ExpiresAt.Time, cmp, req) -} - -// VerifyIssuedAt compares the iat claim against cmp (cmp >= iat). -// If req is false, it will return true, if iat is unset. -func (c *RegisteredClaims) VerifyIssuedAt(cmp time.Time, req bool) bool { - if c.IssuedAt == nil { - return verifyIat(nil, cmp, req) - } - - return verifyIat(&c.IssuedAt.Time, cmp, req) -} - -// VerifyNotBefore compares the nbf claim against cmp (cmp >= nbf). -// If req is false, it will return true, if nbf is unset. -func (c *RegisteredClaims) VerifyNotBefore(cmp time.Time, req bool) bool { - if c.NotBefore == nil { - return verifyNbf(nil, cmp, req) - } - - return verifyNbf(&c.NotBefore.Time, cmp, req) -} - -// VerifyIssuer compares the iss claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (c *RegisteredClaims) VerifyIssuer(cmp string, req bool) bool { - return verifyIss(c.Issuer, cmp, req) -} - -// StandardClaims are a structured version of the JWT Claims Set, as referenced at -// https://datatracker.ietf.org/doc/html/rfc7519#section-4. They do not follow the -// specification exactly, since they were based on an earlier draft of the -// specification and not updated. The main difference is that they only -// support integer-based date fields and singular audiences. This might lead to -// incompatibilities with other JWT implementations. The use of this is discouraged, instead -// the newer RegisteredClaims struct should be used. -// -// Deprecated: Use RegisteredClaims instead for a forward-compatible way to access registered claims in a struct. 
-type StandardClaims struct { - Audience string `json:"aud,omitempty"` - ExpiresAt int64 `json:"exp,omitempty"` - Id string `json:"jti,omitempty"` - IssuedAt int64 `json:"iat,omitempty"` - Issuer string `json:"iss,omitempty"` - NotBefore int64 `json:"nbf,omitempty"` - Subject string `json:"sub,omitempty"` -} - -// Valid validates time based claims "exp, iat, nbf". There is no accounting for clock skew. -// As well, if any of the above claims are not in the token, it will still -// be considered a valid claim. -func (c StandardClaims) Valid() error { - vErr := new(ValidationError) - now := TimeFunc().Unix() - - // The claims below are optional, by default, so if they are set to the - // default value in Go, let's not fail the verification for them. - if !c.VerifyExpiresAt(now, false) { - delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0)) - vErr.Inner = fmt.Errorf("%s by %s", ErrTokenExpired, delta) - vErr.Errors |= ValidationErrorExpired - } - - if !c.VerifyIssuedAt(now, false) { - vErr.Inner = ErrTokenUsedBeforeIssued - vErr.Errors |= ValidationErrorIssuedAt - } - - if !c.VerifyNotBefore(now, false) { - vErr.Inner = ErrTokenNotValidYet - vErr.Errors |= ValidationErrorNotValidYet - } - - if vErr.valid() { - return nil - } - - return vErr -} - -// VerifyAudience compares the aud claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool { - return verifyAud([]string{c.Audience}, cmp, req) -} - -// VerifyExpiresAt compares the exp claim against cmp (cmp < exp). -// If req is false, it will return true, if exp is unset. -func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool { - if c.ExpiresAt == 0 { - return verifyExp(nil, time.Unix(cmp, 0), req) - } - - t := time.Unix(c.ExpiresAt, 0) - return verifyExp(&t, time.Unix(cmp, 0), req) -} - -// VerifyIssuedAt compares the iat claim against cmp (cmp >= iat). -// If req is false, it will return true, if iat is unset. -func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool { - if c.IssuedAt == 0 { - return verifyIat(nil, time.Unix(cmp, 0), req) - } - - t := time.Unix(c.IssuedAt, 0) - return verifyIat(&t, time.Unix(cmp, 0), req) -} - -// VerifyNotBefore compares the nbf claim against cmp (cmp >= nbf). -// If req is false, it will return true, if nbf is unset. -func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool { - if c.NotBefore == 0 { - return verifyNbf(nil, time.Unix(cmp, 0), req) - } - - t := time.Unix(c.NotBefore, 0) - return verifyNbf(&t, time.Unix(cmp, 0), req) -} - -// VerifyIssuer compares the iss claim against cmp. 
-// If required is false, this method will return true if the value matches or is unset -func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool { - return verifyIss(c.Issuer, cmp, req) -} - -// ----- helpers - -func verifyAud(aud []string, cmp string, required bool) bool { - if len(aud) == 0 { - return !required - } - // use a var here to keep constant time compare when looping over a number of claims - result := false - - var stringClaims string - for _, a := range aud { - if subtle.ConstantTimeCompare([]byte(a), []byte(cmp)) != 0 { - result = true - } - stringClaims = stringClaims + a - } - - // case where "" is sent in one or many aud claims - if len(stringClaims) == 0 { - return !required - } - - return result -} - -func verifyExp(exp *time.Time, now time.Time, required bool) bool { - if exp == nil { - return !required - } - return now.Before(*exp) -} - -func verifyIat(iat *time.Time, now time.Time, required bool) bool { - if iat == nil { - return !required - } - return now.After(*iat) || now.Equal(*iat) -} - -func verifyNbf(nbf *time.Time, now time.Time, required bool) bool { - if nbf == nil { - return !required - } - return now.After(*nbf) || now.Equal(*nbf) -} - -func verifyIss(iss string, cmp string, required bool) bool { - if iss == "" { - return !required - } - if subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 { - return true - } else { - return false - } -} diff --git a/vendor/github.com/golang-jwt/jwt/v4/doc.go b/vendor/github.com/golang-jwt/jwt/v4/doc.go deleted file mode 100644 index a86dc1a..0000000 --- a/vendor/github.com/golang-jwt/jwt/v4/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html -// -// See README.md for more info. -package jwt diff --git a/vendor/github.com/golang-jwt/jwt/v4/ecdsa.go b/vendor/github.com/golang-jwt/jwt/v4/ecdsa.go deleted file mode 100644 index eac023f..0000000 --- a/vendor/github.com/golang-jwt/jwt/v4/ecdsa.go +++ /dev/null @@ -1,142 +0,0 @@ -package jwt - -import ( - "crypto" - "crypto/ecdsa" - "crypto/rand" - "errors" - "math/big" -) - -var ( - // Sadly this is missing from crypto/ecdsa compared to crypto/rsa - ErrECDSAVerification = errors.New("crypto/ecdsa: verification error") -) - -// SigningMethodECDSA implements the ECDSA family of signing methods. -// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification -type SigningMethodECDSA struct { - Name string - Hash crypto.Hash - KeySize int - CurveBits int -} - -// Specific instances for EC256 and company -var ( - SigningMethodES256 *SigningMethodECDSA - SigningMethodES384 *SigningMethodECDSA - SigningMethodES512 *SigningMethodECDSA -) - -func init() { - // ES256 - SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256} - RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod { - return SigningMethodES256 - }) - - // ES384 - SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384} - RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod { - return SigningMethodES384 - }) - - // ES512 - SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521} - RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod { - return SigningMethodES512 - }) -} - -func (m *SigningMethodECDSA) Alg() string { - return m.Name -} - -// Verify implements token verification for the SigningMethod. 
-// For this verify method, key must be an ecdsa.PublicKey struct -func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error { - var err error - - // Decode the signature - var sig []byte - if sig, err = DecodeSegment(signature); err != nil { - return err - } - - // Get the key - var ecdsaKey *ecdsa.PublicKey - switch k := key.(type) { - case *ecdsa.PublicKey: - ecdsaKey = k - default: - return ErrInvalidKeyType - } - - if len(sig) != 2*m.KeySize { - return ErrECDSAVerification - } - - r := big.NewInt(0).SetBytes(sig[:m.KeySize]) - s := big.NewInt(0).SetBytes(sig[m.KeySize:]) - - // Create hasher - if !m.Hash.Available() { - return ErrHashUnavailable - } - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Verify the signature - if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus { - return nil - } - - return ErrECDSAVerification -} - -// Sign implements token signing for the SigningMethod. -// For this signing method, key must be an ecdsa.PrivateKey struct -func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) { - // Get the key - var ecdsaKey *ecdsa.PrivateKey - switch k := key.(type) { - case *ecdsa.PrivateKey: - ecdsaKey = k - default: - return "", ErrInvalidKeyType - } - - // Create the hasher - if !m.Hash.Available() { - return "", ErrHashUnavailable - } - - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Sign the string and return r, s - if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil { - curveBits := ecdsaKey.Curve.Params().BitSize - - if m.CurveBits != curveBits { - return "", ErrInvalidKey - } - - keyBytes := curveBits / 8 - if curveBits%8 > 0 { - keyBytes += 1 - } - - // We serialize the outputs (r and s) into big-endian byte arrays - // padded with zeros on the left to make sure the sizes work out. - // Output must be 2*keyBytes long. - out := make([]byte, 2*keyBytes) - r.FillBytes(out[0:keyBytes]) // r is assigned to the first half of output. - s.FillBytes(out[keyBytes:]) // s is assigned to the second half of output. 
- - return EncodeSegment(out), nil - } else { - return "", err - } -} diff --git a/vendor/github.com/golang-jwt/jwt/v4/ecdsa_utils.go b/vendor/github.com/golang-jwt/jwt/v4/ecdsa_utils.go deleted file mode 100644 index 5700636..0000000 --- a/vendor/github.com/golang-jwt/jwt/v4/ecdsa_utils.go +++ /dev/null @@ -1,69 +0,0 @@ -package jwt - -import ( - "crypto/ecdsa" - "crypto/x509" - "encoding/pem" - "errors" -) - -var ( - ErrNotECPublicKey = errors.New("key is not a valid ECDSA public key") - ErrNotECPrivateKey = errors.New("key is not a valid ECDSA private key") -) - -// ParseECPrivateKeyFromPEM parses a PEM encoded Elliptic Curve Private Key Structure -func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil { - if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { - return nil, err - } - } - - var pkey *ecdsa.PrivateKey - var ok bool - if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok { - return nil, ErrNotECPrivateKey - } - - return pkey, nil -} - -// ParseECPublicKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 public key -func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { - if cert, err := x509.ParseCertificate(block.Bytes); err == nil { - parsedKey = cert.PublicKey - } else { - return nil, err - } - } - - var pkey *ecdsa.PublicKey - var ok bool - if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok { - return nil, ErrNotECPublicKey - } - - return pkey, nil -} diff --git a/vendor/github.com/golang-jwt/jwt/v4/ed25519.go b/vendor/github.com/golang-jwt/jwt/v4/ed25519.go deleted file mode 100644 index 07d3aac..0000000 --- a/vendor/github.com/golang-jwt/jwt/v4/ed25519.go +++ /dev/null @@ -1,85 +0,0 @@ -package jwt - -import ( - "errors" - - "crypto" - "crypto/ed25519" - "crypto/rand" -) - -var ( - ErrEd25519Verification = errors.New("ed25519: verification error") -) - -// SigningMethodEd25519 implements the EdDSA family. -// Expects ed25519.PrivateKey for signing and ed25519.PublicKey for verification -type SigningMethodEd25519 struct{} - -// Specific instance for EdDSA -var ( - SigningMethodEdDSA *SigningMethodEd25519 -) - -func init() { - SigningMethodEdDSA = &SigningMethodEd25519{} - RegisterSigningMethod(SigningMethodEdDSA.Alg(), func() SigningMethod { - return SigningMethodEdDSA - }) -} - -func (m *SigningMethodEd25519) Alg() string { - return "EdDSA" -} - -// Verify implements token verification for the SigningMethod. 
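Taken together, the ECDSA pieces above expect an *ecdsa.PrivateKey for signing and an *ecdsa.PublicKey for verification, with the signature serialized as fixed-width r||s. A hedged end-to-end sketch (the key is generated in-process purely for illustration):

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"

	jwt "github.com/golang-jwt/jwt/v4"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}

	token := jwt.NewWithClaims(jwt.SigningMethodES256, jwt.MapClaims{"sub": "demo"})
	signed, err := token.SignedString(key) // *ecdsa.PrivateKey per SigningMethodECDSA.Sign
	if err != nil {
		panic(err)
	}

	parsed, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		return &key.PublicKey, nil // *ecdsa.PublicKey per SigningMethodECDSA.Verify
	}, jwt.WithValidMethods([]string{"ES256"}))
	fmt.Println(parsed.Valid, err) // true <nil>
}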
-// For this verify method, key must be an ed25519.PublicKey -func (m *SigningMethodEd25519) Verify(signingString, signature string, key interface{}) error { - var err error - var ed25519Key ed25519.PublicKey - var ok bool - - if ed25519Key, ok = key.(ed25519.PublicKey); !ok { - return ErrInvalidKeyType - } - - if len(ed25519Key) != ed25519.PublicKeySize { - return ErrInvalidKey - } - - // Decode the signature - var sig []byte - if sig, err = DecodeSegment(signature); err != nil { - return err - } - - // Verify the signature - if !ed25519.Verify(ed25519Key, []byte(signingString), sig) { - return ErrEd25519Verification - } - - return nil -} - -// Sign implements token signing for the SigningMethod. -// For this signing method, key must be an ed25519.PrivateKey -func (m *SigningMethodEd25519) Sign(signingString string, key interface{}) (string, error) { - var ed25519Key crypto.Signer - var ok bool - - if ed25519Key, ok = key.(crypto.Signer); !ok { - return "", ErrInvalidKeyType - } - - if _, ok := ed25519Key.Public().(ed25519.PublicKey); !ok { - return "", ErrInvalidKey - } - - // Sign the string and return the encoded result - // ed25519 performs a two-pass hash as part of its algorithm. Therefore, we need to pass a non-prehashed message into the Sign function, as indicated by crypto.Hash(0) - sig, err := ed25519Key.Sign(rand.Reader, []byte(signingString), crypto.Hash(0)) - if err != nil { - return "", err - } - return EncodeSegment(sig), nil -} diff --git a/vendor/github.com/golang-jwt/jwt/v4/ed25519_utils.go b/vendor/github.com/golang-jwt/jwt/v4/ed25519_utils.go deleted file mode 100644 index cdb5e68..0000000 --- a/vendor/github.com/golang-jwt/jwt/v4/ed25519_utils.go +++ /dev/null @@ -1,64 +0,0 @@ -package jwt - -import ( - "crypto" - "crypto/ed25519" - "crypto/x509" - "encoding/pem" - "errors" -) - -var ( - ErrNotEdPrivateKey = errors.New("key is not a valid Ed25519 private key") - ErrNotEdPublicKey = errors.New("key is not a valid Ed25519 public key") -) - -// ParseEdPrivateKeyFromPEM parses a PEM-encoded Edwards curve private key -func ParseEdPrivateKeyFromPEM(key []byte) (crypto.PrivateKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { - return nil, err - } - - var pkey ed25519.PrivateKey - var ok bool - if pkey, ok = parsedKey.(ed25519.PrivateKey); !ok { - return nil, ErrNotEdPrivateKey - } - - return pkey, nil -} - -// ParseEdPublicKeyFromPEM parses a PEM-encoded Edwards curve public key -func ParseEdPublicKeyFromPEM(key []byte) (crypto.PublicKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { - return nil, err - } - - var pkey ed25519.PublicKey - var ok bool - if pkey, ok = parsedKey.(ed25519.PublicKey); !ok { - return nil, ErrNotEdPublicKey - } - - return pkey, nil -} diff --git a/vendor/github.com/golang-jwt/jwt/v4/errors.go b/vendor/github.com/golang-jwt/jwt/v4/errors.go deleted file mode 100644 index 10ac883..0000000 --- a/vendor/github.com/golang-jwt/jwt/v4/errors.go +++ /dev/null @@ -1,112 +0,0 @@ -package jwt - -import ( - "errors" -) - -// Error constants -var ( - ErrInvalidKey = errors.New("key is invalid") - 
ErrInvalidKeyType = errors.New("key is of invalid type") - ErrHashUnavailable = errors.New("the requested hash function is unavailable") - - ErrTokenMalformed = errors.New("token is malformed") - ErrTokenUnverifiable = errors.New("token is unverifiable") - ErrTokenSignatureInvalid = errors.New("token signature is invalid") - - ErrTokenInvalidAudience = errors.New("token has invalid audience") - ErrTokenExpired = errors.New("token is expired") - ErrTokenUsedBeforeIssued = errors.New("token used before issued") - ErrTokenInvalidIssuer = errors.New("token has invalid issuer") - ErrTokenNotValidYet = errors.New("token is not valid yet") - ErrTokenInvalidId = errors.New("token has invalid id") - ErrTokenInvalidClaims = errors.New("token has invalid claims") -) - -// The errors that might occur when parsing and validating a token -const ( - ValidationErrorMalformed uint32 = 1 << iota // Token is malformed - ValidationErrorUnverifiable // Token could not be verified because of signing problems - ValidationErrorSignatureInvalid // Signature validation failed - - // Standard Claim validation errors - ValidationErrorAudience // AUD validation failed - ValidationErrorExpired // EXP validation failed - ValidationErrorIssuedAt // IAT validation failed - ValidationErrorIssuer // ISS validation failed - ValidationErrorNotValidYet // NBF validation failed - ValidationErrorId // JTI validation failed - ValidationErrorClaimsInvalid // Generic claims validation error -) - -// NewValidationError is a helper for constructing a ValidationError with a string error message -func NewValidationError(errorText string, errorFlags uint32) *ValidationError { - return &ValidationError{ - text: errorText, - Errors: errorFlags, - } -} - -// ValidationError represents an error from Parse if token is not valid -type ValidationError struct { - Inner error // stores the error returned by external dependencies, i.e.: KeyFunc - Errors uint32 // bitfield. see ValidationError... constants - text string // errors that do not have a valid error just have text -} - -// Error is the implementation of the err interface. -func (e ValidationError) Error() string { - if e.Inner != nil { - return e.Inner.Error() - } else if e.text != "" { - return e.text - } else { - return "token is invalid" - } -} - -// Unwrap gives errors.Is and errors.As access to the inner error. -func (e *ValidationError) Unwrap() error { - return e.Inner -} - -// No errors -func (e *ValidationError) valid() bool { - return e.Errors == 0 -} - -// Is checks if this ValidationError is of the supplied error. We are first checking for the exact error message -// by comparing the inner error message. If that fails, we compare using the error flags. This way we can use -// custom error messages (mainly for backwards compatability) and still leverage errors.Is using the global error variables. 
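Because ValidationError implements Is (its body follows below), the exported sentinel errors can be matched with the standard errors package even though the parser records state in a bitfield. An illustrative check, using a deliberately malformed token string:

package main

import (
	"errors"
	"fmt"

	jwt "github.com/golang-jwt/jwt/v4"
)

func main() {
	_, err := jwt.Parse("not.a.token", func(t *jwt.Token) (interface{}, error) {
		return []byte("secret"), nil
	})

	// Is maps the Errors bitfield onto the sentinel errors, so errors.Is
	// works without comparing error strings.
	fmt.Println(errors.Is(err, jwt.ErrTokenMalformed)) // true
}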
-func (e *ValidationError) Is(err error) bool { - // Check, if our inner error is a direct match - if errors.Is(errors.Unwrap(e), err) { - return true - } - - // Otherwise, we need to match using our error flags - switch err { - case ErrTokenMalformed: - return e.Errors&ValidationErrorMalformed != 0 - case ErrTokenUnverifiable: - return e.Errors&ValidationErrorUnverifiable != 0 - case ErrTokenSignatureInvalid: - return e.Errors&ValidationErrorSignatureInvalid != 0 - case ErrTokenInvalidAudience: - return e.Errors&ValidationErrorAudience != 0 - case ErrTokenExpired: - return e.Errors&ValidationErrorExpired != 0 - case ErrTokenUsedBeforeIssued: - return e.Errors&ValidationErrorIssuedAt != 0 - case ErrTokenInvalidIssuer: - return e.Errors&ValidationErrorIssuer != 0 - case ErrTokenNotValidYet: - return e.Errors&ValidationErrorNotValidYet != 0 - case ErrTokenInvalidId: - return e.Errors&ValidationErrorId != 0 - case ErrTokenInvalidClaims: - return e.Errors&ValidationErrorClaimsInvalid != 0 - } - - return false -} diff --git a/vendor/github.com/golang-jwt/jwt/v4/hmac.go b/vendor/github.com/golang-jwt/jwt/v4/hmac.go deleted file mode 100644 index 011f68a..0000000 --- a/vendor/github.com/golang-jwt/jwt/v4/hmac.go +++ /dev/null @@ -1,95 +0,0 @@ -package jwt - -import ( - "crypto" - "crypto/hmac" - "errors" -) - -// SigningMethodHMAC implements the HMAC-SHA family of signing methods. -// Expects key type of []byte for both signing and validation -type SigningMethodHMAC struct { - Name string - Hash crypto.Hash -} - -// Specific instances for HS256 and company -var ( - SigningMethodHS256 *SigningMethodHMAC - SigningMethodHS384 *SigningMethodHMAC - SigningMethodHS512 *SigningMethodHMAC - ErrSignatureInvalid = errors.New("signature is invalid") -) - -func init() { - // HS256 - SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256} - RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod { - return SigningMethodHS256 - }) - - // HS384 - SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384} - RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod { - return SigningMethodHS384 - }) - - // HS512 - SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512} - RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod { - return SigningMethodHS512 - }) -} - -func (m *SigningMethodHMAC) Alg() string { - return m.Name -} - -// Verify implements token verification for the SigningMethod. Returns nil if the signature is valid. -func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error { - // Verify the key is the right type - keyBytes, ok := key.([]byte) - if !ok { - return ErrInvalidKeyType - } - - // Decode signature, for comparison - sig, err := DecodeSegment(signature) - if err != nil { - return err - } - - // Can we use the specified hashing method? - if !m.Hash.Available() { - return ErrHashUnavailable - } - - // This signing method is symmetric, so we validate the signature - // by reproducing the signature from the signing string and key, then - // comparing that against the provided signature. - hasher := hmac.New(m.Hash.New, keyBytes) - hasher.Write([]byte(signingString)) - if !hmac.Equal(sig, hasher.Sum(nil)) { - return ErrSignatureInvalid - } - - // No validation errors. Signature is good. - return nil -} - -// Sign implements token signing for the SigningMethod. 
-// Key must be []byte -func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) { - if keyBytes, ok := key.([]byte); ok { - if !m.Hash.Available() { - return "", ErrHashUnavailable - } - - hasher := hmac.New(m.Hash.New, keyBytes) - hasher.Write([]byte(signingString)) - - return EncodeSegment(hasher.Sum(nil)), nil - } - - return "", ErrInvalidKeyType -} diff --git a/vendor/github.com/golang-jwt/jwt/v4/map_claims.go b/vendor/github.com/golang-jwt/jwt/v4/map_claims.go deleted file mode 100644 index 2700d64..0000000 --- a/vendor/github.com/golang-jwt/jwt/v4/map_claims.go +++ /dev/null @@ -1,151 +0,0 @@ -package jwt - -import ( - "encoding/json" - "errors" - "time" - // "fmt" -) - -// MapClaims is a claims type that uses the map[string]interface{} for JSON decoding. -// This is the default claims type if you don't supply one -type MapClaims map[string]interface{} - -// VerifyAudience Compares the aud claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (m MapClaims) VerifyAudience(cmp string, req bool) bool { - var aud []string - switch v := m["aud"].(type) { - case string: - aud = append(aud, v) - case []string: - aud = v - case []interface{}: - for _, a := range v { - vs, ok := a.(string) - if !ok { - return false - } - aud = append(aud, vs) - } - } - return verifyAud(aud, cmp, req) -} - -// VerifyExpiresAt compares the exp claim against cmp (cmp <= exp). -// If req is false, it will return true, if exp is unset. -func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool { - cmpTime := time.Unix(cmp, 0) - - v, ok := m["exp"] - if !ok { - return !req - } - - switch exp := v.(type) { - case float64: - if exp == 0 { - return verifyExp(nil, cmpTime, req) - } - - return verifyExp(&newNumericDateFromSeconds(exp).Time, cmpTime, req) - case json.Number: - v, _ := exp.Float64() - - return verifyExp(&newNumericDateFromSeconds(v).Time, cmpTime, req) - } - - return false -} - -// VerifyIssuedAt compares the exp claim against cmp (cmp >= iat). -// If req is false, it will return true, if iat is unset. -func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool { - cmpTime := time.Unix(cmp, 0) - - v, ok := m["iat"] - if !ok { - return !req - } - - switch iat := v.(type) { - case float64: - if iat == 0 { - return verifyIat(nil, cmpTime, req) - } - - return verifyIat(&newNumericDateFromSeconds(iat).Time, cmpTime, req) - case json.Number: - v, _ := iat.Float64() - - return verifyIat(&newNumericDateFromSeconds(v).Time, cmpTime, req) - } - - return false -} - -// VerifyNotBefore compares the nbf claim against cmp (cmp >= nbf). -// If req is false, it will return true, if nbf is unset. -func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool { - cmpTime := time.Unix(cmp, 0) - - v, ok := m["nbf"] - if !ok { - return !req - } - - switch nbf := v.(type) { - case float64: - if nbf == 0 { - return verifyNbf(nil, cmpTime, req) - } - - return verifyNbf(&newNumericDateFromSeconds(nbf).Time, cmpTime, req) - case json.Number: - v, _ := nbf.Float64() - - return verifyNbf(&newNumericDateFromSeconds(v).Time, cmpTime, req) - } - - return false -} - -// VerifyIssuer compares the iss claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (m MapClaims) VerifyIssuer(cmp string, req bool) bool { - iss, _ := m["iss"].(string) - return verifyIss(iss, cmp, req) -} - -// Valid validates time based claims "exp, iat, nbf". -// There is no accounting for clock skew. 
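MapClaims above has to cope with JSON numbers arriving either as float64 (default decoding) or as json.Number (when UseNumber is enabled), which is why every Verify* method switches on the dynamic type. A small sketch of the float64 path:

package main

import (
	"fmt"
	"time"

	jwt "github.com/golang-jwt/jwt/v4"
)

func main() {
	claims := jwt.MapClaims{
		"exp": float64(time.Now().Add(time.Hour).Unix()), // default JSON decoding yields float64
	}

	// req=false: a missing exp would also pass; a present exp must be in the future.
	fmt.Println(claims.VerifyExpiresAt(time.Now().Unix(), false)) // true
}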
-// As well, if any of the above claims are not in the token, it will still -// be considered a valid claim. -func (m MapClaims) Valid() error { - vErr := new(ValidationError) - now := TimeFunc().Unix() - - if !m.VerifyExpiresAt(now, false) { - // TODO(oxisto): this should be replaced with ErrTokenExpired - vErr.Inner = errors.New("Token is expired") - vErr.Errors |= ValidationErrorExpired - } - - if !m.VerifyIssuedAt(now, false) { - // TODO(oxisto): this should be replaced with ErrTokenUsedBeforeIssued - vErr.Inner = errors.New("Token used before issued") - vErr.Errors |= ValidationErrorIssuedAt - } - - if !m.VerifyNotBefore(now, false) { - // TODO(oxisto): this should be replaced with ErrTokenNotValidYet - vErr.Inner = errors.New("Token is not valid yet") - vErr.Errors |= ValidationErrorNotValidYet - } - - if vErr.valid() { - return nil - } - - return vErr -} diff --git a/vendor/github.com/golang-jwt/jwt/v4/none.go b/vendor/github.com/golang-jwt/jwt/v4/none.go deleted file mode 100644 index f19835d..0000000 --- a/vendor/github.com/golang-jwt/jwt/v4/none.go +++ /dev/null @@ -1,52 +0,0 @@ -package jwt - -// SigningMethodNone implements the none signing method. This is required by the spec -// but you probably should never use it. -var SigningMethodNone *signingMethodNone - -const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed" - -var NoneSignatureTypeDisallowedError error - -type signingMethodNone struct{} -type unsafeNoneMagicConstant string - -func init() { - SigningMethodNone = &signingMethodNone{} - NoneSignatureTypeDisallowedError = NewValidationError("'none' signature type is not allowed", ValidationErrorSignatureInvalid) - - RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod { - return SigningMethodNone - }) -} - -func (m *signingMethodNone) Alg() string { - return "none" -} - -// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key -func (m *signingMethodNone) Verify(signingString, signature string, key interface{}) (err error) { - // Key must be UnsafeAllowNoneSignatureType to prevent accidentally - // accepting 'none' signing method - if _, ok := key.(unsafeNoneMagicConstant); !ok { - return NoneSignatureTypeDisallowedError - } - // If signing method is none, signature must be an empty string - if signature != "" { - return NewValidationError( - "'none' signing method with non-empty signature", - ValidationErrorSignatureInvalid, - ) - } - - // Accept 'none' signing method. - return nil -} - -// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key -func (m *signingMethodNone) Sign(signingString string, key interface{}) (string, error) { - if _, ok := key.(unsafeNoneMagicConstant); ok { - return "", nil - } - return "", NoneSignatureTypeDisallowedError -} diff --git a/vendor/github.com/golang-jwt/jwt/v4/parser.go b/vendor/github.com/golang-jwt/jwt/v4/parser.go deleted file mode 100644 index 2f61a69..0000000 --- a/vendor/github.com/golang-jwt/jwt/v4/parser.go +++ /dev/null @@ -1,170 +0,0 @@ -package jwt - -import ( - "bytes" - "encoding/json" - "fmt" - "strings" -) - -type Parser struct { - // If populated, only these methods will be considered valid. - // - // Deprecated: In future releases, this field will not be exported anymore and should be set with an option to NewParser instead. - ValidMethods []string - - // Use JSON Number format in JSON decoder. 
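The guard in none.go above deserves spelling out: the "none" algorithm only functions when the caller passes the package's magic constant as the key, so unsigned tokens cannot be accepted by accident. A sketch:

package main

import (
	"fmt"

	jwt "github.com/golang-jwt/jwt/v4"
)

func main() {
	tok := jwt.New(jwt.SigningMethodNone)
	unsigned, _ := tok.SignedString(jwt.UnsafeAllowNoneSignatureType) // "<header>.<claims>."

	// With any ordinary key, verification of a "none" token is rejected outright.
	_, err := jwt.Parse(unsigned, func(t *jwt.Token) (interface{}, error) {
		return []byte("irrelevant"), nil
	})
	fmt.Println(err) // 'none' signature type is not allowed
}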
- // - // Deprecated: In future releases, this field will not be exported anymore and should be set with an option to NewParser instead. - UseJSONNumber bool - - // Skip claims validation during token parsing. - // - // Deprecated: In future releases, this field will not be exported anymore and should be set with an option to NewParser instead. - SkipClaimsValidation bool -} - -// NewParser creates a new Parser with the specified options -func NewParser(options ...ParserOption) *Parser { - p := &Parser{} - - // loop through our parsing options and apply them - for _, option := range options { - option(p) - } - - return p -} - -// Parse parses, validates, verifies the signature and returns the parsed token. -// keyFunc will receive the parsed token and should return the key for validating. -func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { - return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc) -} - -func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { - token, parts, err := p.ParseUnverified(tokenString, claims) - if err != nil { - return token, err - } - - // Verify signing method is in the required set - if p.ValidMethods != nil { - var signingMethodValid = false - var alg = token.Method.Alg() - for _, m := range p.ValidMethods { - if m == alg { - signingMethodValid = true - break - } - } - if !signingMethodValid { - // signing method is not in the listed set - return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid) - } - } - - // Lookup key - var key interface{} - if keyFunc == nil { - // keyFunc was not provided. short circuiting validation - return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable) - } - if key, err = keyFunc(token); err != nil { - // keyFunc returned an error - if ve, ok := err.(*ValidationError); ok { - return token, ve - } - return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable} - } - - vErr := &ValidationError{} - - // Validate Claims - if !p.SkipClaimsValidation { - if err := token.Claims.Valid(); err != nil { - - // If the Claims Valid returned an error, check if it is a validation error, - // If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set - if e, ok := err.(*ValidationError); !ok { - vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid} - } else { - vErr = e - } - } - } - - // Perform validation - token.Signature = parts[2] - if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil { - vErr.Inner = err - vErr.Errors |= ValidationErrorSignatureInvalid - } - - if vErr.valid() { - token.Valid = true - return token, nil - } - - return token, vErr -} - -// ParseUnverified parses the token but doesn't validate the signature. -// -// WARNING: Don't use this method unless you know what you're doing. -// -// It's only ever useful in cases where you know the signature is valid (because it has -// been checked previously in the stack) and you want to extract values from it. 
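The deprecation notes above all point the same way: configure parsing through NewParser options rather than by mutating the exported fields. A sketch combining the replacements (the secret and claims are invented):

package main

import (
	"fmt"

	jwt "github.com/golang-jwt/jwt/v4"
)

func main() {
	tok := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{"sub": "demo"})
	signed, _ := tok.SignedString([]byte("secret"))

	// Equivalent to setting ValidMethods and UseJSONNumber directly.
	p := jwt.NewParser(jwt.WithValidMethods([]string{"HS256"}), jwt.WithJSONNumber())

	claims := jwt.MapClaims{}
	parsed, err := p.ParseWithClaims(signed, claims, func(t *jwt.Token) (interface{}, error) {
		return []byte("secret"), nil
	})
	fmt.Println(parsed.Valid, claims["sub"], err) // true demo <nil>
}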
-func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) { - parts = strings.Split(tokenString, ".") - if len(parts) != 3 { - return nil, parts, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed) - } - - token = &Token{Raw: tokenString} - - // parse Header - var headerBytes []byte - if headerBytes, err = DecodeSegment(parts[0]); err != nil { - if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") { - return token, parts, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed) - } - return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} - } - if err = json.Unmarshal(headerBytes, &token.Header); err != nil { - return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} - } - - // parse Claims - var claimBytes []byte - token.Claims = claims - - if claimBytes, err = DecodeSegment(parts[1]); err != nil { - return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} - } - dec := json.NewDecoder(bytes.NewBuffer(claimBytes)) - if p.UseJSONNumber { - dec.UseNumber() - } - // JSON Decode. Special case for map type to avoid weird pointer behavior - if c, ok := token.Claims.(MapClaims); ok { - err = dec.Decode(&c) - } else { - err = dec.Decode(&claims) - } - // Handle decode error - if err != nil { - return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} - } - - // Lookup signature method - if method, ok := token.Header["alg"].(string); ok { - if token.Method = GetSigningMethod(method); token.Method == nil { - return token, parts, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable) - } - } else { - return token, parts, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable) - } - - return token, parts, nil -} diff --git a/vendor/github.com/golang-jwt/jwt/v4/parser_option.go b/vendor/github.com/golang-jwt/jwt/v4/parser_option.go deleted file mode 100644 index 6ea6f95..0000000 --- a/vendor/github.com/golang-jwt/jwt/v4/parser_option.go +++ /dev/null @@ -1,29 +0,0 @@ -package jwt - -// ParserOption is used to implement functional-style options that modify the behavior of the parser. To add -// new options, just create a function (ideally beginning with With or Without) that returns an anonymous function that -// takes a *Parser type as input and manipulates its configuration accordingly. -type ParserOption func(*Parser) - -// WithValidMethods is an option to supply algorithm methods that the parser will check. Only those methods will be considered valid. -// It is heavily encouraged to use this option in order to prevent attacks such as https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/. -func WithValidMethods(methods []string) ParserOption { - return func(p *Parser) { - p.ValidMethods = methods - } -} - -// WithJSONNumber is an option to configure the underlying JSON parser with UseNumber -func WithJSONNumber() ParserOption { - return func(p *Parser) { - p.UseJSONNumber = true - } -} - -// WithoutClaimsValidation is an option to disable claims validation. This option should only be used if you exactly know -// what you are doing. 
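ParseUnverified above decodes the header and claims without touching the signature; as its warning says, it is only appropriate when verification happens elsewhere, for example to read the header before choosing a key. A cautious sketch:

package main

import (
	"fmt"

	jwt "github.com/golang-jwt/jwt/v4"
)

func main() {
	tok := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{"sub": "demo"})
	signed, _ := tok.SignedString([]byte("secret"))

	// No signature check happens here; t.Valid stays false.
	t, _, err := new(jwt.Parser).ParseUnverified(signed, jwt.MapClaims{})
	if err != nil {
		panic(err)
	}
	fmt.Println(t.Header["alg"], t.Claims.(jwt.MapClaims)["sub"]) // HS256 demo
}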
-func WithoutClaimsValidation() ParserOption { - return func(p *Parser) { - p.SkipClaimsValidation = true - } -} diff --git a/vendor/github.com/golang-jwt/jwt/v4/rsa.go b/vendor/github.com/golang-jwt/jwt/v4/rsa.go deleted file mode 100644 index b910b19..0000000 --- a/vendor/github.com/golang-jwt/jwt/v4/rsa.go +++ /dev/null @@ -1,101 +0,0 @@ -package jwt - -import ( - "crypto" - "crypto/rand" - "crypto/rsa" -) - -// SigningMethodRSA implements the RSA family of signing methods. -// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation -type SigningMethodRSA struct { - Name string - Hash crypto.Hash -} - -// Specific instances for RS256 and company -var ( - SigningMethodRS256 *SigningMethodRSA - SigningMethodRS384 *SigningMethodRSA - SigningMethodRS512 *SigningMethodRSA -) - -func init() { - // RS256 - SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256} - RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod { - return SigningMethodRS256 - }) - - // RS384 - SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384} - RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod { - return SigningMethodRS384 - }) - - // RS512 - SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512} - RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod { - return SigningMethodRS512 - }) -} - -func (m *SigningMethodRSA) Alg() string { - return m.Name -} - -// Verify implements token verification for the SigningMethod -// For this signing method, must be an *rsa.PublicKey structure. -func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error { - var err error - - // Decode the signature - var sig []byte - if sig, err = DecodeSegment(signature); err != nil { - return err - } - - var rsaKey *rsa.PublicKey - var ok bool - - if rsaKey, ok = key.(*rsa.PublicKey); !ok { - return ErrInvalidKeyType - } - - // Create hasher - if !m.Hash.Available() { - return ErrHashUnavailable - } - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Verify the signature - return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig) -} - -// Sign implements token signing for the SigningMethod -// For this signing method, must be an *rsa.PrivateKey structure. -func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) { - var rsaKey *rsa.PrivateKey - var ok bool - - // Validate type of key - if rsaKey, ok = key.(*rsa.PrivateKey); !ok { - return "", ErrInvalidKey - } - - // Create the hasher - if !m.Hash.Available() { - return "", ErrHashUnavailable - } - - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Sign the string and return the encoded bytes - if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil { - return EncodeSegment(sigBytes), nil - } else { - return "", err - } -} diff --git a/vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go b/vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go deleted file mode 100644 index 4fd6f9e..0000000 --- a/vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go +++ /dev/null @@ -1,143 +0,0 @@ -//go:build go1.4 -// +build go1.4 - -package jwt - -import ( - "crypto" - "crypto/rand" - "crypto/rsa" -) - -// SigningMethodRSAPSS implements the RSAPSS family of signing methods signing methods -type SigningMethodRSAPSS struct { - *SigningMethodRSA - Options *rsa.PSSOptions - // VerifyOptions is optional. If set overrides Options for rsa.VerifyPPS. 
- // Used to accept tokens signed with rsa.PSSSaltLengthAuto, what doesn't follow - // https://tools.ietf.org/html/rfc7518#section-3.5 but was used previously. - // See https://github.com/dgrijalva/jwt-go/issues/285#issuecomment-437451244 for details. - VerifyOptions *rsa.PSSOptions -} - -// Specific instances for RS/PS and company. -var ( - SigningMethodPS256 *SigningMethodRSAPSS - SigningMethodPS384 *SigningMethodRSAPSS - SigningMethodPS512 *SigningMethodRSAPSS -) - -func init() { - // PS256 - SigningMethodPS256 = &SigningMethodRSAPSS{ - SigningMethodRSA: &SigningMethodRSA{ - Name: "PS256", - Hash: crypto.SHA256, - }, - Options: &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthEqualsHash, - }, - VerifyOptions: &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthAuto, - }, - } - RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod { - return SigningMethodPS256 - }) - - // PS384 - SigningMethodPS384 = &SigningMethodRSAPSS{ - SigningMethodRSA: &SigningMethodRSA{ - Name: "PS384", - Hash: crypto.SHA384, - }, - Options: &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthEqualsHash, - }, - VerifyOptions: &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthAuto, - }, - } - RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod { - return SigningMethodPS384 - }) - - // PS512 - SigningMethodPS512 = &SigningMethodRSAPSS{ - SigningMethodRSA: &SigningMethodRSA{ - Name: "PS512", - Hash: crypto.SHA512, - }, - Options: &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthEqualsHash, - }, - VerifyOptions: &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthAuto, - }, - } - RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod { - return SigningMethodPS512 - }) -} - -// Verify implements token verification for the SigningMethod. -// For this verify method, key must be an rsa.PublicKey struct -func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error { - var err error - - // Decode the signature - var sig []byte - if sig, err = DecodeSegment(signature); err != nil { - return err - } - - var rsaKey *rsa.PublicKey - switch k := key.(type) { - case *rsa.PublicKey: - rsaKey = k - default: - return ErrInvalidKey - } - - // Create hasher - if !m.Hash.Available() { - return ErrHashUnavailable - } - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - opts := m.Options - if m.VerifyOptions != nil { - opts = m.VerifyOptions - } - - return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, opts) -} - -// Sign implements token signing for the SigningMethod. 
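One design point in the PSS setup above: signing pins the salt length to the hash size as RFC 7518 requires, while verification falls back to auto-detection so tokens produced by older signers still verify. Both settings are visible on the exported instances:

package main

import (
	"crypto/rsa"
	"fmt"

	jwt "github.com/golang-jwt/jwt/v4"
)

func main() {
	m := jwt.SigningMethodPS256
	fmt.Println(m.Options.SaltLength == rsa.PSSSaltLengthEqualsHash) // true: used for signing
	fmt.Println(m.VerifyOptions.SaltLength == rsa.PSSSaltLengthAuto) // true: used for verifying
}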
-// For this signing method, key must be an rsa.PrivateKey struct -func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) { - var rsaKey *rsa.PrivateKey - - switch k := key.(type) { - case *rsa.PrivateKey: - rsaKey = k - default: - return "", ErrInvalidKeyType - } - - // Create the hasher - if !m.Hash.Available() { - return "", ErrHashUnavailable - } - - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Sign the string and return the encoded bytes - if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil { - return EncodeSegment(sigBytes), nil - } else { - return "", err - } -} diff --git a/vendor/github.com/golang-jwt/jwt/v4/rsa_utils.go b/vendor/github.com/golang-jwt/jwt/v4/rsa_utils.go deleted file mode 100644 index 1966c45..0000000 --- a/vendor/github.com/golang-jwt/jwt/v4/rsa_utils.go +++ /dev/null @@ -1,105 +0,0 @@ -package jwt - -import ( - "crypto/rsa" - "crypto/x509" - "encoding/pem" - "errors" -) - -var ( - ErrKeyMustBePEMEncoded = errors.New("invalid key: Key must be a PEM encoded PKCS1 or PKCS8 key") - ErrNotRSAPrivateKey = errors.New("key is not a valid RSA private key") - ErrNotRSAPublicKey = errors.New("key is not a valid RSA public key") -) - -// ParseRSAPrivateKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 private key -func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - var parsedKey interface{} - if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil { - if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { - return nil, err - } - } - - var pkey *rsa.PrivateKey - var ok bool - if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { - return nil, ErrNotRSAPrivateKey - } - - return pkey, nil -} - -// ParseRSAPrivateKeyFromPEMWithPassword parses a PEM encoded PKCS1 or PKCS8 private key protected with password -// -// Deprecated: This function is deprecated and should not be used anymore. It uses the deprecated x509.DecryptPEMBlock -// function, which was deprecated since RFC 1423 is regarded insecure by design. Unfortunately, there is no alternative -// in the Go standard library for now. See https://github.com/golang/go/issues/8860. 
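The rsa_utils.go helpers being removed wrap the usual pem.Decode plus x509 parsing dance and reject keys of the wrong type. Typical use looked like this (the file path is a placeholder):

package main

import (
	"fmt"
	"os"

	jwt "github.com/golang-jwt/jwt/v4"
)

func main() {
	pemBytes, err := os.ReadFile("private.pem") // placeholder path
	if err != nil {
		panic(err)
	}

	// Accepts PKCS1 ("RSA PRIVATE KEY") or PKCS8 ("PRIVATE KEY") blocks.
	key, err := jwt.ParseRSAPrivateKeyFromPEM(pemBytes)
	if err != nil {
		panic(err)
	}
	fmt.Println(key.N.BitLen())
}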
-func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - var parsedKey interface{} - - var blockDecrypted []byte - if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil { - return nil, err - } - - if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil { - if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil { - return nil, err - } - } - - var pkey *rsa.PrivateKey - var ok bool - if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { - return nil, ErrNotRSAPrivateKey - } - - return pkey, nil -} - -// ParseRSAPublicKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 public key -func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { - if cert, err := x509.ParseCertificate(block.Bytes); err == nil { - parsedKey = cert.PublicKey - } else { - return nil, err - } - } - - var pkey *rsa.PublicKey - var ok bool - if pkey, ok = parsedKey.(*rsa.PublicKey); !ok { - return nil, ErrNotRSAPublicKey - } - - return pkey, nil -} diff --git a/vendor/github.com/golang-jwt/jwt/v4/signing_method.go b/vendor/github.com/golang-jwt/jwt/v4/signing_method.go deleted file mode 100644 index 241ae9c..0000000 --- a/vendor/github.com/golang-jwt/jwt/v4/signing_method.go +++ /dev/null @@ -1,46 +0,0 @@ -package jwt - -import ( - "sync" -) - -var signingMethods = map[string]func() SigningMethod{} -var signingMethodLock = new(sync.RWMutex) - -// SigningMethod can be used add new methods for signing or verifying tokens. -type SigningMethod interface { - Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid - Sign(signingString string, key interface{}) (string, error) // Returns encoded signature or error - Alg() string // returns the alg identifier for this method (example: 'HS256') -} - -// RegisterSigningMethod registers the "alg" name and a factory function for signing method. 
-// This is typically done during init() in the method's implementation -func RegisterSigningMethod(alg string, f func() SigningMethod) { - signingMethodLock.Lock() - defer signingMethodLock.Unlock() - - signingMethods[alg] = f -} - -// GetSigningMethod retrieves a signing method from an "alg" string -func GetSigningMethod(alg string) (method SigningMethod) { - signingMethodLock.RLock() - defer signingMethodLock.RUnlock() - - if methodF, ok := signingMethods[alg]; ok { - method = methodF() - } - return -} - -// GetAlgorithms returns a list of registered "alg" names -func GetAlgorithms() (algs []string) { - signingMethodLock.RLock() - defer signingMethodLock.RUnlock() - - for alg := range signingMethods { - algs = append(algs, alg) - } - return -} diff --git a/vendor/github.com/golang-jwt/jwt/v4/staticcheck.conf b/vendor/github.com/golang-jwt/jwt/v4/staticcheck.conf deleted file mode 100644 index 53745d5..0000000 --- a/vendor/github.com/golang-jwt/jwt/v4/staticcheck.conf +++ /dev/null @@ -1 +0,0 @@ -checks = ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1023"] diff --git a/vendor/github.com/golang-jwt/jwt/v4/token.go b/vendor/github.com/golang-jwt/jwt/v4/token.go deleted file mode 100644 index 3cb0f3f..0000000 --- a/vendor/github.com/golang-jwt/jwt/v4/token.go +++ /dev/null @@ -1,127 +0,0 @@ -package jwt - -import ( - "encoding/base64" - "encoding/json" - "strings" - "time" -) - -// DecodePaddingAllowed will switch the codec used for decoding JWTs respectively. Note that the JWS RFC7515 -// states that the tokens will utilize a Base64url encoding with no padding. Unfortunately, some implementations -// of JWT are producing non-standard tokens, and thus require support for decoding. Note that this is a global -// variable, and updating it will change the behavior on a package level, and is also NOT go-routine safe. -// To use the non-recommended decoding, set this boolean to `true` prior to using this package. -var DecodePaddingAllowed bool - -// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time). -// You can override it to use another time value. This is useful for testing or if your -// server uses a different time zone than your tokens. -var TimeFunc = time.Now - -// Keyfunc will be used by the Parse methods as a callback function to supply -// the key for verification. The function receives the parsed, -// but unverified Token. This allows you to use properties in the -// Header of the token (such as `kid`) to identify which key to use. -type Keyfunc func(*Token) (interface{}, error) - -// Token represents a JWT Token. Different fields will be used depending on whether you're -// creating or parsing/verifying a token. -type Token struct { - Raw string // The raw token. Populated when you Parse a token - Method SigningMethod // The signing method used or to be used - Header map[string]interface{} // The first segment of the token - Claims Claims // The second segment of the token - Signature string // The third segment of the token. Populated when you Parse a token - Valid bool // Is the token valid? Populated when you Parse/Verify a token -} - -// New creates a new Token with the specified signing method and an empty map of claims. -func New(method SigningMethod) *Token { - return NewWithClaims(method, MapClaims{}) -} - -// NewWithClaims creates a new Token with the specified signing method and claims. 
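The signing-method registry above is a plain map guarded by an RWMutex; each built-in method registers itself from init(), and custom algorithms can register the same way. Reading it back:

package main

import (
	"fmt"

	jwt "github.com/golang-jwt/jwt/v4"
)

func main() {
	// Importing the package runs the init() functions that populate the registry.
	if m := jwt.GetSigningMethod("HS256"); m != nil {
		fmt.Println(m.Alg()) // HS256
	}
	fmt.Println(jwt.GetAlgorithms()) // all registered alg names, unordered
}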
-func NewWithClaims(method SigningMethod, claims Claims) *Token { - return &Token{ - Header: map[string]interface{}{ - "typ": "JWT", - "alg": method.Alg(), - }, - Claims: claims, - Method: method, - } -} - -// SignedString creates and returns a complete, signed JWT. -// The token is signed using the SigningMethod specified in the token. -func (t *Token) SignedString(key interface{}) (string, error) { - var sig, sstr string - var err error - if sstr, err = t.SigningString(); err != nil { - return "", err - } - if sig, err = t.Method.Sign(sstr, key); err != nil { - return "", err - } - return strings.Join([]string{sstr, sig}, "."), nil -} - -// SigningString generates the signing string. This is the -// most expensive part of the whole deal. Unless you -// need this for something special, just go straight for -// the SignedString. -func (t *Token) SigningString() (string, error) { - var err error - var jsonValue []byte - - if jsonValue, err = json.Marshal(t.Header); err != nil { - return "", err - } - header := EncodeSegment(jsonValue) - - if jsonValue, err = json.Marshal(t.Claims); err != nil { - return "", err - } - claim := EncodeSegment(jsonValue) - - return strings.Join([]string{header, claim}, "."), nil -} - -// Parse parses, validates, verifies the signature and returns the parsed token. -// keyFunc will receive the parsed token and should return the cryptographic key -// for verifying the signature. -// The caller is strongly encouraged to set the WithValidMethods option to -// validate the 'alg' claim in the token matches the expected algorithm. -// For more details about the importance of validating the 'alg' claim, -// see https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/ -func Parse(tokenString string, keyFunc Keyfunc, options ...ParserOption) (*Token, error) { - return NewParser(options...).Parse(tokenString, keyFunc) -} - -func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc, options ...ParserOption) (*Token, error) { - return NewParser(options...).ParseWithClaims(tokenString, claims, keyFunc) -} - -// EncodeSegment encodes a JWT specific base64url encoding with padding stripped -// -// Deprecated: In a future release, we will demote this function to a non-exported function, since it -// should only be used internally -func EncodeSegment(seg []byte) string { - return base64.RawURLEncoding.EncodeToString(seg) -} - -// DecodeSegment decodes a JWT specific base64url encoding with padding stripped -// -// Deprecated: In a future release, we will demote this function to a non-exported function, since it -// should only be used internally -func DecodeSegment(seg string) ([]byte, error) { - if DecodePaddingAllowed { - if l := len(seg) % 4; l > 0 { - seg += strings.Repeat("=", 4-l) - } - return base64.URLEncoding.DecodeString(seg) - } - - return base64.RawURLEncoding.DecodeString(seg) -} diff --git a/vendor/github.com/golang-jwt/jwt/v4/types.go b/vendor/github.com/golang-jwt/jwt/v4/types.go deleted file mode 100644 index ac8e140..0000000 --- a/vendor/github.com/golang-jwt/jwt/v4/types.go +++ /dev/null @@ -1,145 +0,0 @@ -package jwt - -import ( - "encoding/json" - "fmt" - "math" - "reflect" - "strconv" - "time" -) - -// TimePrecision sets the precision of times and dates within this library. -// This has an influence on the precision of times when comparing expiry or -// other related time fields. Furthermore, it is also the precision of times -// when serializing. 
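EncodeSegment and DecodeSegment above are thin wrappers over base64.RawURLEncoding, with the global DecodePaddingAllowed escape hatch for producers that emit padded segments (note the warning in the comment: flipping it is package-wide and not goroutine-safe). For instance:

package main

import (
	"fmt"

	jwt "github.com/golang-jwt/jwt/v4"
)

func main() {
	seg := jwt.EncodeSegment([]byte(`{"alg":"HS256","typ":"JWT"}`))
	fmt.Println(seg) // base64url, no padding

	b, err := jwt.DecodeSegment(seg)
	fmt.Println(string(b), err)
	// jwt.DecodePaddingAllowed = true would additionally tolerate padded input.
}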
-// -// For backwards compatibility the default precision is set to seconds, so that -// no fractional timestamps are generated. -var TimePrecision = time.Second - -// MarshalSingleStringAsArray modifies the behaviour of the ClaimStrings type, especially -// its MarshalJSON function. -// -// If it is set to true (the default), it will always serialize the type as an -// array of strings, even if it just contains one element, defaulting to the behaviour -// of the underlying []string. If it is set to false, it will serialize to a single -// string, if it contains one element. Otherwise, it will serialize to an array of strings. -var MarshalSingleStringAsArray = true - -// NumericDate represents a JSON numeric date value, as referenced at -// https://datatracker.ietf.org/doc/html/rfc7519#section-2. -type NumericDate struct { - time.Time -} - -// NewNumericDate constructs a new *NumericDate from a standard library time.Time struct. -// It will truncate the timestamp according to the precision specified in TimePrecision. -func NewNumericDate(t time.Time) *NumericDate { - return &NumericDate{t.Truncate(TimePrecision)} -} - -// newNumericDateFromSeconds creates a new *NumericDate out of a float64 representing a -// UNIX epoch with the float fraction representing non-integer seconds. -func newNumericDateFromSeconds(f float64) *NumericDate { - round, frac := math.Modf(f) - return NewNumericDate(time.Unix(int64(round), int64(frac*1e9))) -} - -// MarshalJSON is an implementation of the json.RawMessage interface and serializes the UNIX epoch -// represented in NumericDate to a byte array, using the precision specified in TimePrecision. -func (date NumericDate) MarshalJSON() (b []byte, err error) { - var prec int - if TimePrecision < time.Second { - prec = int(math.Log10(float64(time.Second) / float64(TimePrecision))) - } - truncatedDate := date.Truncate(TimePrecision) - - // For very large timestamps, UnixNano would overflow an int64, but this - // function requires nanosecond level precision, so we have to use the - // following technique to get round the issue: - // 1. Take the normal unix timestamp to form the whole number part of the - // output, - // 2. Take the result of the Nanosecond function, which retuns the offset - // within the second of the particular unix time instance, to form the - // decimal part of the output - // 3. Concatenate them to produce the final result - seconds := strconv.FormatInt(truncatedDate.Unix(), 10) - nanosecondsOffset := strconv.FormatFloat(float64(truncatedDate.Nanosecond())/float64(time.Second), 'f', prec, 64) - - output := append([]byte(seconds), []byte(nanosecondsOffset)[1:]...) - - return output, nil -} - -// UnmarshalJSON is an implementation of the json.RawMessage interface and deserializses a -// NumericDate from a JSON representation, i.e. a json.Number. This number represents an UNIX epoch -// with either integer or non-integer seconds. -func (date *NumericDate) UnmarshalJSON(b []byte) (err error) { - var ( - number json.Number - f float64 - ) - - if err = json.Unmarshal(b, &number); err != nil { - return fmt.Errorf("could not parse NumericData: %w", err) - } - - if f, err = number.Float64(); err != nil { - return fmt.Errorf("could not convert json number value to float: %w", err) - } - - n := newNumericDateFromSeconds(f) - *date = *n - - return nil -} - -// ClaimStrings is basically just a slice of strings, but it can be either serialized from a string array or just a string. 
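NumericDate serialization above honors TimePrecision, which defaults to whole seconds, so fractional nanoseconds are dropped on output. A quick check with an arbitrary timestamp:

package main

import (
	"encoding/json"
	"fmt"
	"time"

	jwt "github.com/golang-jwt/jwt/v4"
)

func main() {
	// NewNumericDate truncates to TimePrecision (time.Second by default).
	d := jwt.NewNumericDate(time.Unix(1700000000, 123456789))
	b, _ := json.Marshal(d)
	fmt.Println(string(b)) // 1700000000 -- no fractional part
}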
-// This type is necessary, since the "aud" claim can either be a single string or an array. -type ClaimStrings []string - -func (s *ClaimStrings) UnmarshalJSON(data []byte) (err error) { - var value interface{} - - if err = json.Unmarshal(data, &value); err != nil { - return err - } - - var aud []string - - switch v := value.(type) { - case string: - aud = append(aud, v) - case []string: - aud = ClaimStrings(v) - case []interface{}: - for _, vv := range v { - vs, ok := vv.(string) - if !ok { - return &json.UnsupportedTypeError{Type: reflect.TypeOf(vv)} - } - aud = append(aud, vs) - } - case nil: - return nil - default: - return &json.UnsupportedTypeError{Type: reflect.TypeOf(v)} - } - - *s = aud - - return -} - -func (s ClaimStrings) MarshalJSON() (b []byte, err error) { - // This handles a special case in the JWT RFC. If the string array, e.g. used by the "aud" field, - // only contains one element, it MAY be serialized as a single string. This may or may not be - // desired based on the ecosystem of other JWT library used, so we make it configurable by the - // variable MarshalSingleStringAsArray. - if len(s) == 1 && !MarshalSingleStringAsArray { - return json.Marshal(s[0]) - } - - return json.Marshal([]string(s)) -} diff --git a/vendor/github.com/google/pprof/AUTHORS b/vendor/github.com/google/pprof/AUTHORS new file mode 100644 index 0000000..fd736cb --- /dev/null +++ b/vendor/github.com/google/pprof/AUTHORS @@ -0,0 +1,7 @@ +# This is the official list of pprof authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. +# Names should be added to this file as: +# Name or Organization +# The email address is not required for organizations. +Google Inc. \ No newline at end of file diff --git a/vendor/github.com/google/pprof/CONTRIBUTORS b/vendor/github.com/google/pprof/CONTRIBUTORS new file mode 100644 index 0000000..8c8c37d --- /dev/null +++ b/vendor/github.com/google/pprof/CONTRIBUTORS @@ -0,0 +1,16 @@ +# People who have agreed to one of the CLAs and can contribute patches. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# https://developers.google.com/open-source/cla/individual +# https://developers.google.com/open-source/cla/corporate +# +# Names should be added to this file as: +# Name +Raul Silvera +Tipp Moseley +Hyoun Kyu Cho +Martin Spier +Taco de Wolff +Andrew Hunter diff --git a/vendor/cloud.google.com/go/compute/LICENSE b/vendor/github.com/google/pprof/LICENSE similarity index 100% rename from vendor/cloud.google.com/go/compute/LICENSE rename to vendor/github.com/google/pprof/LICENSE diff --git a/vendor/github.com/google/pprof/profile/encode.go b/vendor/github.com/google/pprof/profile/encode.go new file mode 100644 index 0000000..c8a1beb --- /dev/null +++ b/vendor/github.com/google/pprof/profile/encode.go @@ -0,0 +1,588 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package profile + +import ( + "errors" + "sort" + "strings" +) + +func (p *Profile) decoder() []decoder { + return profileDecoder +} + +// preEncode populates the unexported fields to be used by encode +// (with suffix X) from the corresponding exported fields. The +// exported fields are cleared up to facilitate testing. +func (p *Profile) preEncode() { + strings := make(map[string]int) + addString(strings, "") + + for _, st := range p.SampleType { + st.typeX = addString(strings, st.Type) + st.unitX = addString(strings, st.Unit) + } + + for _, s := range p.Sample { + s.labelX = nil + var keys []string + for k := range s.Label { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + vs := s.Label[k] + for _, v := range vs { + s.labelX = append(s.labelX, + label{ + keyX: addString(strings, k), + strX: addString(strings, v), + }, + ) + } + } + var numKeys []string + for k := range s.NumLabel { + numKeys = append(numKeys, k) + } + sort.Strings(numKeys) + for _, k := range numKeys { + keyX := addString(strings, k) + vs := s.NumLabel[k] + units := s.NumUnit[k] + for i, v := range vs { + var unitX int64 + if len(units) != 0 { + unitX = addString(strings, units[i]) + } + s.labelX = append(s.labelX, + label{ + keyX: keyX, + numX: v, + unitX: unitX, + }, + ) + } + } + s.locationIDX = make([]uint64, len(s.Location)) + for i, loc := range s.Location { + s.locationIDX[i] = loc.ID + } + } + + for _, m := range p.Mapping { + m.fileX = addString(strings, m.File) + m.buildIDX = addString(strings, m.BuildID) + } + + for _, l := range p.Location { + for i, ln := range l.Line { + if ln.Function != nil { + l.Line[i].functionIDX = ln.Function.ID + } else { + l.Line[i].functionIDX = 0 + } + } + if l.Mapping != nil { + l.mappingIDX = l.Mapping.ID + } else { + l.mappingIDX = 0 + } + } + for _, f := range p.Function { + f.nameX = addString(strings, f.Name) + f.systemNameX = addString(strings, f.SystemName) + f.filenameX = addString(strings, f.Filename) + } + + p.dropFramesX = addString(strings, p.DropFrames) + p.keepFramesX = addString(strings, p.KeepFrames) + + if pt := p.PeriodType; pt != nil { + pt.typeX = addString(strings, pt.Type) + pt.unitX = addString(strings, pt.Unit) + } + + p.commentX = nil + for _, c := range p.Comments { + p.commentX = append(p.commentX, addString(strings, c)) + } + + p.defaultSampleTypeX = addString(strings, p.DefaultSampleType) + + p.stringTable = make([]string, len(strings)) + for s, i := range strings { + p.stringTable[i] = s + } +} + +func (p *Profile) encode(b *buffer) { + for _, x := range p.SampleType { + encodeMessage(b, 1, x) + } + for _, x := range p.Sample { + encodeMessage(b, 2, x) + } + for _, x := range p.Mapping { + encodeMessage(b, 3, x) + } + for _, x := range p.Location { + encodeMessage(b, 4, x) + } + for _, x := range p.Function { + encodeMessage(b, 5, x) + } + encodeStrings(b, 6, p.stringTable) + encodeInt64Opt(b, 7, p.dropFramesX) + encodeInt64Opt(b, 8, p.keepFramesX) + encodeInt64Opt(b, 9, p.TimeNanos) + encodeInt64Opt(b, 10, p.DurationNanos) + if pt := p.PeriodType; pt != nil && (pt.typeX != 0 || pt.unitX != 0) { + encodeMessage(b, 11, p.PeriodType) + } + encodeInt64Opt(b, 12, p.Period) + encodeInt64s(b, 13, p.commentX) + encodeInt64(b, 14, p.defaultSampleTypeX) +} + +var profileDecoder = []decoder{ + nil, // 0 + // repeated ValueType sample_type = 1 + func(b *buffer, m message) error { + x := new(ValueType) + pp := m.(*Profile) 
+ pp.SampleType = append(pp.SampleType, x) + return decodeMessage(b, x) + }, + // repeated Sample sample = 2 + func(b *buffer, m message) error { + x := new(Sample) + pp := m.(*Profile) + pp.Sample = append(pp.Sample, x) + return decodeMessage(b, x) + }, + // repeated Mapping mapping = 3 + func(b *buffer, m message) error { + x := new(Mapping) + pp := m.(*Profile) + pp.Mapping = append(pp.Mapping, x) + return decodeMessage(b, x) + }, + // repeated Location location = 4 + func(b *buffer, m message) error { + x := new(Location) + x.Line = b.tmpLines[:0] // Use shared space temporarily + pp := m.(*Profile) + pp.Location = append(pp.Location, x) + err := decodeMessage(b, x) + b.tmpLines = x.Line[:0] + // Copy to shrink size and detach from shared space. + x.Line = append([]Line(nil), x.Line...) + return err + }, + // repeated Function function = 5 + func(b *buffer, m message) error { + x := new(Function) + pp := m.(*Profile) + pp.Function = append(pp.Function, x) + return decodeMessage(b, x) + }, + // repeated string string_table = 6 + func(b *buffer, m message) error { + err := decodeStrings(b, &m.(*Profile).stringTable) + if err != nil { + return err + } + if m.(*Profile).stringTable[0] != "" { + return errors.New("string_table[0] must be ''") + } + return nil + }, + // int64 drop_frames = 7 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).dropFramesX) }, + // int64 keep_frames = 8 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).keepFramesX) }, + // int64 time_nanos = 9 + func(b *buffer, m message) error { + if m.(*Profile).TimeNanos != 0 { + return errConcatProfile + } + return decodeInt64(b, &m.(*Profile).TimeNanos) + }, + // int64 duration_nanos = 10 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).DurationNanos) }, + // ValueType period_type = 11 + func(b *buffer, m message) error { + x := new(ValueType) + pp := m.(*Profile) + pp.PeriodType = x + return decodeMessage(b, x) + }, + // int64 period = 12 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).Period) }, + // repeated int64 comment = 13 + func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Profile).commentX) }, + // int64 defaultSampleType = 14 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).defaultSampleTypeX) }, +} + +// postDecode takes the unexported fields populated by decode (with +// suffix X) and populates the corresponding exported fields. +// The unexported fields are cleared up to facilitate testing. +func (p *Profile) postDecode() error { + var err error + mappings := make(map[uint64]*Mapping, len(p.Mapping)) + mappingIds := make([]*Mapping, len(p.Mapping)+1) + for _, m := range p.Mapping { + m.File, err = getString(p.stringTable, &m.fileX, err) + m.BuildID, err = getString(p.stringTable, &m.buildIDX, err) + if m.ID < uint64(len(mappingIds)) { + mappingIds[m.ID] = m + } else { + mappings[m.ID] = m + } + + // If this a main linux kernel mapping with a relocation symbol suffix + // ("[kernel.kallsyms]_text"), extract said suffix. + // It is fairly hacky to handle at this level, but the alternatives appear even worse. 
+ if strings.HasPrefix(m.File, "[kernel.kallsyms]") { + m.KernelRelocationSymbol = strings.ReplaceAll(m.File, "[kernel.kallsyms]", "") + } + + } + + functions := make(map[uint64]*Function, len(p.Function)) + functionIds := make([]*Function, len(p.Function)+1) + for _, f := range p.Function { + f.Name, err = getString(p.stringTable, &f.nameX, err) + f.SystemName, err = getString(p.stringTable, &f.systemNameX, err) + f.Filename, err = getString(p.stringTable, &f.filenameX, err) + if f.ID < uint64(len(functionIds)) { + functionIds[f.ID] = f + } else { + functions[f.ID] = f + } + } + + locations := make(map[uint64]*Location, len(p.Location)) + locationIds := make([]*Location, len(p.Location)+1) + for _, l := range p.Location { + if id := l.mappingIDX; id < uint64(len(mappingIds)) { + l.Mapping = mappingIds[id] + } else { + l.Mapping = mappings[id] + } + l.mappingIDX = 0 + for i, ln := range l.Line { + if id := ln.functionIDX; id != 0 { + l.Line[i].functionIDX = 0 + if id < uint64(len(functionIds)) { + l.Line[i].Function = functionIds[id] + } else { + l.Line[i].Function = functions[id] + } + } + } + if l.ID < uint64(len(locationIds)) { + locationIds[l.ID] = l + } else { + locations[l.ID] = l + } + } + + for _, st := range p.SampleType { + st.Type, err = getString(p.stringTable, &st.typeX, err) + st.Unit, err = getString(p.stringTable, &st.unitX, err) + } + + // Pre-allocate space for all locations. + numLocations := 0 + for _, s := range p.Sample { + numLocations += len(s.locationIDX) + } + locBuffer := make([]*Location, numLocations) + + for _, s := range p.Sample { + if len(s.labelX) > 0 { + labels := make(map[string][]string, len(s.labelX)) + numLabels := make(map[string][]int64, len(s.labelX)) + numUnits := make(map[string][]string, len(s.labelX)) + for _, l := range s.labelX { + var key, value string + key, err = getString(p.stringTable, &l.keyX, err) + if l.strX != 0 { + value, err = getString(p.stringTable, &l.strX, err) + labels[key] = append(labels[key], value) + } else if l.numX != 0 || l.unitX != 0 { + numValues := numLabels[key] + units := numUnits[key] + if l.unitX != 0 { + var unit string + unit, err = getString(p.stringTable, &l.unitX, err) + units = padStringArray(units, len(numValues)) + numUnits[key] = append(units, unit) + } + numLabels[key] = append(numLabels[key], l.numX) + } + } + if len(labels) > 0 { + s.Label = labels + } + if len(numLabels) > 0 { + s.NumLabel = numLabels + for key, units := range numUnits { + if len(units) > 0 { + numUnits[key] = padStringArray(units, len(numLabels[key])) + } + } + s.NumUnit = numUnits + } + } + + s.Location = locBuffer[:len(s.locationIDX)] + locBuffer = locBuffer[len(s.locationIDX):] + for i, lid := range s.locationIDX { + if lid < uint64(len(locationIds)) { + s.Location[i] = locationIds[lid] + } else { + s.Location[i] = locations[lid] + } + } + s.locationIDX = nil + } + + p.DropFrames, err = getString(p.stringTable, &p.dropFramesX, err) + p.KeepFrames, err = getString(p.stringTable, &p.keepFramesX, err) + + if pt := p.PeriodType; pt == nil { + p.PeriodType = &ValueType{} + } + + if pt := p.PeriodType; pt != nil { + pt.Type, err = getString(p.stringTable, &pt.typeX, err) + pt.Unit, err = getString(p.stringTable, &pt.unitX, err) + } + + for _, i := range p.commentX { + var c string + c, err = getString(p.stringTable, &i, err) + p.Comments = append(p.Comments, c) + } + + p.commentX = nil + p.DefaultSampleType, err = getString(p.stringTable, &p.defaultSampleTypeX, err) + p.stringTable = nil + return err +} + +// padStringArray pads arr 
+// with enough empty strings to make arr
+// length l when arr's length is less than l.
+func padStringArray(arr []string, l int) []string {
+	if l <= len(arr) {
+		return arr
+	}
+	return append(arr, make([]string, l-len(arr))...)
+}
+
+func (p *ValueType) decoder() []decoder {
+	return valueTypeDecoder
+}
+
+func (p *ValueType) encode(b *buffer) {
+	encodeInt64Opt(b, 1, p.typeX)
+	encodeInt64Opt(b, 2, p.unitX)
+}
+
+var valueTypeDecoder = []decoder{
+	nil, // 0
+	// optional int64 type = 1
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).typeX) },
+	// optional int64 unit = 2
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).unitX) },
+}
+
+func (p *Sample) decoder() []decoder {
+	return sampleDecoder
+}
+
+func (p *Sample) encode(b *buffer) {
+	encodeUint64s(b, 1, p.locationIDX)
+	encodeInt64s(b, 2, p.Value)
+	for _, x := range p.labelX {
+		encodeMessage(b, 3, x)
+	}
+}
+
+var sampleDecoder = []decoder{
+	nil, // 0
+	// repeated uint64 location = 1
+	func(b *buffer, m message) error { return decodeUint64s(b, &m.(*Sample).locationIDX) },
+	// repeated int64 value = 2
+	func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Sample).Value) },
+	// repeated Label label = 3
+	func(b *buffer, m message) error {
+		s := m.(*Sample)
+		n := len(s.labelX)
+		s.labelX = append(s.labelX, label{})
+		return decodeMessage(b, &s.labelX[n])
+	},
+}
+
+func (p label) decoder() []decoder {
+	return labelDecoder
+}
+
+func (p label) encode(b *buffer) {
+	encodeInt64Opt(b, 1, p.keyX)
+	encodeInt64Opt(b, 2, p.strX)
+	encodeInt64Opt(b, 3, p.numX)
+	encodeInt64Opt(b, 4, p.unitX)
+}
+
+var labelDecoder = []decoder{
+	nil, // 0
+	// optional int64 key = 1
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).keyX) },
+	// optional int64 str = 2
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).strX) },
+	// optional int64 num = 3
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).numX) },
+	// optional int64 num_unit = 4
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).unitX) },
+}
+
+func (p *Mapping) decoder() []decoder {
+	return mappingDecoder
+}
+
+func (p *Mapping) encode(b *buffer) {
+	encodeUint64Opt(b, 1, p.ID)
+	encodeUint64Opt(b, 2, p.Start)
+	encodeUint64Opt(b, 3, p.Limit)
+	encodeUint64Opt(b, 4, p.Offset)
+	encodeInt64Opt(b, 5, p.fileX)
+	encodeInt64Opt(b, 6, p.buildIDX)
+	encodeBoolOpt(b, 7, p.HasFunctions)
+	encodeBoolOpt(b, 8, p.HasFilenames)
+	encodeBoolOpt(b, 9, p.HasLineNumbers)
+	encodeBoolOpt(b, 10, p.HasInlineFrames)
+}
+
+var mappingDecoder = []decoder{
+	nil, // 0
+	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).ID) },            // optional uint64 id = 1
+	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Start) },         // optional uint64 memory_start = 2
+	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Limit) },         // optional uint64 memory_limit = 3
+	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Offset) },        // optional uint64 file_offset = 4
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).fileX) },          // optional int64 filename = 5
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).buildIDX) },       // optional int64 build_id = 6
+	func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFunctions) },    // optional bool has_functions = 7
+	func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFilenames) },    // optional bool has_filenames = 8
+	func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasLineNumbers) },  // optional bool has_line_numbers = 9
+	func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasInlineFrames) }, // optional bool has_inline_frames = 10
+}
+
+func (p *Location) decoder() []decoder {
+	return locationDecoder
+}
+
+func (p *Location) encode(b *buffer) {
+	encodeUint64Opt(b, 1, p.ID)
+	encodeUint64Opt(b, 2, p.mappingIDX)
+	encodeUint64Opt(b, 3, p.Address)
+	for i := range p.Line {
+		encodeMessage(b, 4, &p.Line[i])
+	}
+	encodeBoolOpt(b, 5, p.IsFolded)
+}
+
+var locationDecoder = []decoder{
+	nil, // 0
+	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).ID) },         // optional uint64 id = 1;
+	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).mappingIDX) }, // optional uint64 mapping_id = 2;
+	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).Address) },    // optional uint64 address = 3;
+	func(b *buffer, m message) error { // repeated Line line = 4
+		pp := m.(*Location)
+		n := len(pp.Line)
+		pp.Line = append(pp.Line, Line{})
+		return decodeMessage(b, &pp.Line[n])
+	},
+	func(b *buffer, m message) error { return decodeBool(b, &m.(*Location).IsFolded) }, // optional bool is_folded = 5;
+}
+
+func (p *Line) decoder() []decoder {
+	return lineDecoder
+}
+
+func (p *Line) encode(b *buffer) {
+	encodeUint64Opt(b, 1, p.functionIDX)
+	encodeInt64Opt(b, 2, p.Line)
+}
+
+var lineDecoder = []decoder{
+	nil, // 0
+	// optional uint64 function_id = 1
+	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Line).functionIDX) },
+	// optional int64 line = 2
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Line) },
+}
+
+func (p *Function) decoder() []decoder {
+	return functionDecoder
+}
+
+func (p *Function) encode(b *buffer) {
+	encodeUint64Opt(b, 1, p.ID)
+	encodeInt64Opt(b, 2, p.nameX)
+	encodeInt64Opt(b, 3, p.systemNameX)
+	encodeInt64Opt(b, 4, p.filenameX)
+	encodeInt64Opt(b, 5, p.StartLine)
+}
+
+var functionDecoder = []decoder{
+	nil, // 0
+	// optional uint64 id = 1
+	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Function).ID) },
+	// optional int64 function_name = 2
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).nameX) },
+	// optional int64 function_system_name = 3
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).systemNameX) },
+	// optional int64 filename = 4
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).filenameX) },
+	// optional int64 start_line = 5
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).StartLine) },
+}
+
+func addString(strings map[string]int, s string) int64 {
+	i, ok := strings[s]
+	if !ok {
+		i = len(strings)
+		strings[s] = i
+	}
+	return int64(i)
+}
+
+func getString(strings []string, strng *int64, err error) (string, error) {
+	if err != nil {
+		return "", err
+	}
+	s := int(*strng)
+	if s < 0 || s >= len(strings) {
+		return "", errMalformed
+	}
+	*strng = 0
+	return strings[s], nil
+}
diff --git a/vendor/github.com/google/pprof/profile/filter.go b/vendor/github.com/google/pprof/profile/filter.go
new file mode 100644
index 0000000..c794b93
--- /dev/null
+++ b/vendor/github.com/google/pprof/profile/filter.go
@@ -0,0 +1,274 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package profile
+
+// Implements methods to filter samples from profiles.
+
+import "regexp"
+
+// FilterSamplesByName filters the samples in a profile and only keeps
+// samples where at least one frame matches focus but none match ignore.
+// Returns true if the corresponding regexp matched at least one sample.
+func (p *Profile) FilterSamplesByName(focus, ignore, hide, show *regexp.Regexp) (fm, im, hm, hnm bool) {
+	if focus == nil && ignore == nil && hide == nil && show == nil {
+		fm = true // Missing focus implies a match
+		return
+	}
+	focusOrIgnore := make(map[uint64]bool)
+	hidden := make(map[uint64]bool)
+	for _, l := range p.Location {
+		if ignore != nil && l.matchesName(ignore) {
+			im = true
+			focusOrIgnore[l.ID] = false
+		} else if focus == nil || l.matchesName(focus) {
+			fm = true
+			focusOrIgnore[l.ID] = true
+		}
+
+		if hide != nil && l.matchesName(hide) {
+			hm = true
+			l.Line = l.unmatchedLines(hide)
+			if len(l.Line) == 0 {
+				hidden[l.ID] = true
+			}
+		}
+		if show != nil {
+			l.Line = l.matchedLines(show)
+			if len(l.Line) == 0 {
+				hidden[l.ID] = true
+			} else {
+				hnm = true
+			}
+		}
+	}
+
+	s := make([]*Sample, 0, len(p.Sample))
+	for _, sample := range p.Sample {
+		if focusedAndNotIgnored(sample.Location, focusOrIgnore) {
+			if len(hidden) > 0 {
+				var locs []*Location
+				for _, loc := range sample.Location {
+					if !hidden[loc.ID] {
+						locs = append(locs, loc)
+					}
+				}
+				if len(locs) == 0 {
+					// Remove sample with no locations (by not adding it to s).
+					continue
+				}
+				sample.Location = locs
+			}
+			s = append(s, sample)
+		}
+	}
+	p.Sample = s
+
+	return
+}
+
+// ShowFrom drops all stack frames above the highest matching frame and returns
+// whether a match was found. If showFrom is nil it returns false and does not
+// modify the profile.
+//
+// Example: consider a sample with frames [A, B, C, B], where A is the root.
+// ShowFrom(nil) returns false and has frames [A, B, C, B].
+// ShowFrom(A) returns true and has frames [A, B, C, B].
+// ShowFrom(B) returns true and has frames [B, C, B].
+// ShowFrom(C) returns true and has frames [C, B].
+// ShowFrom(D) returns false and drops the sample because no frames remain.
+func (p *Profile) ShowFrom(showFrom *regexp.Regexp) (matched bool) {
+	if showFrom == nil {
+		return false
+	}
+	// showFromLocs stores location IDs that matched ShowFrom.
+	showFromLocs := make(map[uint64]bool)
+	// Apply to locations.
+	for _, loc := range p.Location {
+		if filterShowFromLocation(loc, showFrom) {
+			showFromLocs[loc.ID] = true
+			matched = true
+		}
+	}
+	// For all samples, strip locations after the highest matching one.
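+	// sample.Location is ordered leaf-first, so scanning from the end finds the
+	// matching frame closest to the root; truncating to [:i+1] keeps everything
+	// from the leaf up to that frame.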
+ s := make([]*Sample, 0, len(p.Sample)) + for _, sample := range p.Sample { + for i := len(sample.Location) - 1; i >= 0; i-- { + if showFromLocs[sample.Location[i].ID] { + sample.Location = sample.Location[:i+1] + s = append(s, sample) + break + } + } + } + p.Sample = s + return matched +} + +// filterShowFromLocation tests a showFrom regex against a location, removes +// lines after the last match and returns whether a match was found. If the +// mapping is matched, then all lines are kept. +func filterShowFromLocation(loc *Location, showFrom *regexp.Regexp) bool { + if m := loc.Mapping; m != nil && showFrom.MatchString(m.File) { + return true + } + if i := loc.lastMatchedLineIndex(showFrom); i >= 0 { + loc.Line = loc.Line[:i+1] + return true + } + return false +} + +// lastMatchedLineIndex returns the index of the last line that matches a regex, +// or -1 if no match is found. +func (loc *Location) lastMatchedLineIndex(re *regexp.Regexp) int { + for i := len(loc.Line) - 1; i >= 0; i-- { + if fn := loc.Line[i].Function; fn != nil { + if re.MatchString(fn.Name) || re.MatchString(fn.Filename) { + return i + } + } + } + return -1 +} + +// FilterTagsByName filters the tags in a profile and only keeps +// tags that match show and not hide. +func (p *Profile) FilterTagsByName(show, hide *regexp.Regexp) (sm, hm bool) { + matchRemove := func(name string) bool { + matchShow := show == nil || show.MatchString(name) + matchHide := hide != nil && hide.MatchString(name) + + if matchShow { + sm = true + } + if matchHide { + hm = true + } + return !matchShow || matchHide + } + for _, s := range p.Sample { + for lab := range s.Label { + if matchRemove(lab) { + delete(s.Label, lab) + } + } + for lab := range s.NumLabel { + if matchRemove(lab) { + delete(s.NumLabel, lab) + } + } + } + return +} + +// matchesName returns whether the location matches the regular +// expression. It checks any available function names, file names, and +// mapping object filename. +func (loc *Location) matchesName(re *regexp.Regexp) bool { + for _, ln := range loc.Line { + if fn := ln.Function; fn != nil { + if re.MatchString(fn.Name) || re.MatchString(fn.Filename) { + return true + } + } + } + if m := loc.Mapping; m != nil && re.MatchString(m.File) { + return true + } + return false +} + +// unmatchedLines returns the lines in the location that do not match +// the regular expression. +func (loc *Location) unmatchedLines(re *regexp.Regexp) []Line { + if m := loc.Mapping; m != nil && re.MatchString(m.File) { + return nil + } + var lines []Line + for _, ln := range loc.Line { + if fn := ln.Function; fn != nil { + if re.MatchString(fn.Name) || re.MatchString(fn.Filename) { + continue + } + } + lines = append(lines, ln) + } + return lines +} + +// matchedLines returns the lines in the location that match +// the regular expression. +func (loc *Location) matchedLines(re *regexp.Regexp) []Line { + if m := loc.Mapping; m != nil && re.MatchString(m.File) { + return loc.Line + } + var lines []Line + for _, ln := range loc.Line { + if fn := ln.Function; fn != nil { + if !re.MatchString(fn.Name) && !re.MatchString(fn.Filename) { + continue + } + } + lines = append(lines, ln) + } + return lines +} + +// focusedAndNotIgnored looks up a slice of ids against a map of +// focused/ignored locations. The map only contains locations that are +// explicitly focused or ignored. Returns whether there is at least +// one focused location but no ignored locations. 
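+// For example, if m maps location F to true (focused) and location I to
+// false (ignored), a stack {F, G} is kept, {F, I} is dropped, and {G, H} is
+// dropped because it contains no focused location.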
+func focusedAndNotIgnored(locs []*Location, m map[uint64]bool) bool { + var f bool + for _, loc := range locs { + if focus, focusOrIgnore := m[loc.ID]; focusOrIgnore { + if focus { + // Found focused location. Must keep searching in case there + // is an ignored one as well. + f = true + } else { + // Found ignored location. Can return false right away. + return false + } + } + } + return f +} + +// TagMatch selects tags for filtering +type TagMatch func(s *Sample) bool + +// FilterSamplesByTag removes all samples from the profile, except +// those that match focus and do not match the ignore regular +// expression. +func (p *Profile) FilterSamplesByTag(focus, ignore TagMatch) (fm, im bool) { + samples := make([]*Sample, 0, len(p.Sample)) + for _, s := range p.Sample { + focused, ignored := true, false + if focus != nil { + focused = focus(s) + } + if ignore != nil { + ignored = ignore(s) + } + fm = fm || focused + im = im || ignored + if focused && !ignored { + samples = append(samples, s) + } + } + p.Sample = samples + return +} diff --git a/vendor/github.com/google/pprof/profile/index.go b/vendor/github.com/google/pprof/profile/index.go new file mode 100644 index 0000000..bef1d60 --- /dev/null +++ b/vendor/github.com/google/pprof/profile/index.go @@ -0,0 +1,64 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package profile + +import ( + "fmt" + "strconv" + "strings" +) + +// SampleIndexByName returns the appropriate index for a value of sample index. +// If numeric, it returns the number, otherwise it looks up the text in the +// profile sample types. +func (p *Profile) SampleIndexByName(sampleIndex string) (int, error) { + if sampleIndex == "" { + if dst := p.DefaultSampleType; dst != "" { + for i, t := range sampleTypes(p) { + if t == dst { + return i, nil + } + } + } + // By default select the last sample value + return len(p.SampleType) - 1, nil + } + if i, err := strconv.Atoi(sampleIndex); err == nil { + if i < 0 || i >= len(p.SampleType) { + return 0, fmt.Errorf("sample_index %s is outside the range [0..%d]", sampleIndex, len(p.SampleType)-1) + } + return i, nil + } + + // Remove the inuse_ prefix to support legacy pprof options + // "inuse_space" and "inuse_objects" for profiles containing types + // "space" and "objects". 
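+	// e.g. for SampleType [{objects, count}, {space, bytes}], both "space" and
+	// "inuse_space" resolve to index 1.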
+ noInuse := strings.TrimPrefix(sampleIndex, "inuse_") + for i, t := range p.SampleType { + if t.Type == sampleIndex || t.Type == noInuse { + return i, nil + } + } + + return 0, fmt.Errorf("sample_index %q must be one of: %v", sampleIndex, sampleTypes(p)) +} + +func sampleTypes(p *Profile) []string { + types := make([]string, len(p.SampleType)) + for i, t := range p.SampleType { + types[i] = t.Type + } + return types +} diff --git a/vendor/github.com/google/pprof/profile/legacy_java_profile.go b/vendor/github.com/google/pprof/profile/legacy_java_profile.go new file mode 100644 index 0000000..91f45e5 --- /dev/null +++ b/vendor/github.com/google/pprof/profile/legacy_java_profile.go @@ -0,0 +1,315 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file implements parsers to convert java legacy profiles into +// the profile.proto format. + +package profile + +import ( + "bytes" + "fmt" + "io" + "path/filepath" + "regexp" + "strconv" + "strings" +) + +var ( + attributeRx = regexp.MustCompile(`([\w ]+)=([\w ]+)`) + javaSampleRx = regexp.MustCompile(` *(\d+) +(\d+) +@ +([ x0-9a-f]*)`) + javaLocationRx = regexp.MustCompile(`^\s*0x([[:xdigit:]]+)\s+(.*)\s*$`) + javaLocationFileLineRx = regexp.MustCompile(`^(.*)\s+\((.+):(-?[[:digit:]]+)\)$`) + javaLocationPathRx = regexp.MustCompile(`^(.*)\s+\((.*)\)$`) +) + +// javaCPUProfile returns a new Profile from profilez data. +// b is the profile bytes after the header, period is the profiling +// period, and parse is a function to parse 8-byte chunks from the +// profile in its native endianness. +func javaCPUProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte)) (*Profile, error) { + p := &Profile{ + Period: period * 1000, + PeriodType: &ValueType{Type: "cpu", Unit: "nanoseconds"}, + SampleType: []*ValueType{{Type: "samples", Unit: "count"}, {Type: "cpu", Unit: "nanoseconds"}}, + } + var err error + var locs map[uint64]*Location + if b, locs, err = parseCPUSamples(b, parse, false, p); err != nil { + return nil, err + } + + if err = parseJavaLocations(b, locs, p); err != nil { + return nil, err + } + + // Strip out addresses for better merge. + if err = p.Aggregate(true, true, true, true, false); err != nil { + return nil, err + } + + return p, nil +} + +// parseJavaProfile returns a new profile from heapz or contentionz +// data. b is the profile bytes after the header. 
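+// The header line itself selects the sample types: "--- heapz 1 ---" yields a
+// heap profile, "--- contentionz 1 ---" a contention profile.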
+func parseJavaProfile(b []byte) (*Profile, error) { + h := bytes.SplitAfterN(b, []byte("\n"), 2) + if len(h) < 2 { + return nil, errUnrecognized + } + + p := &Profile{ + PeriodType: &ValueType{}, + } + header := string(bytes.TrimSpace(h[0])) + + var err error + var pType string + switch header { + case "--- heapz 1 ---": + pType = "heap" + case "--- contentionz 1 ---": + pType = "contention" + default: + return nil, errUnrecognized + } + + if b, err = parseJavaHeader(pType, h[1], p); err != nil { + return nil, err + } + var locs map[uint64]*Location + if b, locs, err = parseJavaSamples(pType, b, p); err != nil { + return nil, err + } + if err = parseJavaLocations(b, locs, p); err != nil { + return nil, err + } + + // Strip out addresses for better merge. + if err = p.Aggregate(true, true, true, true, false); err != nil { + return nil, err + } + + return p, nil +} + +// parseJavaHeader parses the attribute section on a java profile and +// populates a profile. Returns the remainder of the buffer after all +// attributes. +func parseJavaHeader(pType string, b []byte, p *Profile) ([]byte, error) { + nextNewLine := bytes.IndexByte(b, byte('\n')) + for nextNewLine != -1 { + line := string(bytes.TrimSpace(b[0:nextNewLine])) + if line != "" { + h := attributeRx.FindStringSubmatch(line) + if h == nil { + // Not a valid attribute, exit. + return b, nil + } + + attribute, value := strings.TrimSpace(h[1]), strings.TrimSpace(h[2]) + var err error + switch pType + "/" + attribute { + case "heap/format", "cpu/format", "contention/format": + if value != "java" { + return nil, errUnrecognized + } + case "heap/resolution": + p.SampleType = []*ValueType{ + {Type: "inuse_objects", Unit: "count"}, + {Type: "inuse_space", Unit: value}, + } + case "contention/resolution": + p.SampleType = []*ValueType{ + {Type: "contentions", Unit: "count"}, + {Type: "delay", Unit: value}, + } + case "contention/sampling period": + p.PeriodType = &ValueType{ + Type: "contentions", Unit: "count", + } + if p.Period, err = strconv.ParseInt(value, 0, 64); err != nil { + return nil, fmt.Errorf("failed to parse attribute %s: %v", line, err) + } + case "contention/ms since reset": + millis, err := strconv.ParseInt(value, 0, 64) + if err != nil { + return nil, fmt.Errorf("failed to parse attribute %s: %v", line, err) + } + p.DurationNanos = millis * 1000 * 1000 + default: + return nil, errUnrecognized + } + } + // Grab next line. + b = b[nextNewLine+1:] + nextNewLine = bytes.IndexByte(b, byte('\n')) + } + return b, nil +} + +// parseJavaSamples parses the samples from a java profile and +// populates the Samples in a profile. Returns the remainder of the +// buffer after the samples. +func parseJavaSamples(pType string, b []byte, p *Profile) ([]byte, map[uint64]*Location, error) { + nextNewLine := bytes.IndexByte(b, byte('\n')) + locs := make(map[uint64]*Location) + for nextNewLine != -1 { + line := string(bytes.TrimSpace(b[0:nextNewLine])) + if line != "" { + sample := javaSampleRx.FindStringSubmatch(line) + if sample == nil { + // Not a valid sample, exit. + return b, locs, nil + } + + // Java profiles have data/fields inverted compared to other + // profile types. 
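+			// The first number on a sample line is the size/delay and the
+			// second the count, so the captures are used in swapped order;
+			// sample[3] holds the hex stack addresses.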
+			var err error
+			value1, value2, value3 := sample[2], sample[1], sample[3]
+			addrs, err := parseHexAddresses(value3)
+			if err != nil {
+				return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err)
+			}
+
+			var sloc []*Location
+			for _, addr := range addrs {
+				loc := locs[addr]
+				if locs[addr] == nil {
+					loc = &Location{
+						Address: addr,
+					}
+					p.Location = append(p.Location, loc)
+					locs[addr] = loc
+				}
+				sloc = append(sloc, loc)
+			}
+			s := &Sample{
+				Value:    make([]int64, 2),
+				Location: sloc,
+			}
+
+			if s.Value[0], err = strconv.ParseInt(value1, 0, 64); err != nil {
+				return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err)
+			}
+			if s.Value[1], err = strconv.ParseInt(value2, 0, 64); err != nil {
+				return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err)
+			}
+
+			switch pType {
+			case "heap":
+				const javaHeapzSamplingRate = 524288 // 512K
+				if s.Value[0] == 0 {
+					return nil, nil, fmt.Errorf("parsing sample %s: second value must be non-zero", line)
+				}
+				s.NumLabel = map[string][]int64{"bytes": {s.Value[1] / s.Value[0]}}
+				s.Value[0], s.Value[1] = scaleHeapSample(s.Value[0], s.Value[1], javaHeapzSamplingRate)
+			case "contention":
+				if period := p.Period; period != 0 {
+					s.Value[0] = s.Value[0] * p.Period
+					s.Value[1] = s.Value[1] * p.Period
+				}
+			}
+			p.Sample = append(p.Sample, s)
+		}
+		// Grab next line.
+		b = b[nextNewLine+1:]
+		nextNewLine = bytes.IndexByte(b, byte('\n'))
+	}
+	return b, locs, nil
+}
+
+// parseJavaLocations parses the location information in a java
+// profile and populates the Locations in a profile. It uses the
+// location addresses from the profile as the ID of each location.
+func parseJavaLocations(b []byte, locs map[uint64]*Location, p *Profile) error {
+	r := bytes.NewBuffer(b)
+	fns := make(map[string]*Function)
+	for {
+		line, err := r.ReadString('\n')
+		if err != nil {
+			if err != io.EOF {
+				return err
+			}
+			if line == "" {
+				break
+			}
+		}
+
+		if line = strings.TrimSpace(line); line == "" {
+			continue
+		}
+
+		jloc := javaLocationRx.FindStringSubmatch(line)
+		if len(jloc) != 3 {
+			continue
+		}
+		addr, err := strconv.ParseUint(jloc[1], 16, 64)
+		if err != nil {
+			return fmt.Errorf("parsing sample %s: %v", line, err)
+		}
+		loc := locs[addr]
+		if loc == nil {
+			// Unused/unseen
+			continue
+		}
+		var lineFunc, lineFile string
+		var lineNo int64
+
+		if fileLine := javaLocationFileLineRx.FindStringSubmatch(jloc[2]); len(fileLine) == 4 {
+			// Found a line of the form: "function (file:line)"
+			lineFunc, lineFile = fileLine[1], fileLine[2]
+			if n, err := strconv.ParseInt(fileLine[3], 10, 64); err == nil && n > 0 {
+				lineNo = n
+			}
+		} else if filePath := javaLocationPathRx.FindStringSubmatch(jloc[2]); len(filePath) == 3 {
+			// If there's not a file:line, it's a shared library path.
+			// The path isn't interesting, so just give the .so.
+			lineFunc, lineFile = filePath[1], filepath.Base(filePath[2])
+		} else if strings.Contains(jloc[2], "generated stub/JIT") {
+			lineFunc = "STUB"
+		} else {
+			// Treat whole line as the function name. This is used by the
+			// java agent for internal states such as "GC" or "VM".
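+			// e.g. a line such as "0x0000000a GC" produces the synthetic
+			// function name "GC".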
+ lineFunc = jloc[2] + } + fn := fns[lineFunc] + + if fn == nil { + fn = &Function{ + Name: lineFunc, + SystemName: lineFunc, + Filename: lineFile, + } + fns[lineFunc] = fn + p.Function = append(p.Function, fn) + } + loc.Line = []Line{ + { + Function: fn, + Line: lineNo, + }, + } + loc.Address = 0 + } + + p.remapLocationIDs() + p.remapFunctionIDs() + p.remapMappingIDs() + + return nil +} diff --git a/vendor/github.com/google/pprof/profile/legacy_profile.go b/vendor/github.com/google/pprof/profile/legacy_profile.go new file mode 100644 index 0000000..8d07fd6 --- /dev/null +++ b/vendor/github.com/google/pprof/profile/legacy_profile.go @@ -0,0 +1,1228 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file implements parsers to convert legacy profiles into the +// profile.proto format. + +package profile + +import ( + "bufio" + "bytes" + "fmt" + "io" + "math" + "regexp" + "strconv" + "strings" +) + +var ( + countStartRE = regexp.MustCompile(`\A(\S+) profile: total \d+\z`) + countRE = regexp.MustCompile(`\A(\d+) @(( 0x[0-9a-f]+)+)\z`) + + heapHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] *@ *(heap[_a-z0-9]*)/?(\d*)`) + heapSampleRE = regexp.MustCompile(`(-?\d+): *(-?\d+) *\[ *(\d+): *(\d+) *] @([ x0-9a-f]*)`) + + contentionSampleRE = regexp.MustCompile(`(\d+) *(\d+) @([ x0-9a-f]*)`) + + hexNumberRE = regexp.MustCompile(`0x[0-9a-f]+`) + + growthHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] @ growthz?`) + + fragmentationHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] @ fragmentationz?`) + + threadzStartRE = regexp.MustCompile(`--- threadz \d+ ---`) + threadStartRE = regexp.MustCompile(`--- Thread ([[:xdigit:]]+) \(name: (.*)/(\d+)\) stack: ---`) + + // Regular expressions to parse process mappings. Support the format used by Linux /proc/.../maps and other tools. + // Recommended format: + // Start End object file name offset(optional) linker build id + // 0x40000-0x80000 /path/to/binary (@FF00) abc123456 + spaceDigits = `\s+[[:digit:]]+` + hexPair = `\s+[[:xdigit:]]+:[[:xdigit:]]+` + oSpace = `\s*` + // Capturing expressions. + cHex = `(?:0x)?([[:xdigit:]]+)` + cHexRange = `\s*` + cHex + `[\s-]?` + oSpace + cHex + `:?` + cSpaceString = `(?:\s+(\S+))?` + cSpaceHex = `(?:\s+([[:xdigit:]]+))?` + cSpaceAtOffset = `(?:\s+\(@([[:xdigit:]]+)\))?` + cPerm = `(?:\s+([-rwxp]+))?` + + procMapsRE = regexp.MustCompile(`^` + cHexRange + cPerm + cSpaceHex + hexPair + spaceDigits + cSpaceString) + briefMapsRE = regexp.MustCompile(`^` + cHexRange + cPerm + cSpaceString + cSpaceAtOffset + cSpaceHex) + + // Regular expression to parse log data, of the form: + // ... file:line] msg... 
+ logInfoRE = regexp.MustCompile(`^[^\[\]]+:[0-9]+]\s`) +) + +func isSpaceOrComment(line string) bool { + trimmed := strings.TrimSpace(line) + return len(trimmed) == 0 || trimmed[0] == '#' +} + +// parseGoCount parses a Go count profile (e.g., threadcreate or +// goroutine) and returns a new Profile. +func parseGoCount(b []byte) (*Profile, error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + // Skip comments at the beginning of the file. + for s.Scan() && isSpaceOrComment(s.Text()) { + } + if err := s.Err(); err != nil { + return nil, err + } + m := countStartRE.FindStringSubmatch(s.Text()) + if m == nil { + return nil, errUnrecognized + } + profileType := m[1] + p := &Profile{ + PeriodType: &ValueType{Type: profileType, Unit: "count"}, + Period: 1, + SampleType: []*ValueType{{Type: profileType, Unit: "count"}}, + } + locations := make(map[uint64]*Location) + for s.Scan() { + line := s.Text() + if isSpaceOrComment(line) { + continue + } + if strings.HasPrefix(line, "---") { + break + } + m := countRE.FindStringSubmatch(line) + if m == nil { + return nil, errMalformed + } + n, err := strconv.ParseInt(m[1], 0, 64) + if err != nil { + return nil, errMalformed + } + fields := strings.Fields(m[2]) + locs := make([]*Location, 0, len(fields)) + for _, stk := range fields { + addr, err := strconv.ParseUint(stk, 0, 64) + if err != nil { + return nil, errMalformed + } + // Adjust all frames by -1 to land on top of the call instruction. + addr-- + loc := locations[addr] + if loc == nil { + loc = &Location{ + Address: addr, + } + locations[addr] = loc + p.Location = append(p.Location, loc) + } + locs = append(locs, loc) + } + p.Sample = append(p.Sample, &Sample{ + Location: locs, + Value: []int64{n}, + }) + } + if err := s.Err(); err != nil { + return nil, err + } + + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + return p, nil +} + +// remapLocationIDs ensures there is a location for each address +// referenced by a sample, and remaps the samples to point to the new +// location ids. +func (p *Profile) remapLocationIDs() { + seen := make(map[*Location]bool, len(p.Location)) + var locs []*Location + + for _, s := range p.Sample { + for _, l := range s.Location { + if seen[l] { + continue + } + l.ID = uint64(len(locs) + 1) + locs = append(locs, l) + seen[l] = true + } + } + p.Location = locs +} + +func (p *Profile) remapFunctionIDs() { + seen := make(map[*Function]bool, len(p.Function)) + var fns []*Function + + for _, l := range p.Location { + for _, ln := range l.Line { + fn := ln.Function + if fn == nil || seen[fn] { + continue + } + fn.ID = uint64(len(fns) + 1) + fns = append(fns, fn) + seen[fn] = true + } + } + p.Function = fns +} + +// remapMappingIDs matches location addresses with existing mappings +// and updates them appropriately. This is O(N*M), if this ever shows +// up as a bottleneck, evaluate sorting the mappings and doing a +// binary search, which would make it O(N*log(M)). +func (p *Profile) remapMappingIDs() { + // Some profile handlers will incorrectly set regions for the main + // executable if its section is remapped. Fix them through heuristics. + + if len(p.Mapping) > 0 { + // Remove the initial mapping if named '/anon_hugepage' and has a + // consecutive adjacent mapping. 
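+		// Here "consecutive adjacent" means the hugepage mapping's Limit
+		// equals the next mapping's Start.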
+ if m := p.Mapping[0]; strings.HasPrefix(m.File, "/anon_hugepage") { + if len(p.Mapping) > 1 && m.Limit == p.Mapping[1].Start { + p.Mapping = p.Mapping[1:] + } + } + } + + // Subtract the offset from the start of the main mapping if it + // ends up at a recognizable start address. + if len(p.Mapping) > 0 { + const expectedStart = 0x400000 + if m := p.Mapping[0]; m.Start-m.Offset == expectedStart { + m.Start = expectedStart + m.Offset = 0 + } + } + + // Associate each location with an address to the corresponding + // mapping. Create fake mapping if a suitable one isn't found. + var fake *Mapping +nextLocation: + for _, l := range p.Location { + a := l.Address + if l.Mapping != nil || a == 0 { + continue + } + for _, m := range p.Mapping { + if m.Start <= a && a < m.Limit { + l.Mapping = m + continue nextLocation + } + } + // Work around legacy handlers failing to encode the first + // part of mappings split into adjacent ranges. + for _, m := range p.Mapping { + if m.Offset != 0 && m.Start-m.Offset <= a && a < m.Start { + m.Start -= m.Offset + m.Offset = 0 + l.Mapping = m + continue nextLocation + } + } + // If there is still no mapping, create a fake one. + // This is important for the Go legacy handler, which produced + // no mappings. + if fake == nil { + fake = &Mapping{ + ID: 1, + Limit: ^uint64(0), + } + p.Mapping = append(p.Mapping, fake) + } + l.Mapping = fake + } + + // Reset all mapping IDs. + for i, m := range p.Mapping { + m.ID = uint64(i + 1) + } +} + +var cpuInts = []func([]byte) (uint64, []byte){ + get32l, + get32b, + get64l, + get64b, +} + +func get32l(b []byte) (uint64, []byte) { + if len(b) < 4 { + return 0, nil + } + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24, b[4:] +} + +func get32b(b []byte) (uint64, []byte) { + if len(b) < 4 { + return 0, nil + } + return uint64(b[3]) | uint64(b[2])<<8 | uint64(b[1])<<16 | uint64(b[0])<<24, b[4:] +} + +func get64l(b []byte) (uint64, []byte) { + if len(b) < 8 { + return 0, nil + } + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56, b[8:] +} + +func get64b(b []byte) (uint64, []byte) { + if len(b) < 8 { + return 0, nil + } + return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56, b[8:] +} + +// parseCPU parses a profilez legacy profile and returns a newly +// populated Profile. +// +// The general format for profilez samples is a sequence of words in +// binary format. The first words are a header with the following data: +// +// 1st word -- 0 +// 2nd word -- 3 +// 3rd word -- 0 if a c++ application, 1 if a java application. +// 4th word -- Sampling period (in microseconds). +// 5th word -- Padding. +func parseCPU(b []byte) (*Profile, error) { + var parse func([]byte) (uint64, []byte) + var n1, n2, n3, n4, n5 uint64 + for _, parse = range cpuInts { + var tmp []byte + n1, tmp = parse(b) + n2, tmp = parse(tmp) + n3, tmp = parse(tmp) + n4, tmp = parse(tmp) + n5, tmp = parse(tmp) + + if tmp != nil && n1 == 0 && n2 == 3 && n3 == 0 && n4 > 0 && n5 == 0 { + b = tmp + return cpuProfile(b, int64(n4), parse) + } + if tmp != nil && n1 == 0 && n2 == 3 && n3 == 1 && n4 > 0 && n5 == 0 { + b = tmp + return javaCPUProfile(b, int64(n4), parse) + } + } + return nil, errUnrecognized +} + +// cpuProfile returns a new Profile from C++ profilez data. 
+// b is the profile bytes after the header, period is the profiling +// period, and parse is a function to parse 8-byte chunks from the +// profile in its native endianness. +func cpuProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte)) (*Profile, error) { + p := &Profile{ + Period: period * 1000, + PeriodType: &ValueType{Type: "cpu", Unit: "nanoseconds"}, + SampleType: []*ValueType{ + {Type: "samples", Unit: "count"}, + {Type: "cpu", Unit: "nanoseconds"}, + }, + } + var err error + if b, _, err = parseCPUSamples(b, parse, true, p); err != nil { + return nil, err + } + + // If *most* samples have the same second-to-the-bottom frame, it + // strongly suggests that it is an uninteresting artifact of + // measurement -- a stack frame pushed by the signal handler. The + // bottom frame is always correct as it is picked up from the signal + // structure, not the stack. Check if this is the case and if so, + // remove. + + // Remove up to two frames. + maxiter := 2 + // Allow one different sample for this many samples with the same + // second-to-last frame. + similarSamples := 32 + margin := len(p.Sample) / similarSamples + + for iter := 0; iter < maxiter; iter++ { + addr1 := make(map[uint64]int) + for _, s := range p.Sample { + if len(s.Location) > 1 { + a := s.Location[1].Address + addr1[a] = addr1[a] + 1 + } + } + + for id1, count := range addr1 { + if count >= len(p.Sample)-margin { + // Found uninteresting frame, strip it out from all samples + for _, s := range p.Sample { + if len(s.Location) > 1 && s.Location[1].Address == id1 { + s.Location = append(s.Location[:1], s.Location[2:]...) + } + } + break + } + } + } + + if err := p.ParseMemoryMap(bytes.NewBuffer(b)); err != nil { + return nil, err + } + + cleanupDuplicateLocations(p) + return p, nil +} + +func cleanupDuplicateLocations(p *Profile) { + // The profile handler may duplicate the leaf frame, because it gets + // its address both from stack unwinding and from the signal + // context. Detect this and delete the duplicate, which has been + // adjusted by -1. The leaf address should not be adjusted as it is + // not a call. + for _, s := range p.Sample { + if len(s.Location) > 1 && s.Location[0].Address == s.Location[1].Address+1 { + s.Location = append(s.Location[:1], s.Location[2:]...) + } + } +} + +// parseCPUSamples parses a collection of profilez samples from a +// profile. +// +// profilez samples are a repeated sequence of stack frames of the +// form: +// +// 1st word -- The number of times this stack was encountered. +// 2nd word -- The size of the stack (StackSize). +// 3rd word -- The first address on the stack. +// ... +// StackSize + 2 -- The last address on the stack +// +// The last stack trace is of the form: +// +// 1st word -- 0 +// 2nd word -- 1 +// 3rd word -- 0 +// +// Addresses from stack traces may point to the next instruction after +// each call. Optionally adjust by -1 to land somewhere on the actual +// call (except for the leaf, which is not a call). 
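+//
+// For example, a two-frame stack observed five times is encoded as the four
+// words 5, 2, pc1, pc2, and the end-of-data marker is the triple 0, 1, 0.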
+func parseCPUSamples(b []byte, parse func(b []byte) (uint64, []byte), adjust bool, p *Profile) ([]byte, map[uint64]*Location, error) { + locs := make(map[uint64]*Location) + for len(b) > 0 { + var count, nstk uint64 + count, b = parse(b) + nstk, b = parse(b) + if b == nil || nstk > uint64(len(b)/4) { + return nil, nil, errUnrecognized + } + var sloc []*Location + addrs := make([]uint64, nstk) + for i := 0; i < int(nstk); i++ { + addrs[i], b = parse(b) + } + + if count == 0 && nstk == 1 && addrs[0] == 0 { + // End of data marker + break + } + for i, addr := range addrs { + if adjust && i > 0 { + addr-- + } + loc := locs[addr] + if loc == nil { + loc = &Location{ + Address: addr, + } + locs[addr] = loc + p.Location = append(p.Location, loc) + } + sloc = append(sloc, loc) + } + p.Sample = append(p.Sample, + &Sample{ + Value: []int64{int64(count), int64(count) * p.Period}, + Location: sloc, + }) + } + // Reached the end without finding the EOD marker. + return b, locs, nil +} + +// parseHeap parses a heapz legacy or a growthz profile and +// returns a newly populated Profile. +func parseHeap(b []byte) (p *Profile, err error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + if !s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + return nil, errUnrecognized + } + p = &Profile{} + + sampling := "" + hasAlloc := false + + line := s.Text() + p.PeriodType = &ValueType{Type: "space", Unit: "bytes"} + if header := heapHeaderRE.FindStringSubmatch(line); header != nil { + sampling, p.Period, hasAlloc, err = parseHeapHeader(line) + if err != nil { + return nil, err + } + } else if header = growthHeaderRE.FindStringSubmatch(line); header != nil { + p.Period = 1 + } else if header = fragmentationHeaderRE.FindStringSubmatch(line); header != nil { + p.Period = 1 + } else { + return nil, errUnrecognized + } + + if hasAlloc { + // Put alloc before inuse so that default pprof selection + // will prefer inuse_space. + p.SampleType = []*ValueType{ + {Type: "alloc_objects", Unit: "count"}, + {Type: "alloc_space", Unit: "bytes"}, + {Type: "inuse_objects", Unit: "count"}, + {Type: "inuse_space", Unit: "bytes"}, + } + } else { + p.SampleType = []*ValueType{ + {Type: "objects", Unit: "count"}, + {Type: "space", Unit: "bytes"}, + } + } + + locs := make(map[uint64]*Location) + for s.Scan() { + line := strings.TrimSpace(s.Text()) + + if isSpaceOrComment(line) { + continue + } + + if isMemoryMapSentinel(line) { + break + } + + value, blocksize, addrs, err := parseHeapSample(line, p.Period, sampling, hasAlloc) + if err != nil { + return nil, err + } + + var sloc []*Location + for _, addr := range addrs { + // Addresses from stack traces point to the next instruction after + // each call. Adjust by -1 to land somewhere on the actual call. 
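+			// (Here every frame is adjusted, including the leaf;
+			// parseCPUSamples, by contrast, leaves the leaf unadjusted.)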
+ addr-- + loc := locs[addr] + if locs[addr] == nil { + loc = &Location{ + Address: addr, + } + p.Location = append(p.Location, loc) + locs[addr] = loc + } + sloc = append(sloc, loc) + } + + p.Sample = append(p.Sample, &Sample{ + Value: value, + Location: sloc, + NumLabel: map[string][]int64{"bytes": {blocksize}}, + }) + } + if err := s.Err(); err != nil { + return nil, err + } + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + return p, nil +} + +func parseHeapHeader(line string) (sampling string, period int64, hasAlloc bool, err error) { + header := heapHeaderRE.FindStringSubmatch(line) + if header == nil { + return "", 0, false, errUnrecognized + } + + if len(header[6]) > 0 { + if period, err = strconv.ParseInt(header[6], 10, 64); err != nil { + return "", 0, false, errUnrecognized + } + } + + if (header[3] != header[1] && header[3] != "0") || (header[4] != header[2] && header[4] != "0") { + hasAlloc = true + } + + switch header[5] { + case "heapz_v2", "heap_v2": + return "v2", period, hasAlloc, nil + case "heapprofile": + return "", 1, hasAlloc, nil + case "heap": + return "v2", period / 2, hasAlloc, nil + default: + return "", 0, false, errUnrecognized + } +} + +// parseHeapSample parses a single row from a heap profile into a new Sample. +func parseHeapSample(line string, rate int64, sampling string, includeAlloc bool) (value []int64, blocksize int64, addrs []uint64, err error) { + sampleData := heapSampleRE.FindStringSubmatch(line) + if len(sampleData) != 6 { + return nil, 0, nil, fmt.Errorf("unexpected number of sample values: got %d, want 6", len(sampleData)) + } + + // This is a local-scoped helper function to avoid needing to pass + // around rate, sampling and many return parameters. + addValues := func(countString, sizeString string, label string) error { + count, err := strconv.ParseInt(countString, 10, 64) + if err != nil { + return fmt.Errorf("malformed sample: %s: %v", line, err) + } + size, err := strconv.ParseInt(sizeString, 10, 64) + if err != nil { + return fmt.Errorf("malformed sample: %s: %v", line, err) + } + if count == 0 && size != 0 { + return fmt.Errorf("%s count was 0 but %s bytes was %d", label, label, size) + } + if count != 0 { + blocksize = size / count + if sampling == "v2" { + count, size = scaleHeapSample(count, size, rate) + } + } + value = append(value, count, size) + return nil + } + + if includeAlloc { + if err := addValues(sampleData[3], sampleData[4], "allocation"); err != nil { + return nil, 0, nil, err + } + } + + if err := addValues(sampleData[1], sampleData[2], "inuse"); err != nil { + return nil, 0, nil, err + } + + addrs, err = parseHexAddresses(sampleData[5]) + if err != nil { + return nil, 0, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + + return value, blocksize, addrs, nil +} + +// parseHexAddresses extracts hex numbers from a string, attempts to convert +// each to an unsigned 64-bit number and returns the resulting numbers as a +// slice, or an error if the string contains hex numbers which are too large to +// handle (which means a malformed profile). 
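+//
+// For example, "@ 0x400000 0x401000" yields []uint64{0x400000, 0x401000}.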
+func parseHexAddresses(s string) ([]uint64, error) { + hexStrings := hexNumberRE.FindAllString(s, -1) + var addrs []uint64 + for _, s := range hexStrings { + if addr, err := strconv.ParseUint(s, 0, 64); err == nil { + addrs = append(addrs, addr) + } else { + return nil, fmt.Errorf("failed to parse as hex 64-bit number: %s", s) + } + } + return addrs, nil +} + +// scaleHeapSample adjusts the data from a heapz Sample to +// account for its probability of appearing in the collected +// data. heapz profiles are a sampling of the memory allocations +// requests in a program. We estimate the unsampled value by dividing +// each collected sample by its probability of appearing in the +// profile. heapz v2 profiles rely on a poisson process to determine +// which samples to collect, based on the desired average collection +// rate R. The probability of a sample of size S to appear in that +// profile is 1-exp(-S/R). +func scaleHeapSample(count, size, rate int64) (int64, int64) { + if count == 0 || size == 0 { + return 0, 0 + } + + if rate <= 1 { + // if rate==1 all samples were collected so no adjustment is needed. + // if rate<1 treat as unknown and skip scaling. + return count, size + } + + avgSize := float64(size) / float64(count) + scale := 1 / (1 - math.Exp(-avgSize/float64(rate))) + + return int64(float64(count) * scale), int64(float64(size) * scale) +} + +// parseContention parses a mutex or contention profile. There are 2 cases: +// "--- contentionz " for legacy C++ profiles (and backwards compatibility) +// "--- mutex:" or "--- contention:" for profiles generated by the Go runtime. +func parseContention(b []byte) (*Profile, error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + if !s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + return nil, errUnrecognized + } + + switch l := s.Text(); { + case strings.HasPrefix(l, "--- contentionz "): + case strings.HasPrefix(l, "--- mutex:"): + case strings.HasPrefix(l, "--- contention:"): + default: + return nil, errUnrecognized + } + + p := &Profile{ + PeriodType: &ValueType{Type: "contentions", Unit: "count"}, + Period: 1, + SampleType: []*ValueType{ + {Type: "contentions", Unit: "count"}, + {Type: "delay", Unit: "nanoseconds"}, + }, + } + + var cpuHz int64 + // Parse text of the form "attribute = value" before the samples. + const delimiter = "=" + for s.Scan() { + line := s.Text() + if line = strings.TrimSpace(line); isSpaceOrComment(line) { + continue + } + if strings.HasPrefix(line, "---") { + break + } + attr := strings.SplitN(line, delimiter, 2) + if len(attr) != 2 { + break + } + key, val := strings.TrimSpace(attr[0]), strings.TrimSpace(attr[1]) + var err error + switch key { + case "cycles/second": + if cpuHz, err = strconv.ParseInt(val, 0, 64); err != nil { + return nil, errUnrecognized + } + case "sampling period": + if p.Period, err = strconv.ParseInt(val, 0, 64); err != nil { + return nil, errUnrecognized + } + case "ms since reset": + ms, err := strconv.ParseInt(val, 0, 64) + if err != nil { + return nil, errUnrecognized + } + p.DurationNanos = ms * 1000 * 1000 + case "format": + // CPP contentionz profiles don't have format. + return nil, errUnrecognized + case "resolution": + // CPP contentionz profiles don't have resolution. 
+ return nil, errUnrecognized + case "discarded samples": + default: + return nil, errUnrecognized + } + } + if err := s.Err(); err != nil { + return nil, err + } + + locs := make(map[uint64]*Location) + for { + line := strings.TrimSpace(s.Text()) + if strings.HasPrefix(line, "---") { + break + } + if !isSpaceOrComment(line) { + value, addrs, err := parseContentionSample(line, p.Period, cpuHz) + if err != nil { + return nil, err + } + var sloc []*Location + for _, addr := range addrs { + // Addresses from stack traces point to the next instruction after + // each call. Adjust by -1 to land somewhere on the actual call. + addr-- + loc := locs[addr] + if locs[addr] == nil { + loc = &Location{ + Address: addr, + } + p.Location = append(p.Location, loc) + locs[addr] = loc + } + sloc = append(sloc, loc) + } + p.Sample = append(p.Sample, &Sample{ + Value: value, + Location: sloc, + }) + } + if !s.Scan() { + break + } + } + if err := s.Err(); err != nil { + return nil, err + } + + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + + return p, nil +} + +// parseContentionSample parses a single row from a contention profile +// into a new Sample. +func parseContentionSample(line string, period, cpuHz int64) (value []int64, addrs []uint64, err error) { + sampleData := contentionSampleRE.FindStringSubmatch(line) + if sampleData == nil { + return nil, nil, errUnrecognized + } + + v1, err := strconv.ParseInt(sampleData[1], 10, 64) + if err != nil { + return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + v2, err := strconv.ParseInt(sampleData[2], 10, 64) + if err != nil { + return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + + // Unsample values if period and cpuHz are available. + // - Delays are scaled to cycles and then to nanoseconds. + // - Contentions are scaled to cycles. + if period > 0 { + if cpuHz > 0 { + cpuGHz := float64(cpuHz) / 1e9 + v1 = int64(float64(v1) * float64(period) / cpuGHz) + } + v2 = v2 * period + } + + value = []int64{v2, v1} + addrs, err = parseHexAddresses(sampleData[3]) + if err != nil { + return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + + return value, addrs, nil +} + +// parseThread parses a Threadz profile and returns a new Profile. +func parseThread(b []byte) (*Profile, error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + // Skip past comments and empty lines seeking a real header. + for s.Scan() && isSpaceOrComment(s.Text()) { + } + + line := s.Text() + if m := threadzStartRE.FindStringSubmatch(line); m != nil { + // Advance over initial comments until first stack trace. + for s.Scan() { + if line = s.Text(); isMemoryMapSentinel(line) || strings.HasPrefix(line, "-") { + break + } + } + } else if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 { + return nil, errUnrecognized + } + + p := &Profile{ + SampleType: []*ValueType{{Type: "thread", Unit: "count"}}, + PeriodType: &ValueType{Type: "thread", Unit: "count"}, + Period: 1, + } + + locs := make(map[uint64]*Location) + // Recognize each thread and populate profile samples. + for !isMemoryMapSentinel(line) { + if strings.HasPrefix(line, "---- no stack trace for") { + break + } + if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 { + return nil, errUnrecognized + } + + var addrs []uint64 + var err error + line, addrs, err = parseThreadSample(s) + if err != nil { + return nil, err + } + if len(addrs) == 0 { + // We got a --same as previous threads--. Bump counters. 
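+			// Reuse the previous thread's stack: bump its count rather than
+			// appending a new sample.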
+ if len(p.Sample) > 0 { + s := p.Sample[len(p.Sample)-1] + s.Value[0]++ + } + continue + } + + var sloc []*Location + for i, addr := range addrs { + // Addresses from stack traces point to the next instruction after + // each call. Adjust by -1 to land somewhere on the actual call + // (except for the leaf, which is not a call). + if i > 0 { + addr-- + } + loc := locs[addr] + if locs[addr] == nil { + loc = &Location{ + Address: addr, + } + p.Location = append(p.Location, loc) + locs[addr] = loc + } + sloc = append(sloc, loc) + } + + p.Sample = append(p.Sample, &Sample{ + Value: []int64{1}, + Location: sloc, + }) + } + + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + + cleanupDuplicateLocations(p) + return p, nil +} + +// parseThreadSample parses a symbolized or unsymbolized stack trace. +// Returns the first line after the traceback, the sample (or nil if +// it hits a 'same-as-previous' marker) and an error. +func parseThreadSample(s *bufio.Scanner) (nextl string, addrs []uint64, err error) { + var line string + sameAsPrevious := false + for s.Scan() { + line = strings.TrimSpace(s.Text()) + if line == "" { + continue + } + + if strings.HasPrefix(line, "---") { + break + } + if strings.Contains(line, "same as previous thread") { + sameAsPrevious = true + continue + } + + curAddrs, err := parseHexAddresses(line) + if err != nil { + return "", nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + addrs = append(addrs, curAddrs...) + } + if err := s.Err(); err != nil { + return "", nil, err + } + if sameAsPrevious { + return line, nil, nil + } + return line, addrs, nil +} + +// parseAdditionalSections parses any additional sections in the +// profile, ignoring any unrecognized sections. +func parseAdditionalSections(s *bufio.Scanner, p *Profile) error { + for !isMemoryMapSentinel(s.Text()) && s.Scan() { + } + if err := s.Err(); err != nil { + return err + } + return p.ParseMemoryMapFromScanner(s) +} + +// ParseProcMaps parses a memory map in the format of /proc/self/maps. +// ParseMemoryMap should be called after setting on a profile to +// associate locations to the corresponding mapping based on their +// address. +func ParseProcMaps(rd io.Reader) ([]*Mapping, error) { + s := bufio.NewScanner(rd) + return parseProcMapsFromScanner(s) +} + +func parseProcMapsFromScanner(s *bufio.Scanner) ([]*Mapping, error) { + var mapping []*Mapping + + var attrs []string + const delimiter = "=" + r := strings.NewReplacer() + for s.Scan() { + line := r.Replace(removeLoggingInfo(s.Text())) + m, err := parseMappingEntry(line) + if err != nil { + if err == errUnrecognized { + // Recognize assignments of the form: attr=value, and replace + // $attr with value on subsequent mappings. + if attr := strings.SplitN(line, delimiter, 2); len(attr) == 2 { + attrs = append(attrs, "$"+strings.TrimSpace(attr[0]), strings.TrimSpace(attr[1])) + r = strings.NewReplacer(attrs...) + } + // Ignore any unrecognized entries + continue + } + return nil, err + } + if m == nil { + continue + } + mapping = append(mapping, m) + } + if err := s.Err(); err != nil { + return nil, err + } + return mapping, nil +} + +// removeLoggingInfo detects and removes log prefix entries generated +// by the glog package. If no logging prefix is detected, the string +// is returned unmodified. 
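+// For example, a glog-style line "main.go:42] MAPPED_LIBRARIES:" is reduced
+// to "MAPPED_LIBRARIES:".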
+func removeLoggingInfo(line string) string { + if match := logInfoRE.FindStringIndex(line); match != nil { + return line[match[1]:] + } + return line +} + +// ParseMemoryMap parses a memory map in the format of +// /proc/self/maps, and overrides the mappings in the current profile. +// It renumbers the samples and locations in the profile correspondingly. +func (p *Profile) ParseMemoryMap(rd io.Reader) error { + return p.ParseMemoryMapFromScanner(bufio.NewScanner(rd)) +} + +// ParseMemoryMapFromScanner parses a memory map in the format of +// /proc/self/maps or a variety of legacy format, and overrides the +// mappings in the current profile. It renumbers the samples and +// locations in the profile correspondingly. +func (p *Profile) ParseMemoryMapFromScanner(s *bufio.Scanner) error { + mapping, err := parseProcMapsFromScanner(s) + if err != nil { + return err + } + p.Mapping = append(p.Mapping, mapping...) + p.massageMappings() + p.remapLocationIDs() + p.remapFunctionIDs() + p.remapMappingIDs() + return nil +} + +func parseMappingEntry(l string) (*Mapping, error) { + var start, end, perm, file, offset, buildID string + if me := procMapsRE.FindStringSubmatch(l); len(me) == 6 { + start, end, perm, offset, file = me[1], me[2], me[3], me[4], me[5] + } else if me := briefMapsRE.FindStringSubmatch(l); len(me) == 7 { + start, end, perm, file, offset, buildID = me[1], me[2], me[3], me[4], me[5], me[6] + } else { + return nil, errUnrecognized + } + + var err error + mapping := &Mapping{ + File: file, + BuildID: buildID, + } + if perm != "" && !strings.Contains(perm, "x") { + // Skip non-executable entries. + return nil, nil + } + if mapping.Start, err = strconv.ParseUint(start, 16, 64); err != nil { + return nil, errUnrecognized + } + if mapping.Limit, err = strconv.ParseUint(end, 16, 64); err != nil { + return nil, errUnrecognized + } + if offset != "" { + if mapping.Offset, err = strconv.ParseUint(offset, 16, 64); err != nil { + return nil, errUnrecognized + } + } + return mapping, nil +} + +var memoryMapSentinels = []string{ + "--- Memory map: ---", + "MAPPED_LIBRARIES:", +} + +// isMemoryMapSentinel returns true if the string contains one of the +// known sentinels for memory map information. +func isMemoryMapSentinel(line string) bool { + for _, s := range memoryMapSentinels { + if strings.Contains(line, s) { + return true + } + } + return false +} + +func (p *Profile) addLegacyFrameInfo() { + switch { + case isProfileType(p, heapzSampleTypes): + p.DropFrames, p.KeepFrames = allocRxStr, allocSkipRxStr + case isProfileType(p, contentionzSampleTypes): + p.DropFrames, p.KeepFrames = lockRxStr, "" + default: + p.DropFrames, p.KeepFrames = cpuProfilerRxStr, "" + } +} + +var heapzSampleTypes = [][]string{ + {"allocations", "size"}, // early Go pprof profiles + {"objects", "space"}, + {"inuse_objects", "inuse_space"}, + {"alloc_objects", "alloc_space"}, + {"alloc_objects", "alloc_space", "inuse_objects", "inuse_space"}, // Go pprof legacy profiles +} +var contentionzSampleTypes = [][]string{ + {"contentions", "delay"}, +} + +func isProfileType(p *Profile, types [][]string) bool { + st := p.SampleType +nextType: + for _, t := range types { + if len(st) != len(t) { + continue + } + + for i := range st { + if st[i].Type != t[i] { + continue nextType + } + } + return true + } + return false +} + +var allocRxStr = strings.Join([]string{ + // POSIX entry points. 
+ `calloc`, + `cfree`, + `malloc`, + `free`, + `memalign`, + `do_memalign`, + `(__)?posix_memalign`, + `pvalloc`, + `valloc`, + `realloc`, + + // TC malloc. + `tcmalloc::.*`, + `tc_calloc`, + `tc_cfree`, + `tc_malloc`, + `tc_free`, + `tc_memalign`, + `tc_posix_memalign`, + `tc_pvalloc`, + `tc_valloc`, + `tc_realloc`, + `tc_new`, + `tc_delete`, + `tc_newarray`, + `tc_deletearray`, + `tc_new_nothrow`, + `tc_newarray_nothrow`, + + // Memory-allocation routines on OS X. + `malloc_zone_malloc`, + `malloc_zone_calloc`, + `malloc_zone_valloc`, + `malloc_zone_realloc`, + `malloc_zone_memalign`, + `malloc_zone_free`, + + // Go runtime + `runtime\..*`, + + // Other misc. memory allocation routines + `BaseArena::.*`, + `(::)?do_malloc_no_errno`, + `(::)?do_malloc_pages`, + `(::)?do_malloc`, + `DoSampledAllocation`, + `MallocedMemBlock::MallocedMemBlock`, + `_M_allocate`, + `__builtin_(vec_)?delete`, + `__builtin_(vec_)?new`, + `__gnu_cxx::new_allocator::allocate`, + `__libc_malloc`, + `__malloc_alloc_template::allocate`, + `allocate`, + `cpp_alloc`, + `operator new(\[\])?`, + `simple_alloc::allocate`, +}, `|`) + +var allocSkipRxStr = strings.Join([]string{ + // Preserve Go runtime frames that appear in the middle/bottom of + // the stack. + `runtime\.panic`, + `runtime\.reflectcall`, + `runtime\.call[0-9]*`, +}, `|`) + +var cpuProfilerRxStr = strings.Join([]string{ + `ProfileData::Add`, + `ProfileData::prof_handler`, + `CpuProfiler::prof_handler`, + `__pthread_sighandler`, + `__restore`, +}, `|`) + +var lockRxStr = strings.Join([]string{ + `RecordLockProfileData`, + `(base::)?RecordLockProfileData.*`, + `(base::)?SubmitMutexProfileData.*`, + `(base::)?SubmitSpinLockProfileData.*`, + `(base::Mutex::)?AwaitCommon.*`, + `(base::Mutex::)?Unlock.*`, + `(base::Mutex::)?UnlockSlow.*`, + `(base::Mutex::)?ReaderUnlock.*`, + `(base::MutexLock::)?~MutexLock.*`, + `(Mutex::)?AwaitCommon.*`, + `(Mutex::)?Unlock.*`, + `(Mutex::)?UnlockSlow.*`, + `(Mutex::)?ReaderUnlock.*`, + `(MutexLock::)?~MutexLock.*`, + `(SpinLock::)?Unlock.*`, + `(SpinLock::)?SlowUnlock.*`, + `(SpinLockHolder::)?~SpinLockHolder.*`, +}, `|`) diff --git a/vendor/github.com/google/pprof/profile/merge.go b/vendor/github.com/google/pprof/profile/merge.go new file mode 100644 index 0000000..4b66282 --- /dev/null +++ b/vendor/github.com/google/pprof/profile/merge.go @@ -0,0 +1,667 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package profile + +import ( + "encoding/binary" + "fmt" + "sort" + "strconv" + "strings" +) + +// Compact performs garbage collection on a profile to remove any +// unreferenced fields. This is useful to reduce the size of a profile +// after samples or locations have been removed. +func (p *Profile) Compact() *Profile { + p, _ = Merge([]*Profile{p}) + return p +} + +// Merge merges all the profiles in profs into a single Profile. +// Returns a new profile independent of the input profiles. 
The merged +// profile is compacted to eliminate unused samples, locations, +// functions and mappings. Profiles must have identical profile sample +// and period types or the merge will fail. profile.Period of the +// resulting profile will be the maximum of all profiles, and +// profile.TimeNanos will be the earliest nonzero one. Merges are +// associative with the caveat of the first profile having some +// specialization in how headers are combined. There may be other +// subtleties now or in the future regarding associativity. +func Merge(srcs []*Profile) (*Profile, error) { + if len(srcs) == 0 { + return nil, fmt.Errorf("no profiles to merge") + } + p, err := combineHeaders(srcs) + if err != nil { + return nil, err + } + + pm := &profileMerger{ + p: p, + samples: make(map[sampleKey]*Sample, len(srcs[0].Sample)), + locations: make(map[locationKey]*Location, len(srcs[0].Location)), + functions: make(map[functionKey]*Function, len(srcs[0].Function)), + mappings: make(map[mappingKey]*Mapping, len(srcs[0].Mapping)), + } + + for _, src := range srcs { + // Clear the profile-specific hash tables + pm.locationsByID = makeLocationIDMap(len(src.Location)) + pm.functionsByID = make(map[uint64]*Function, len(src.Function)) + pm.mappingsByID = make(map[uint64]mapInfo, len(src.Mapping)) + + if len(pm.mappings) == 0 && len(src.Mapping) > 0 { + // The Mapping list has the property that the first mapping + // represents the main binary. Take the first Mapping we see, + // otherwise the operations below will add mappings in an + // arbitrary order. + pm.mapMapping(src.Mapping[0]) + } + + for _, s := range src.Sample { + if !isZeroSample(s) { + pm.mapSample(s) + } + } + } + + for _, s := range p.Sample { + if isZeroSample(s) { + // If there are any zero samples, re-merge the profile to GC + // them. + return Merge([]*Profile{p}) + } + } + + return p, nil +} + +// Normalize normalizes the source profile by multiplying each value in profile by the +// ratio of the sum of the base profile's values of that sample type to the sum of the +// source profile's value of that sample type. +func (p *Profile) Normalize(pb *Profile) error { + + if err := p.compatible(pb); err != nil { + return err + } + + baseVals := make([]int64, len(p.SampleType)) + for _, s := range pb.Sample { + for i, v := range s.Value { + baseVals[i] += v + } + } + + srcVals := make([]int64, len(p.SampleType)) + for _, s := range p.Sample { + for i, v := range s.Value { + srcVals[i] += v + } + } + + normScale := make([]float64, len(baseVals)) + for i := range baseVals { + if srcVals[i] == 0 { + normScale[i] = 0.0 + } else { + normScale[i] = float64(baseVals[i]) / float64(srcVals[i]) + } + } + p.ScaleN(normScale) + return nil +} + +func isZeroSample(s *Sample) bool { + for _, v := range s.Value { + if v != 0 { + return false + } + } + return true +} + +type profileMerger struct { + p *Profile + + // Memoization tables within a profile. + locationsByID locationIDMap + functionsByID map[uint64]*Function + mappingsByID map[uint64]mapInfo + + // Memoization tables for profile entities. + samples map[sampleKey]*Sample + locations map[locationKey]*Location + functions map[functionKey]*Function + mappings map[mappingKey]*Mapping +} + +type mapInfo struct { + m *Mapping + offset int64 +} + +func (pm *profileMerger) mapSample(src *Sample) *Sample { + // Check memoization table + k := pm.sampleKey(src) + if ss, ok := pm.samples[k]; ok { + for i, v := range src.Value { + ss.Value[i] += v + } + return ss + } + + // Make new sample. 
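+ // Locations are remapped into the merged profile, and the label maps
+ // are deep-copied so the new sample shares no mutable state with src.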
+ s := &Sample{ + Location: make([]*Location, len(src.Location)), + Value: make([]int64, len(src.Value)), + Label: make(map[string][]string, len(src.Label)), + NumLabel: make(map[string][]int64, len(src.NumLabel)), + NumUnit: make(map[string][]string, len(src.NumLabel)), + } + for i, l := range src.Location { + s.Location[i] = pm.mapLocation(l) + } + for k, v := range src.Label { + vv := make([]string, len(v)) + copy(vv, v) + s.Label[k] = vv + } + for k, v := range src.NumLabel { + u := src.NumUnit[k] + vv := make([]int64, len(v)) + uu := make([]string, len(u)) + copy(vv, v) + copy(uu, u) + s.NumLabel[k] = vv + s.NumUnit[k] = uu + } + copy(s.Value, src.Value) + pm.samples[k] = s + pm.p.Sample = append(pm.p.Sample, s) + return s +} + +func (pm *profileMerger) sampleKey(sample *Sample) sampleKey { + // Accumulate contents into a string. + var buf strings.Builder + buf.Grow(64) // Heuristic to avoid extra allocs + + // encode a number + putNumber := func(v uint64) { + var num [binary.MaxVarintLen64]byte + n := binary.PutUvarint(num[:], v) + buf.Write(num[:n]) + } + + // encode a string prefixed with its length. + putDelimitedString := func(s string) { + putNumber(uint64(len(s))) + buf.WriteString(s) + } + + for _, l := range sample.Location { + // Get the location in the merged profile, which may have a different ID. + if loc := pm.mapLocation(l); loc != nil { + putNumber(loc.ID) + } + } + putNumber(0) // Delimiter + + for _, l := range sortedKeys1(sample.Label) { + putDelimitedString(l) + values := sample.Label[l] + putNumber(uint64(len(values))) + for _, v := range values { + putDelimitedString(v) + } + } + + for _, l := range sortedKeys2(sample.NumLabel) { + putDelimitedString(l) + values := sample.NumLabel[l] + putNumber(uint64(len(values))) + for _, v := range values { + putNumber(uint64(v)) + } + units := sample.NumUnit[l] + putNumber(uint64(len(units))) + for _, v := range units { + putDelimitedString(v) + } + } + + return sampleKey(buf.String()) +} + +type sampleKey string + +// sortedKeys1 returns the sorted keys found in a string->[]string map. +// +// Note: this is currently non-generic since github pprof runs golint, +// which does not support generics. When that issue is fixed, it can +// be merged with sortedKeys2 and made into a generic function. +func sortedKeys1(m map[string][]string) []string { + if len(m) == 0 { + return nil + } + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +// sortedKeys2 returns the sorted keys found in a string->[]int64 map. +// +// Note: this is currently non-generic since github pprof runs golint, +// which does not support generics. When that issue is fixed, it can +// be merged with sortedKeys1 and made into a generic function. +func sortedKeys2(m map[string][]int64) []string { + if len(m) == 0 { + return nil + } + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +func (pm *profileMerger) mapLocation(src *Location) *Location { + if src == nil { + return nil + } + + if l := pm.locationsByID.get(src.ID); l != nil { + return l + } + + mi := pm.mapMapping(src.Mapping) + l := &Location{ + ID: uint64(len(pm.p.Location) + 1), + Mapping: mi.m, + Address: uint64(int64(src.Address) + mi.offset), + Line: make([]Line, len(src.Line)), + IsFolded: src.IsFolded, + } + for i, ln := range src.Line { + l.Line[i] = pm.mapLine(ln) + } + // Check memoization table. 
Must be done on the remapped location to + // account for the remapped mapping ID. + k := l.key() + if ll, ok := pm.locations[k]; ok { + pm.locationsByID.set(src.ID, ll) + return ll + } + pm.locationsByID.set(src.ID, l) + pm.locations[k] = l + pm.p.Location = append(pm.p.Location, l) + return l +} + +// key generates locationKey to be used as a key for maps. +func (l *Location) key() locationKey { + key := locationKey{ + addr: l.Address, + isFolded: l.IsFolded, + } + if l.Mapping != nil { + // Normalizes address to handle address space randomization. + key.addr -= l.Mapping.Start + key.mappingID = l.Mapping.ID + } + lines := make([]string, len(l.Line)*2) + for i, line := range l.Line { + if line.Function != nil { + lines[i*2] = strconv.FormatUint(line.Function.ID, 16) + } + lines[i*2+1] = strconv.FormatInt(line.Line, 16) + } + key.lines = strings.Join(lines, "|") + return key +} + +type locationKey struct { + addr, mappingID uint64 + lines string + isFolded bool +} + +func (pm *profileMerger) mapMapping(src *Mapping) mapInfo { + if src == nil { + return mapInfo{} + } + + if mi, ok := pm.mappingsByID[src.ID]; ok { + return mi + } + + // Check memoization tables. + mk := src.key() + if m, ok := pm.mappings[mk]; ok { + mi := mapInfo{m, int64(m.Start) - int64(src.Start)} + pm.mappingsByID[src.ID] = mi + return mi + } + m := &Mapping{ + ID: uint64(len(pm.p.Mapping) + 1), + Start: src.Start, + Limit: src.Limit, + Offset: src.Offset, + File: src.File, + KernelRelocationSymbol: src.KernelRelocationSymbol, + BuildID: src.BuildID, + HasFunctions: src.HasFunctions, + HasFilenames: src.HasFilenames, + HasLineNumbers: src.HasLineNumbers, + HasInlineFrames: src.HasInlineFrames, + } + pm.p.Mapping = append(pm.p.Mapping, m) + + // Update memoization tables. + pm.mappings[mk] = m + mi := mapInfo{m, 0} + pm.mappingsByID[src.ID] = mi + return mi +} + +// key generates encoded strings of Mapping to be used as a key for +// maps. +func (m *Mapping) key() mappingKey { + // Normalize addresses to handle address space randomization. + // Round up to next 4K boundary to avoid minor discrepancies. + const mapsizeRounding = 0x1000 + + size := m.Limit - m.Start + size = size + mapsizeRounding - 1 + size = size - (size % mapsizeRounding) + key := mappingKey{ + size: size, + offset: m.Offset, + } + + switch { + case m.BuildID != "": + key.buildIDOrFile = m.BuildID + case m.File != "": + key.buildIDOrFile = m.File + default: + // A mapping containing neither build ID nor file name is a fake mapping. A + // key with empty buildIDOrFile is used for fake mappings so that they are + // treated as the same mapping during merging. + } + return key +} + +type mappingKey struct { + size, offset uint64 + buildIDOrFile string +} + +func (pm *profileMerger) mapLine(src Line) Line { + ln := Line{ + Function: pm.mapFunction(src.Function), + Line: src.Line, + } + return ln +} + +func (pm *profileMerger) mapFunction(src *Function) *Function { + if src == nil { + return nil + } + if f, ok := pm.functionsByID[src.ID]; ok { + return f + } + k := src.key() + if f, ok := pm.functions[k]; ok { + pm.functionsByID[src.ID] = f + return f + } + f := &Function{ + ID: uint64(len(pm.p.Function) + 1), + Name: src.Name, + SystemName: src.SystemName, + Filename: src.Filename, + StartLine: src.StartLine, + } + pm.functions[k] = f + pm.functionsByID[src.ID] = f + pm.p.Function = append(pm.p.Function, f) + return f +} + +// key generates a struct to be used as a key for maps. 
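+// Two functions are considered identical if their start line, name,
+// system name and filename all match.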
+func (f *Function) key() functionKey { + return functionKey{ + f.StartLine, + f.Name, + f.SystemName, + f.Filename, + } +} + +type functionKey struct { + startLine int64 + name, systemName, fileName string +} + +// combineHeaders checks that all profiles can be merged and returns +// their combined profile. +func combineHeaders(srcs []*Profile) (*Profile, error) { + for _, s := range srcs[1:] { + if err := srcs[0].compatible(s); err != nil { + return nil, err + } + } + + var timeNanos, durationNanos, period int64 + var comments []string + seenComments := map[string]bool{} + var defaultSampleType string + for _, s := range srcs { + if timeNanos == 0 || s.TimeNanos < timeNanos { + timeNanos = s.TimeNanos + } + durationNanos += s.DurationNanos + if period == 0 || period < s.Period { + period = s.Period + } + for _, c := range s.Comments { + if seen := seenComments[c]; !seen { + comments = append(comments, c) + seenComments[c] = true + } + } + if defaultSampleType == "" { + defaultSampleType = s.DefaultSampleType + } + } + + p := &Profile{ + SampleType: make([]*ValueType, len(srcs[0].SampleType)), + + DropFrames: srcs[0].DropFrames, + KeepFrames: srcs[0].KeepFrames, + + TimeNanos: timeNanos, + DurationNanos: durationNanos, + PeriodType: srcs[0].PeriodType, + Period: period, + + Comments: comments, + DefaultSampleType: defaultSampleType, + } + copy(p.SampleType, srcs[0].SampleType) + return p, nil +} + +// compatible determines if two profiles can be compared/merged. +// returns nil if the profiles are compatible; otherwise an error with +// details on the incompatibility. +func (p *Profile) compatible(pb *Profile) error { + if !equalValueType(p.PeriodType, pb.PeriodType) { + return fmt.Errorf("incompatible period types %v and %v", p.PeriodType, pb.PeriodType) + } + + if len(p.SampleType) != len(pb.SampleType) { + return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType) + } + + for i := range p.SampleType { + if !equalValueType(p.SampleType[i], pb.SampleType[i]) { + return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType) + } + } + return nil +} + +// equalValueType returns true if the two value types are semantically +// equal. It ignores the internal fields used during encode/decode. +func equalValueType(st1, st2 *ValueType) bool { + return st1.Type == st2.Type && st1.Unit == st2.Unit +} + +// locationIDMap is like a map[uint64]*Location, but provides efficiency for +// ids that are densely numbered, which is often the case. +type locationIDMap struct { + dense []*Location // indexed by id for id < len(dense) + sparse map[uint64]*Location // indexed by id for id >= len(dense) +} + +func makeLocationIDMap(n int) locationIDMap { + return locationIDMap{ + dense: make([]*Location, n), + sparse: map[uint64]*Location{}, + } +} + +func (lm locationIDMap) get(id uint64) *Location { + if id < uint64(len(lm.dense)) { + return lm.dense[int(id)] + } + return lm.sparse[id] +} + +func (lm locationIDMap) set(id uint64, loc *Location) { + if id < uint64(len(lm.dense)) { + lm.dense[id] = loc + return + } + lm.sparse[id] = loc +} + +// CompatibilizeSampleTypes makes profiles compatible to be compared/merged. It +// keeps sample types that appear in all profiles only and drops/reorders the +// sample types as necessary. +// +// In the case of sample types order is not the same for given profiles the +// order is derived from the first profile. +// +// Profiles are modified in-place. +// +// It returns an error if the sample type's intersection is empty. 
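+//
+// For example, given profiles with sample types
+// [alloc_objects, alloc_space] and [alloc_space, inuse_objects], only
+// alloc_space is common to both, so each profile is reduced to that
+// single sample type.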
+func CompatibilizeSampleTypes(ps []*Profile) error { + sTypes := commonSampleTypes(ps) + if len(sTypes) == 0 { + return fmt.Errorf("profiles have empty common sample type list") + } + for _, p := range ps { + if err := compatibilizeSampleTypes(p, sTypes); err != nil { + return err + } + } + return nil +} + +// commonSampleTypes returns sample types that appear in all profiles in the +// order how they ordered in the first profile. +func commonSampleTypes(ps []*Profile) []string { + if len(ps) == 0 { + return nil + } + sTypes := map[string]int{} + for _, p := range ps { + for _, st := range p.SampleType { + sTypes[st.Type]++ + } + } + var res []string + for _, st := range ps[0].SampleType { + if sTypes[st.Type] == len(ps) { + res = append(res, st.Type) + } + } + return res +} + +// compatibilizeSampleTypes drops sample types that are not present in sTypes +// list and reorder them if needed. +// +// It sets DefaultSampleType to sType[0] if it is not in sType list. +// +// It assumes that all sample types from the sTypes list are present in the +// given profile otherwise it returns an error. +func compatibilizeSampleTypes(p *Profile, sTypes []string) error { + if len(sTypes) == 0 { + return fmt.Errorf("sample type list is empty") + } + defaultSampleType := sTypes[0] + reMap, needToModify := make([]int, len(sTypes)), false + for i, st := range sTypes { + if st == p.DefaultSampleType { + defaultSampleType = p.DefaultSampleType + } + idx := searchValueType(p.SampleType, st) + if idx < 0 { + return fmt.Errorf("%q sample type is not found in profile", st) + } + reMap[i] = idx + if idx != i { + needToModify = true + } + } + if !needToModify && len(sTypes) == len(p.SampleType) { + return nil + } + p.DefaultSampleType = defaultSampleType + oldSampleTypes := p.SampleType + p.SampleType = make([]*ValueType, len(sTypes)) + for i, idx := range reMap { + p.SampleType[i] = oldSampleTypes[idx] + } + values := make([]int64, len(sTypes)) + for _, s := range p.Sample { + for i, idx := range reMap { + values[i] = s.Value[idx] + } + s.Value = s.Value[:len(values)] + copy(s.Value, values) + } + return nil +} + +func searchValueType(vts []*ValueType, s string) int { + for i, vt := range vts { + if vt.Type == s { + return i + } + } + return -1 +} diff --git a/vendor/github.com/google/pprof/profile/profile.go b/vendor/github.com/google/pprof/profile/profile.go new file mode 100644 index 0000000..60ef7e9 --- /dev/null +++ b/vendor/github.com/google/pprof/profile/profile.go @@ -0,0 +1,856 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package profile provides a representation of profile.proto and +// methods to encode/decode profiles in this format. +package profile + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "math" + "path/filepath" + "regexp" + "sort" + "strings" + "sync" + "time" +) + +// Profile is an in-memory representation of profile.proto. 
+type Profile struct { + SampleType []*ValueType + DefaultSampleType string + Sample []*Sample + Mapping []*Mapping + Location []*Location + Function []*Function + Comments []string + + DropFrames string + KeepFrames string + + TimeNanos int64 + DurationNanos int64 + PeriodType *ValueType + Period int64 + + // The following fields are modified during encoding and copying, + // so are protected by a Mutex. + encodeMu sync.Mutex + + commentX []int64 + dropFramesX int64 + keepFramesX int64 + stringTable []string + defaultSampleTypeX int64 +} + +// ValueType corresponds to Profile.ValueType +type ValueType struct { + Type string // cpu, wall, inuse_space, etc + Unit string // seconds, nanoseconds, bytes, etc + + typeX int64 + unitX int64 +} + +// Sample corresponds to Profile.Sample +type Sample struct { + Location []*Location + Value []int64 + // Label is a per-label-key map to values for string labels. + // + // In general, having multiple values for the given label key is strongly + // discouraged - see docs for the sample label field in profile.proto. The + // main reason this unlikely state is tracked here is to make the + // decoding->encoding roundtrip not lossy. But we expect that the value + // slices present in this map are always of length 1. + Label map[string][]string + // NumLabel is a per-label-key map to values for numeric labels. See a note + // above on handling multiple values for a label. + NumLabel map[string][]int64 + // NumUnit is a per-label-key map to the unit names of corresponding numeric + // label values. The unit info may be missing even if the label is in + // NumLabel, see the docs in profile.proto for details. When the value is + // slice is present and not nil, its length must be equal to the length of + // the corresponding value slice in NumLabel. + NumUnit map[string][]string + + locationIDX []uint64 + labelX []label +} + +// label corresponds to Profile.Label +type label struct { + keyX int64 + // Exactly one of the two following values must be set + strX int64 + numX int64 // Integer value for this label + // can be set if numX has value + unitX int64 +} + +// Mapping corresponds to Profile.Mapping +type Mapping struct { + ID uint64 + Start uint64 + Limit uint64 + Offset uint64 + File string + BuildID string + HasFunctions bool + HasFilenames bool + HasLineNumbers bool + HasInlineFrames bool + + fileX int64 + buildIDX int64 + + // Name of the kernel relocation symbol ("_text" or "_stext"), extracted from File. + // For linux kernel mappings generated by some tools, correct symbolization depends + // on knowing which of the two possible relocation symbols was used for `Start`. + // This is given to us as a suffix in `File` (e.g. "[kernel.kallsyms]_stext"). + // + // Note, this public field is not persisted in the proto. For the purposes of + // copying / merging / hashing profiles, it is considered subsumed by `File`. + KernelRelocationSymbol string +} + +// Location corresponds to Profile.Location +type Location struct { + ID uint64 + Mapping *Mapping + Address uint64 + Line []Line + IsFolded bool + + mappingIDX uint64 +} + +// Line corresponds to Profile.Line +type Line struct { + Function *Function + Line int64 + + functionIDX uint64 +} + +// Function corresponds to Profile.Function +type Function struct { + ID uint64 + Name string + SystemName string + Filename string + StartLine int64 + + nameX int64 + systemNameX int64 + filenameX int64 +} + +// Parse parses a profile and checks for its validity. 
The input +// may be a gzip-compressed encoded protobuf or one of many legacy +// profile formats which may be unsupported in the future. +func Parse(r io.Reader) (*Profile, error) { + data, err := io.ReadAll(r) + if err != nil { + return nil, err + } + return ParseData(data) +} + +// ParseData parses a profile from a buffer and checks for its +// validity. +func ParseData(data []byte) (*Profile, error) { + var p *Profile + var err error + if len(data) >= 2 && data[0] == 0x1f && data[1] == 0x8b { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err == nil { + data, err = io.ReadAll(gz) + } + if err != nil { + return nil, fmt.Errorf("decompressing profile: %v", err) + } + } + if p, err = ParseUncompressed(data); err != nil && err != errNoData && err != errConcatProfile { + p, err = parseLegacy(data) + } + + if err != nil { + return nil, fmt.Errorf("parsing profile: %v", err) + } + + if err := p.CheckValid(); err != nil { + return nil, fmt.Errorf("malformed profile: %v", err) + } + return p, nil +} + +var errUnrecognized = fmt.Errorf("unrecognized profile format") +var errMalformed = fmt.Errorf("malformed profile format") +var errNoData = fmt.Errorf("empty input file") +var errConcatProfile = fmt.Errorf("concatenated profiles detected") + +func parseLegacy(data []byte) (*Profile, error) { + parsers := []func([]byte) (*Profile, error){ + parseCPU, + parseHeap, + parseGoCount, // goroutine, threadcreate + parseThread, + parseContention, + parseJavaProfile, + } + + for _, parser := range parsers { + p, err := parser(data) + if err == nil { + p.addLegacyFrameInfo() + return p, nil + } + if err != errUnrecognized { + return nil, err + } + } + return nil, errUnrecognized +} + +// ParseUncompressed parses an uncompressed protobuf into a profile. +func ParseUncompressed(data []byte) (*Profile, error) { + if len(data) == 0 { + return nil, errNoData + } + p := &Profile{} + if err := unmarshal(data, p); err != nil { + return nil, err + } + + if err := p.postDecode(); err != nil { + return nil, err + } + + return p, nil +} + +var libRx = regexp.MustCompile(`([.]so$|[.]so[._][0-9]+)`) + +// massageMappings applies heuristic-based changes to the profile +// mappings to account for quirks of some environments. +func (p *Profile) massageMappings() { + // Merge adjacent regions with matching names, checking that the offsets match + if len(p.Mapping) > 1 { + mappings := []*Mapping{p.Mapping[0]} + for _, m := range p.Mapping[1:] { + lm := mappings[len(mappings)-1] + if adjacent(lm, m) { + lm.Limit = m.Limit + if m.File != "" { + lm.File = m.File + } + if m.BuildID != "" { + lm.BuildID = m.BuildID + } + p.updateLocationMapping(m, lm) + continue + } + mappings = append(mappings, m) + } + p.Mapping = mappings + } + + // Use heuristics to identify main binary and move it to the top of the list of mappings + for i, m := range p.Mapping { + file := strings.TrimSpace(strings.Replace(m.File, "(deleted)", "", -1)) + if len(file) == 0 { + continue + } + if len(libRx.FindStringSubmatch(file)) > 0 { + continue + } + if file[0] == '[' { + continue + } + // Swap what we guess is main to position 0. + p.Mapping[0], p.Mapping[i] = p.Mapping[i], p.Mapping[0] + break + } + + // Keep the mapping IDs neatly sorted + for i, m := range p.Mapping { + m.ID = uint64(i + 1) + } +} + +// adjacent returns whether two mapping entries represent the same +// mapping that has been split into two. Check that their addresses are adjacent, +// and if the offsets match, if they are available. 
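+// Concretely: m1 must end exactly where m2 starts and, when both file
+// offsets are set, m1.Offset plus m1's mapped size must equal m2.Offset.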
+func adjacent(m1, m2 *Mapping) bool { + if m1.File != "" && m2.File != "" { + if m1.File != m2.File { + return false + } + } + if m1.BuildID != "" && m2.BuildID != "" { + if m1.BuildID != m2.BuildID { + return false + } + } + if m1.Limit != m2.Start { + return false + } + if m1.Offset != 0 && m2.Offset != 0 { + offset := m1.Offset + (m1.Limit - m1.Start) + if offset != m2.Offset { + return false + } + } + return true +} + +func (p *Profile) updateLocationMapping(from, to *Mapping) { + for _, l := range p.Location { + if l.Mapping == from { + l.Mapping = to + } + } +} + +func serialize(p *Profile) []byte { + p.encodeMu.Lock() + p.preEncode() + b := marshal(p) + p.encodeMu.Unlock() + return b +} + +// Write writes the profile as a gzip-compressed marshaled protobuf. +func (p *Profile) Write(w io.Writer) error { + zw := gzip.NewWriter(w) + defer zw.Close() + _, err := zw.Write(serialize(p)) + return err +} + +// WriteUncompressed writes the profile as a marshaled protobuf. +func (p *Profile) WriteUncompressed(w io.Writer) error { + _, err := w.Write(serialize(p)) + return err +} + +// CheckValid tests whether the profile is valid. Checks include, but are +// not limited to: +// - len(Profile.Sample[n].value) == len(Profile.value_unit) +// - Sample.id has a corresponding Profile.Location +func (p *Profile) CheckValid() error { + // Check that sample values are consistent + sampleLen := len(p.SampleType) + if sampleLen == 0 && len(p.Sample) != 0 { + return fmt.Errorf("missing sample type information") + } + for _, s := range p.Sample { + if s == nil { + return fmt.Errorf("profile has nil sample") + } + if len(s.Value) != sampleLen { + return fmt.Errorf("mismatch: sample has %d values vs. %d types", len(s.Value), len(p.SampleType)) + } + for _, l := range s.Location { + if l == nil { + return fmt.Errorf("sample has nil location") + } + } + } + + // Check that all mappings/locations/functions are in the tables + // Check that there are no duplicate ids + mappings := make(map[uint64]*Mapping, len(p.Mapping)) + for _, m := range p.Mapping { + if m == nil { + return fmt.Errorf("profile has nil mapping") + } + if m.ID == 0 { + return fmt.Errorf("found mapping with reserved ID=0") + } + if mappings[m.ID] != nil { + return fmt.Errorf("multiple mappings with same id: %d", m.ID) + } + mappings[m.ID] = m + } + functions := make(map[uint64]*Function, len(p.Function)) + for _, f := range p.Function { + if f == nil { + return fmt.Errorf("profile has nil function") + } + if f.ID == 0 { + return fmt.Errorf("found function with reserved ID=0") + } + if functions[f.ID] != nil { + return fmt.Errorf("multiple functions with same id: %d", f.ID) + } + functions[f.ID] = f + } + locations := make(map[uint64]*Location, len(p.Location)) + for _, l := range p.Location { + if l == nil { + return fmt.Errorf("profile has nil location") + } + if l.ID == 0 { + return fmt.Errorf("found location with reserved id=0") + } + if locations[l.ID] != nil { + return fmt.Errorf("multiple locations with same id: %d", l.ID) + } + locations[l.ID] = l + if m := l.Mapping; m != nil { + if m.ID == 0 || mappings[m.ID] != m { + return fmt.Errorf("inconsistent mapping %p: %d", m, m.ID) + } + } + for _, ln := range l.Line { + f := ln.Function + if f == nil { + return fmt.Errorf("location id: %d has a line with nil function", l.ID) + } + if f.ID == 0 || functions[f.ID] != f { + return fmt.Errorf("inconsistent function %p: %d", f, f.ID) + } + } + } + return nil +} + +// Aggregate merges the locations in the profile into equivalence +// classes 
preserving the request attributes. It also updates the +// samples to point to the merged locations. +func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address bool) error { + for _, m := range p.Mapping { + m.HasInlineFrames = m.HasInlineFrames && inlineFrame + m.HasFunctions = m.HasFunctions && function + m.HasFilenames = m.HasFilenames && filename + m.HasLineNumbers = m.HasLineNumbers && linenumber + } + + // Aggregate functions + if !function || !filename { + for _, f := range p.Function { + if !function { + f.Name = "" + f.SystemName = "" + } + if !filename { + f.Filename = "" + } + } + } + + // Aggregate locations + if !inlineFrame || !address || !linenumber { + for _, l := range p.Location { + if !inlineFrame && len(l.Line) > 1 { + l.Line = l.Line[len(l.Line)-1:] + } + if !linenumber { + for i := range l.Line { + l.Line[i].Line = 0 + } + } + if !address { + l.Address = 0 + } + } + } + + return p.CheckValid() +} + +// NumLabelUnits returns a map of numeric label keys to the units +// associated with those keys and a map of those keys to any units +// that were encountered but not used. +// Unit for a given key is the first encountered unit for that key. If multiple +// units are encountered for values paired with a particular key, then the first +// unit encountered is used and all other units are returned in sorted order +// in map of ignored units. +// If no units are encountered for a particular key, the unit is then inferred +// based on the key. +func (p *Profile) NumLabelUnits() (map[string]string, map[string][]string) { + numLabelUnits := map[string]string{} + ignoredUnits := map[string]map[string]bool{} + encounteredKeys := map[string]bool{} + + // Determine units based on numeric tags for each sample. + for _, s := range p.Sample { + for k := range s.NumLabel { + encounteredKeys[k] = true + for _, unit := range s.NumUnit[k] { + if unit == "" { + continue + } + if wantUnit, ok := numLabelUnits[k]; !ok { + numLabelUnits[k] = unit + } else if wantUnit != unit { + if v, ok := ignoredUnits[k]; ok { + v[unit] = true + } else { + ignoredUnits[k] = map[string]bool{unit: true} + } + } + } + } + } + // Infer units for keys without any units associated with + // numeric tag values. + for key := range encounteredKeys { + unit := numLabelUnits[key] + if unit == "" { + switch key { + case "alignment", "request": + numLabelUnits[key] = "bytes" + default: + numLabelUnits[key] = key + } + } + } + + // Copy ignored units into more readable format + unitsIgnored := make(map[string][]string, len(ignoredUnits)) + for key, values := range ignoredUnits { + units := make([]string, len(values)) + i := 0 + for unit := range values { + units[i] = unit + i++ + } + sort.Strings(units) + unitsIgnored[key] = units + } + + return numLabelUnits, unitsIgnored +} + +// String dumps a text representation of a profile. Intended mainly +// for debugging purposes. 
+func (p *Profile) String() string { + ss := make([]string, 0, len(p.Comments)+len(p.Sample)+len(p.Mapping)+len(p.Location)) + for _, c := range p.Comments { + ss = append(ss, "Comment: "+c) + } + if pt := p.PeriodType; pt != nil { + ss = append(ss, fmt.Sprintf("PeriodType: %s %s", pt.Type, pt.Unit)) + } + ss = append(ss, fmt.Sprintf("Period: %d", p.Period)) + if p.TimeNanos != 0 { + ss = append(ss, fmt.Sprintf("Time: %v", time.Unix(0, p.TimeNanos))) + } + if p.DurationNanos != 0 { + ss = append(ss, fmt.Sprintf("Duration: %.4v", time.Duration(p.DurationNanos))) + } + + ss = append(ss, "Samples:") + var sh1 string + for _, s := range p.SampleType { + dflt := "" + if s.Type == p.DefaultSampleType { + dflt = "[dflt]" + } + sh1 = sh1 + fmt.Sprintf("%s/%s%s ", s.Type, s.Unit, dflt) + } + ss = append(ss, strings.TrimSpace(sh1)) + for _, s := range p.Sample { + ss = append(ss, s.string()) + } + + ss = append(ss, "Locations") + for _, l := range p.Location { + ss = append(ss, l.string()) + } + + ss = append(ss, "Mappings") + for _, m := range p.Mapping { + ss = append(ss, m.string()) + } + + return strings.Join(ss, "\n") + "\n" +} + +// string dumps a text representation of a mapping. Intended mainly +// for debugging purposes. +func (m *Mapping) string() string { + bits := "" + if m.HasFunctions { + bits = bits + "[FN]" + } + if m.HasFilenames { + bits = bits + "[FL]" + } + if m.HasLineNumbers { + bits = bits + "[LN]" + } + if m.HasInlineFrames { + bits = bits + "[IN]" + } + return fmt.Sprintf("%d: %#x/%#x/%#x %s %s %s", + m.ID, + m.Start, m.Limit, m.Offset, + m.File, + m.BuildID, + bits) +} + +// string dumps a text representation of a location. Intended mainly +// for debugging purposes. +func (l *Location) string() string { + ss := []string{} + locStr := fmt.Sprintf("%6d: %#x ", l.ID, l.Address) + if m := l.Mapping; m != nil { + locStr = locStr + fmt.Sprintf("M=%d ", m.ID) + } + if l.IsFolded { + locStr = locStr + "[F] " + } + if len(l.Line) == 0 { + ss = append(ss, locStr) + } + for li := range l.Line { + lnStr := "??" + if fn := l.Line[li].Function; fn != nil { + lnStr = fmt.Sprintf("%s %s:%d s=%d", + fn.Name, + fn.Filename, + l.Line[li].Line, + fn.StartLine) + if fn.Name != fn.SystemName { + lnStr = lnStr + "(" + fn.SystemName + ")" + } + } + ss = append(ss, locStr+lnStr) + // Do not print location details past the first line + locStr = " " + } + return strings.Join(ss, "\n") +} + +// string dumps a text representation of a sample. Intended mainly +// for debugging purposes. +func (s *Sample) string() string { + ss := []string{} + var sv string + for _, v := range s.Value { + sv = fmt.Sprintf("%s %10d", sv, v) + } + sv = sv + ": " + for _, l := range s.Location { + sv = sv + fmt.Sprintf("%d ", l.ID) + } + ss = append(ss, sv) + const labelHeader = " " + if len(s.Label) > 0 { + ss = append(ss, labelHeader+labelsToString(s.Label)) + } + if len(s.NumLabel) > 0 { + ss = append(ss, labelHeader+numLabelsToString(s.NumLabel, s.NumUnit)) + } + return strings.Join(ss, "\n") +} + +// labelsToString returns a string representation of a +// map representing labels. +func labelsToString(labels map[string][]string) string { + ls := []string{} + for k, v := range labels { + ls = append(ls, fmt.Sprintf("%s:%v", k, v)) + } + sort.Strings(ls) + return strings.Join(ls, " ") +} + +// numLabelsToString returns a string representation of a map +// representing numeric labels. 
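+// For example, {"bytes": [512]} with units {"bytes": ["B"]} renders as
+// "bytes:[512 B]".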
+func numLabelsToString(numLabels map[string][]int64, numUnits map[string][]string) string { + ls := []string{} + for k, v := range numLabels { + units := numUnits[k] + var labelString string + if len(units) == len(v) { + values := make([]string, len(v)) + for i, vv := range v { + values[i] = fmt.Sprintf("%d %s", vv, units[i]) + } + labelString = fmt.Sprintf("%s:%v", k, values) + } else { + labelString = fmt.Sprintf("%s:%v", k, v) + } + ls = append(ls, labelString) + } + sort.Strings(ls) + return strings.Join(ls, " ") +} + +// SetLabel sets the specified key to the specified value for all samples in the +// profile. +func (p *Profile) SetLabel(key string, value []string) { + for _, sample := range p.Sample { + if sample.Label == nil { + sample.Label = map[string][]string{key: value} + } else { + sample.Label[key] = value + } + } +} + +// RemoveLabel removes all labels associated with the specified key for all +// samples in the profile. +func (p *Profile) RemoveLabel(key string) { + for _, sample := range p.Sample { + delete(sample.Label, key) + } +} + +// HasLabel returns true if a sample has a label with indicated key and value. +func (s *Sample) HasLabel(key, value string) bool { + for _, v := range s.Label[key] { + if v == value { + return true + } + } + return false +} + +// SetNumLabel sets the specified key to the specified value for all samples in the +// profile. "unit" is a slice that describes the units that each corresponding member +// of "values" is measured in (e.g. bytes or seconds). If there is no relevant +// unit for a given value, that member of "unit" should be the empty string. +// "unit" must either have the same length as "value", or be nil. +func (p *Profile) SetNumLabel(key string, value []int64, unit []string) { + for _, sample := range p.Sample { + if sample.NumLabel == nil { + sample.NumLabel = map[string][]int64{key: value} + } else { + sample.NumLabel[key] = value + } + if sample.NumUnit == nil { + sample.NumUnit = map[string][]string{key: unit} + } else { + sample.NumUnit[key] = unit + } + } +} + +// RemoveNumLabel removes all numerical labels associated with the specified key for all +// samples in the profile. +func (p *Profile) RemoveNumLabel(key string) { + for _, sample := range p.Sample { + delete(sample.NumLabel, key) + delete(sample.NumUnit, key) + } +} + +// DiffBaseSample returns true if a sample belongs to the diff base and false +// otherwise. +func (s *Sample) DiffBaseSample() bool { + return s.HasLabel("pprof::base", "true") +} + +// Scale multiplies all sample values in a profile by a constant and keeps +// only samples that have at least one non-zero value. +func (p *Profile) Scale(ratio float64) { + if ratio == 1 { + return + } + ratios := make([]float64, len(p.SampleType)) + for i := range p.SampleType { + ratios[i] = ratio + } + p.ScaleN(ratios) +} + +// ScaleN multiplies each sample values in a sample by a different amount +// and keeps only samples that have at least one non-zero value. 
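+//
+// For example, on a profile with sample types
+// [alloc_objects, alloc_space], ScaleN([]float64{2, 0.5}) doubles every
+// object count and halves every byte value, dropping samples whose
+// values all round to zero.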
+func (p *Profile) ScaleN(ratios []float64) error { + if len(p.SampleType) != len(ratios) { + return fmt.Errorf("mismatched scale ratios, got %d, want %d", len(ratios), len(p.SampleType)) + } + allOnes := true + for _, r := range ratios { + if r != 1 { + allOnes = false + break + } + } + if allOnes { + return nil + } + fillIdx := 0 + for _, s := range p.Sample { + keepSample := false + for i, v := range s.Value { + if ratios[i] != 1 { + val := int64(math.Round(float64(v) * ratios[i])) + s.Value[i] = val + keepSample = keepSample || val != 0 + } + } + if keepSample { + p.Sample[fillIdx] = s + fillIdx++ + } + } + p.Sample = p.Sample[:fillIdx] + return nil +} + +// HasFunctions determines if all locations in this profile have +// symbolized function information. +func (p *Profile) HasFunctions() bool { + for _, l := range p.Location { + if l.Mapping != nil && !l.Mapping.HasFunctions { + return false + } + } + return true +} + +// HasFileLines determines if all locations in this profile have +// symbolized file and line number information. +func (p *Profile) HasFileLines() bool { + for _, l := range p.Location { + if l.Mapping != nil && (!l.Mapping.HasFilenames || !l.Mapping.HasLineNumbers) { + return false + } + } + return true +} + +// Unsymbolizable returns true if a mapping points to a binary for which +// locations can't be symbolized in principle, at least now. Examples are +// "[vdso]", [vsyscall]" and some others, see the code. +func (m *Mapping) Unsymbolizable() bool { + name := filepath.Base(m.File) + return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") +} + +// Copy makes a fully independent copy of a profile. +func (p *Profile) Copy() *Profile { + pp := &Profile{} + if err := unmarshal(serialize(p), pp); err != nil { + panic(err) + } + if err := pp.postDecode(); err != nil { + panic(err) + } + + return pp +} diff --git a/vendor/github.com/google/pprof/profile/proto.go b/vendor/github.com/google/pprof/profile/proto.go new file mode 100644 index 0000000..a15696b --- /dev/null +++ b/vendor/github.com/google/pprof/profile/proto.go @@ -0,0 +1,367 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file is a simple protocol buffer encoder and decoder. +// The format is described at +// https://developers.google.com/protocol-buffers/docs/encoding +// +// A protocol message must implement the message interface: +// decoder() []decoder +// encode(*buffer) +// +// The decode method returns a slice indexed by field number that gives the +// function to decode that field. +// The encode method encodes its receiver into the given buffer. +// +// The two methods are simple enough to be implemented by hand rather than +// by using a protocol compiler. +// +// See profile.go for examples of messages implementing this interface. +// +// There is no support for groups, message sets, or "has" bits. 
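+//
+// As a minimal sketch (the "note" type and its field number 1 are
+// hypothetical, for illustration only), a message with a single string
+// field could be implemented as:
+//
+//	type note struct{ text string }
+//
+//	func (n *note) decoder() []decoder {
+//		return []decoder{
+//			nil, // field numbers start at 1; index 0 is unused
+//			func(b *buffer, m message) error { return decodeString(b, &m.(*note).text) },
+//		}
+//	}
+//
+//	func (n *note) encode(b *buffer) { encodeString(b, 1, n.text) }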
+ +package profile + +import ( + "errors" + "fmt" +) + +type buffer struct { + field int // field tag + typ int // proto wire type code for field + u64 uint64 + data []byte + tmp [16]byte + tmpLines []Line // temporary storage used while decoding "repeated Line". +} + +type decoder func(*buffer, message) error + +type message interface { + decoder() []decoder + encode(*buffer) +} + +func marshal(m message) []byte { + var b buffer + m.encode(&b) + return b.data +} + +func encodeVarint(b *buffer, x uint64) { + for x >= 128 { + b.data = append(b.data, byte(x)|0x80) + x >>= 7 + } + b.data = append(b.data, byte(x)) +} + +func encodeLength(b *buffer, tag int, len int) { + encodeVarint(b, uint64(tag)<<3|2) + encodeVarint(b, uint64(len)) +} + +func encodeUint64(b *buffer, tag int, x uint64) { + // append varint to b.data + encodeVarint(b, uint64(tag)<<3) + encodeVarint(b, x) +} + +func encodeUint64s(b *buffer, tag int, x []uint64) { + if len(x) > 2 { + // Use packed encoding + n1 := len(b.data) + for _, u := range x { + encodeVarint(b, u) + } + n2 := len(b.data) + encodeLength(b, tag, n2-n1) + n3 := len(b.data) + copy(b.tmp[:], b.data[n2:n3]) + copy(b.data[n1+(n3-n2):], b.data[n1:n2]) + copy(b.data[n1:], b.tmp[:n3-n2]) + return + } + for _, u := range x { + encodeUint64(b, tag, u) + } +} + +func encodeUint64Opt(b *buffer, tag int, x uint64) { + if x == 0 { + return + } + encodeUint64(b, tag, x) +} + +func encodeInt64(b *buffer, tag int, x int64) { + u := uint64(x) + encodeUint64(b, tag, u) +} + +func encodeInt64s(b *buffer, tag int, x []int64) { + if len(x) > 2 { + // Use packed encoding + n1 := len(b.data) + for _, u := range x { + encodeVarint(b, uint64(u)) + } + n2 := len(b.data) + encodeLength(b, tag, n2-n1) + n3 := len(b.data) + copy(b.tmp[:], b.data[n2:n3]) + copy(b.data[n1+(n3-n2):], b.data[n1:n2]) + copy(b.data[n1:], b.tmp[:n3-n2]) + return + } + for _, u := range x { + encodeInt64(b, tag, u) + } +} + +func encodeInt64Opt(b *buffer, tag int, x int64) { + if x == 0 { + return + } + encodeInt64(b, tag, x) +} + +func encodeString(b *buffer, tag int, x string) { + encodeLength(b, tag, len(x)) + b.data = append(b.data, x...) 
+} + +func encodeStrings(b *buffer, tag int, x []string) { + for _, s := range x { + encodeString(b, tag, s) + } +} + +func encodeBool(b *buffer, tag int, x bool) { + if x { + encodeUint64(b, tag, 1) + } else { + encodeUint64(b, tag, 0) + } +} + +func encodeBoolOpt(b *buffer, tag int, x bool) { + if x { + encodeBool(b, tag, x) + } +} + +func encodeMessage(b *buffer, tag int, m message) { + n1 := len(b.data) + m.encode(b) + n2 := len(b.data) + encodeLength(b, tag, n2-n1) + n3 := len(b.data) + copy(b.tmp[:], b.data[n2:n3]) + copy(b.data[n1+(n3-n2):], b.data[n1:n2]) + copy(b.data[n1:], b.tmp[:n3-n2]) +} + +func unmarshal(data []byte, m message) (err error) { + b := buffer{data: data, typ: 2} + return decodeMessage(&b, m) +} + +func le64(p []byte) uint64 { + return uint64(p[0]) | uint64(p[1])<<8 | uint64(p[2])<<16 | uint64(p[3])<<24 | uint64(p[4])<<32 | uint64(p[5])<<40 | uint64(p[6])<<48 | uint64(p[7])<<56 +} + +func le32(p []byte) uint32 { + return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24 +} + +func decodeVarint(data []byte) (uint64, []byte, error) { + var u uint64 + for i := 0; ; i++ { + if i >= 10 || i >= len(data) { + return 0, nil, errors.New("bad varint") + } + u |= uint64(data[i]&0x7F) << uint(7*i) + if data[i]&0x80 == 0 { + return u, data[i+1:], nil + } + } +} + +func decodeField(b *buffer, data []byte) ([]byte, error) { + x, data, err := decodeVarint(data) + if err != nil { + return nil, err + } + b.field = int(x >> 3) + b.typ = int(x & 7) + b.data = nil + b.u64 = 0 + switch b.typ { + case 0: + b.u64, data, err = decodeVarint(data) + if err != nil { + return nil, err + } + case 1: + if len(data) < 8 { + return nil, errors.New("not enough data") + } + b.u64 = le64(data[:8]) + data = data[8:] + case 2: + var n uint64 + n, data, err = decodeVarint(data) + if err != nil { + return nil, err + } + if n > uint64(len(data)) { + return nil, errors.New("too much data") + } + b.data = data[:n] + data = data[n:] + case 5: + if len(data) < 4 { + return nil, errors.New("not enough data") + } + b.u64 = uint64(le32(data[:4])) + data = data[4:] + default: + return nil, fmt.Errorf("unknown wire type: %d", b.typ) + } + + return data, nil +} + +func checkType(b *buffer, typ int) error { + if b.typ != typ { + return errors.New("type mismatch") + } + return nil +} + +func decodeMessage(b *buffer, m message) error { + if err := checkType(b, 2); err != nil { + return err + } + dec := m.decoder() + data := b.data + for len(data) > 0 { + // pull varint field# + type + var err error + data, err = decodeField(b, data) + if err != nil { + return err + } + if b.field >= len(dec) || dec[b.field] == nil { + continue + } + if err := dec[b.field](b, m); err != nil { + return err + } + } + return nil +} + +func decodeInt64(b *buffer, x *int64) error { + if err := checkType(b, 0); err != nil { + return err + } + *x = int64(b.u64) + return nil +} + +func decodeInt64s(b *buffer, x *[]int64) error { + if b.typ == 2 { + // Packed encoding + data := b.data + for len(data) > 0 { + var u uint64 + var err error + + if u, data, err = decodeVarint(data); err != nil { + return err + } + *x = append(*x, int64(u)) + } + return nil + } + var i int64 + if err := decodeInt64(b, &i); err != nil { + return err + } + *x = append(*x, i) + return nil +} + +func decodeUint64(b *buffer, x *uint64) error { + if err := checkType(b, 0); err != nil { + return err + } + *x = b.u64 + return nil +} + +func decodeUint64s(b *buffer, x *[]uint64) error { + if b.typ == 2 { + data := b.data + // Packed encoding + for 
len(data) > 0 { + var u uint64 + var err error + + if u, data, err = decodeVarint(data); err != nil { + return err + } + *x = append(*x, u) + } + return nil + } + var u uint64 + if err := decodeUint64(b, &u); err != nil { + return err + } + *x = append(*x, u) + return nil +} + +func decodeString(b *buffer, x *string) error { + if err := checkType(b, 2); err != nil { + return err + } + *x = string(b.data) + return nil +} + +func decodeStrings(b *buffer, x *[]string) error { + var s string + if err := decodeString(b, &s); err != nil { + return err + } + *x = append(*x, s) + return nil +} + +func decodeBool(b *buffer, x *bool) error { + if err := checkType(b, 0); err != nil { + return err + } + if int64(b.u64) == 0 { + *x = false + } else { + *x = true + } + return nil +} diff --git a/vendor/github.com/google/pprof/profile/prune.go b/vendor/github.com/google/pprof/profile/prune.go new file mode 100644 index 0000000..b2f9fd5 --- /dev/null +++ b/vendor/github.com/google/pprof/profile/prune.go @@ -0,0 +1,194 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Implements methods to remove frames from profiles. + +package profile + +import ( + "fmt" + "regexp" + "strings" +) + +var ( + reservedNames = []string{"(anonymous namespace)", "operator()"} + bracketRx = func() *regexp.Regexp { + var quotedNames []string + for _, name := range append(reservedNames, "(") { + quotedNames = append(quotedNames, regexp.QuoteMeta(name)) + } + return regexp.MustCompile(strings.Join(quotedNames, "|")) + }() +) + +// simplifyFunc does some primitive simplification of function names. +func simplifyFunc(f string) string { + // Account for leading '.' on the PPC ELF v1 ABI. + funcName := strings.TrimPrefix(f, ".") + // Account for unsimplified names -- try to remove the argument list by trimming + // starting from the first '(', but skipping reserved names that have '('. + for _, ind := range bracketRx.FindAllStringSubmatchIndex(funcName, -1) { + foundReserved := false + for _, res := range reservedNames { + if funcName[ind[0]:ind[1]] == res { + foundReserved = true + break + } + } + if !foundReserved { + funcName = funcName[:ind[0]] + break + } + } + return funcName +} + +// Prune removes all nodes beneath a node matching dropRx, and not +// matching keepRx. If the root node of a Sample matches, the sample +// will have an empty stack. +func (p *Profile) Prune(dropRx, keepRx *regexp.Regexp) { + prune := make(map[uint64]bool) + pruneBeneath := make(map[uint64]bool) + + // simplifyFunc can be expensive, so cache results. + // Note that the same function name can be encountered many times due + // different lines and addresses in the same function. 
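+ // pruneFromHere reports whether a frame with the given (raw) function
+ // name matches dropRx without also being rescued by keepRx.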
+ pruneCache := map[string]bool{} // Map from function to whether or not to prune + pruneFromHere := func(s string) bool { + if r, ok := pruneCache[s]; ok { + return r + } + funcName := simplifyFunc(s) + if dropRx.MatchString(funcName) { + if keepRx == nil || !keepRx.MatchString(funcName) { + pruneCache[s] = true + return true + } + } + pruneCache[s] = false + return false + } + + for _, loc := range p.Location { + var i int + for i = len(loc.Line) - 1; i >= 0; i-- { + if fn := loc.Line[i].Function; fn != nil && fn.Name != "" { + if pruneFromHere(fn.Name) { + break + } + } + } + + if i >= 0 { + // Found matching entry to prune. + pruneBeneath[loc.ID] = true + + // Remove the matching location. + if i == len(loc.Line)-1 { + // Matched the top entry: prune the whole location. + prune[loc.ID] = true + } else { + loc.Line = loc.Line[i+1:] + } + } + } + + // Prune locs from each Sample + for _, sample := range p.Sample { + // Scan from the root to the leaves to find the prune location. + // Do not prune frames before the first user frame, to avoid + // pruning everything. + foundUser := false + for i := len(sample.Location) - 1; i >= 0; i-- { + id := sample.Location[i].ID + if !prune[id] && !pruneBeneath[id] { + foundUser = true + continue + } + if !foundUser { + continue + } + if prune[id] { + sample.Location = sample.Location[i+1:] + break + } + if pruneBeneath[id] { + sample.Location = sample.Location[i:] + break + } + } + } +} + +// RemoveUninteresting prunes and elides profiles using built-in +// tables of uninteresting function names. +func (p *Profile) RemoveUninteresting() error { + var keep, drop *regexp.Regexp + var err error + + if p.DropFrames != "" { + if drop, err = regexp.Compile("^(" + p.DropFrames + ")$"); err != nil { + return fmt.Errorf("failed to compile regexp %s: %v", p.DropFrames, err) + } + if p.KeepFrames != "" { + if keep, err = regexp.Compile("^(" + p.KeepFrames + ")$"); err != nil { + return fmt.Errorf("failed to compile regexp %s: %v", p.KeepFrames, err) + } + } + p.Prune(drop, keep) + } + return nil +} + +// PruneFrom removes all nodes beneath the lowest node matching dropRx, not including itself. +// +// Please see the example below to understand this method as well as +// the difference from Prune method. +// +// A sample contains Location of [A,B,C,B,D] where D is the top frame and there's no inline. +// +// PruneFrom(A) returns [A,B,C,B,D] because there's no node beneath A. +// Prune(A, nil) returns [B,C,B,D] by removing A itself. +// +// PruneFrom(B) returns [B,C,B,D] by removing all nodes beneath the first B when scanning from the bottom. +// Prune(B, nil) returns [D] because a matching node is found by scanning from the root. +func (p *Profile) PruneFrom(dropRx *regexp.Regexp) { + pruneBeneath := make(map[uint64]bool) + + for _, loc := range p.Location { + for i := 0; i < len(loc.Line); i++ { + if fn := loc.Line[i].Function; fn != nil && fn.Name != "" { + funcName := simplifyFunc(fn.Name) + if dropRx.MatchString(funcName) { + // Found matching entry to prune. + pruneBeneath[loc.ID] = true + loc.Line = loc.Line[i:] + break + } + } + } + } + + // Prune locs from each Sample + for _, sample := range p.Sample { + // Scan from the bottom leaf to the root to find the prune location. 
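+ // Unlike Prune, which scans from the root, this keeps the first
+ // matching location seen from the leaf and everything rootward of it.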
+		for i, loc := range sample.Location {
+			if pruneBeneath[loc.ID] {
+				sample.Location = sample.Location[i:]
+				break
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/imdario/mergo/CONTRIBUTING.md b/vendor/github.com/imdario/mergo/CONTRIBUTING.md
new file mode 100644
index 0000000..0a1ff9f
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/CONTRIBUTING.md
@@ -0,0 +1,112 @@
+
+# Contributing to mergo
+
+First off, thanks for taking the time to contribute! ❤️
+
+All types of contributions are encouraged and valued. See the [Table of Contents](#table-of-contents) for different ways to help and details about how this project handles them. Please make sure to read the relevant section before making your contribution. It will make it a lot easier for us maintainers and smooth out the experience for all involved. The community looks forward to your contributions. 🎉
+
+> And if you like the project, but just don't have time to contribute, that's fine. There are other easy ways to support the project and show your appreciation, which we would also be very happy about:
+> - Star the project
+> - Tweet about it
+> - Refer this project in your project's readme
+> - Mention the project at local meetups and tell your friends/colleagues
+
+
+## Table of Contents
+
+- [Code of Conduct](#code-of-conduct)
+- [I Have a Question](#i-have-a-question)
+- [I Want To Contribute](#i-want-to-contribute)
+- [Reporting Bugs](#reporting-bugs)
+- [Suggesting Enhancements](#suggesting-enhancements)
+
+## Code of Conduct
+
+This project and everyone participating in it is governed by the
+[mergo Code of Conduct](https://github.com/imdario/mergo/blob/master/CODE_OF_CONDUCT.md).
+By participating, you are expected to uphold this code. Please report unacceptable behavior
+to <>.
+
+
+## I Have a Question
+
+> If you want to ask a question, we assume that you have read the available [Documentation](https://pkg.go.dev/github.com/imdario/mergo).
+
+Before you ask a question, it is best to search for existing [Issues](https://github.com/imdario/mergo/issues) that might help you. In case you have found a suitable issue and still need clarification, you can write your question in this issue. It is also advisable to search the internet for answers first.
+
+If you then still feel the need to ask a question and need clarification, we recommend the following:
+
+- Open an [Issue](https://github.com/imdario/mergo/issues/new).
+- Provide as much context as you can about what you're running into.
+- Provide project and platform versions (nodejs, npm, etc), depending on what seems relevant.
+
+We will then take care of the issue as soon as possible.
+
+## I Want To Contribute
+
+> ### Legal Notice
+> When contributing to this project, you must agree that you have authored 100% of the content, that you have the necessary rights to the content and that the content you contribute may be provided under the project license.
+
+### Reporting Bugs
+
+
+#### Before Submitting a Bug Report
+
+A good bug report shouldn't leave others needing to chase you up for more information. Therefore, we ask you to investigate carefully, collect information and describe the issue in detail in your report. Please complete the following steps in advance to help us fix any potential bug as fast as possible.
+
+- Make sure that you are using the latest version.
+- Determine if your bug is really a bug and not an error on your side e.g. using incompatible environment components/versions (Make sure that you have read the [documentation](). If you are looking for support, you might want to check [this section](#i-have-a-question)).
+- To see if other users have experienced (and potentially already solved) the same issue you are having, check if there is not already a bug report existing for your bug or error in the [bug tracker](https://github.com/imdario/mergo/issues?q=label%3Abug).
+- Also make sure to search the internet (including Stack Overflow) to see if users outside of the GitHub community have discussed the issue.
+- Collect information about the bug:
+- Stack trace (Traceback)
+- OS, Platform and Version (Windows, Linux, macOS, x86, ARM)
+- Version of the interpreter, compiler, SDK, runtime environment, package manager, depending on what seems relevant.
+- Possibly your input and the output
+- Can you reliably reproduce the issue? And can you also reproduce it with older versions?
+
+
+#### How Do I Submit a Good Bug Report?
+
+> You must never report security related issues, vulnerabilities or bugs including sensitive information to the issue tracker, or elsewhere in public. Instead sensitive bugs must be sent by email to .
+
+
+We use GitHub issues to track bugs and errors. If you run into an issue with the project:
+
+- Open an [Issue](https://github.com/imdario/mergo/issues/new). (Since we can't be sure at this point whether it is a bug or not, we ask you not to talk about a bug yet and not to label the issue.)
+- Explain the behavior you would expect and the actual behavior.
+- Please provide as much context as possible and describe the *reproduction steps* that someone else can follow to recreate the issue on their own. This usually includes your code. For good bug reports you should isolate the problem and create a reduced test case.
+- Provide the information you collected in the previous section.
+
+Once it's filed:
+
+- The project team will label the issue accordingly.
+- A team member will try to reproduce the issue with your provided steps. If there are no reproduction steps or no obvious way to reproduce the issue, the team will ask you for those steps and mark the issue as `needs-repro`. Bugs with the `needs-repro` tag will not be addressed until they are reproduced.
+- If the team is able to reproduce the issue, it will be marked `needs-fix`, as well as possibly other tags (such as `critical`), and the issue will be left to be implemented by someone.
+
+### Suggesting Enhancements
+
+This section guides you through submitting an enhancement suggestion for mergo, **including completely new features and minor improvements to existing functionality**. Following these guidelines will help maintainers and the community to understand your suggestion and find related suggestions.
+
+
+#### Before Submitting an Enhancement
+
+- Make sure that you are using the latest version.
+- Read the [documentation]() carefully and find out if the functionality is already covered, maybe by an individual configuration.
+- Perform a [search](https://github.com/imdario/mergo/issues) to see if the enhancement has already been suggested. If it has, add a comment to the existing issue instead of opening a new one.
+- Find out whether your idea fits with the scope and aims of the project. It's up to you to make a strong case to convince the project's developers of the merits of this feature. Keep in mind that we want features that will be useful to the majority of our users and not just a small subset. If you're just targeting a minority of users, consider writing an add-on/plugin library.
+ + +#### How Do I Submit a Good Enhancement Suggestion? + +Enhancement suggestions are tracked as [GitHub issues](https://github.com/imdario/mergo/issues). + +- Use a **clear and descriptive title** for the issue to identify the suggestion. +- Provide a **step-by-step description of the suggested enhancement** in as many details as possible. +- **Describe the current behavior** and **explain which behavior you expected to see instead** and why. At this point you can also tell which alternatives do not work for you. +- You may want to **include screenshots and animated GIFs** which help you demonstrate the steps or point out the part which the suggestion is related to. You can use [this tool](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast) or [this tool](https://github.com/GNOME/byzanz) on Linux. +- **Explain why this enhancement would be useful** to most mergo users. You may also want to point out the other projects that solved it better and which could serve as inspiration. + + +## Attribution +This guide is based on the **contributing-gen**. [Make your own](https://github.com/bttger/contributing-gen)! diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md index 7e6f7ae..4f02874 100644 --- a/vendor/github.com/imdario/mergo/README.md +++ b/vendor/github.com/imdario/mergo/README.md @@ -1,6 +1,5 @@ # Mergo - [![GoDoc][3]][4] [![GitHub release][5]][6] [![GoCard][7]][8] @@ -9,6 +8,7 @@ [![Sourcegraph][11]][12] [![FOSSA Status][13]][14] [![Become my sponsor][15]][16] +[![Tidelift][17]][18] [1]: https://travis-ci.org/imdario/mergo.png [2]: https://travis-ci.org/imdario/mergo @@ -26,6 +26,8 @@ [14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield [15]: https://img.shields.io/github/sponsors/imdario [16]: https://github.com/sponsors/imdario +[17]: https://tidelift.com/badges/package/go/github.com%2Fimdario%2Fmergo +[18]: https://tidelift.com/subscription/pkg/go-github.com-imdario-mergo A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. @@ -55,7 +57,6 @@ If Mergo is useful to you, consider buying me a coffee, a beer, or making a mont ### Mergo in the wild -- [cli/cli](https://github.com/cli/cli) - [moby/moby](https://github.com/moby/moby) - [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) - [vmware/dispatch](https://github.com/vmware/dispatch) diff --git a/vendor/github.com/imdario/mergo/SECURITY.md b/vendor/github.com/imdario/mergo/SECURITY.md new file mode 100644 index 0000000..a5de61f --- /dev/null +++ b/vendor/github.com/imdario/mergo/SECURITY.md @@ -0,0 +1,14 @@ +# Security Policy + +## Supported Versions + +| Version | Supported | +| ------- | ------------------ | +| 0.3.x | :white_check_mark: | +| < 0.3 | :x: | + +## Security contact information + +To report a security vulnerability, please use the +[Tidelift security contact](https://tidelift.com/security). +Tidelift will coordinate the fix and disclosure. diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go index a13a7ee..b50d5c2 100644 --- a/vendor/github.com/imdario/mergo/map.go +++ b/vendor/github.com/imdario/mergo/map.go @@ -44,7 +44,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf } } // Remember, remember... 
- visited[h] = &visit{addr, typ, seen} + visited[h] = &visit{typ, seen, addr} } zeroValue := reflect.Value{} switch dst.Kind() { @@ -58,7 +58,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf } fieldName := field.Name fieldName = changeInitialCase(fieldName, unicode.ToLower) - if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) { + if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v), !config.ShouldNotDereference) || overwrite) { dstMap[fieldName] = src.Field(i).Interface() } } @@ -142,7 +142,7 @@ func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { func _map(dst, src interface{}, opts ...func(*Config)) error { if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { - return ErrNonPointerAgument + return ErrNonPointerArgument } var ( vDst, vSrc reflect.Value diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go index 8b4e2f4..0ef9b21 100644 --- a/vendor/github.com/imdario/mergo/merge.go +++ b/vendor/github.com/imdario/mergo/merge.go @@ -38,10 +38,11 @@ func isExportedComponent(field *reflect.StructField) bool { } type Config struct { + Transformers Transformers Overwrite bool + ShouldNotDereference bool AppendSlice bool TypeCheck bool - Transformers Transformers overwriteWithEmptyValue bool overwriteSliceWithEmptyValue bool sliceDeepCopy bool @@ -76,7 +77,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co } } // Remember, remember... - visited[h] = &visit{addr, typ, seen} + visited[h] = &visit{typ, seen, addr} } if config.Transformers != nil && !isReflectNil(dst) && dst.IsValid() { @@ -95,7 +96,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co } } } else { - if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) { + if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) { dst.Set(src) } } @@ -110,7 +111,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co } if src.Kind() != reflect.Map { - if overwrite { + if overwrite && dst.CanSet() { dst.Set(src) } return @@ -162,7 +163,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co dstSlice = reflect.ValueOf(dstElement.Interface()) } - if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy { + if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy { if typeCheck && srcSlice.Type() != dstSlice.Type() { return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) } @@ -194,22 +195,38 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co dst.SetMapIndex(key, dstSlice) } } - if dstElement.IsValid() && !isEmptyValue(dstElement) && (reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map || reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice) { - continue + + if dstElement.IsValid() && !isEmptyValue(dstElement, !config.ShouldNotDereference) { + if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice { + continue + } + if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map 
&& reflect.TypeOf(dstElement.Interface()).Kind() == reflect.Map { + continue + } } - if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement)) { + if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement, !config.ShouldNotDereference)) { if dst.IsNil() { dst.Set(reflect.MakeMap(dst.Type())) } dst.SetMapIndex(key, srcElement) } } + + // Ensure that all keys in dst are deleted if they are not in src. + if overwriteWithEmptySrc { + for _, key := range dst.MapKeys() { + srcElement := src.MapIndex(key) + if !srcElement.IsValid() { + dst.SetMapIndex(key, reflect.Value{}) + } + } + } case reflect.Slice: if !dst.CanSet() { break } - if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy { + if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy { dst.Set(src) } else if config.AppendSlice { if src.Type() != dst.Type() { @@ -244,12 +261,18 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co if src.Kind() != reflect.Interface { if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) { - if dst.CanSet() && (overwrite || isEmptyValue(dst)) { + if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) { dst.Set(src) } } else if src.Kind() == reflect.Ptr { - if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { - return + if !config.ShouldNotDereference { + if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { + return + } + } else { + if overwriteWithEmptySrc || (overwrite && !src.IsNil()) || dst.IsNil() { + dst.Set(src) + } } } else if dst.Elem().Type() == src.Type() { if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil { @@ -262,7 +285,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co } if dst.IsNil() || overwrite { - if dst.CanSet() && (overwrite || isEmptyValue(dst)) { + if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) { dst.Set(src) } break @@ -275,7 +298,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co break } default: - mustSet := (isEmptyValue(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) + mustSet := (isEmptyValue(dst, !config.ShouldNotDereference) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) if mustSet { if dst.CanSet() { dst.Set(src) @@ -326,6 +349,12 @@ func WithOverrideEmptySlice(config *Config) { config.overwriteSliceWithEmptyValue = true } +// WithoutDereference prevents dereferencing pointers when evaluating whether they are empty +// (i.e. a non-nil pointer is never considered empty). +func WithoutDereference(config *Config) { + config.ShouldNotDereference = true +} + // WithAppendSlice will make merge append slices instead of overwriting it. 
func WithAppendSlice(config *Config) { config.AppendSlice = true @@ -344,7 +373,7 @@ func WithSliceDeepCopy(config *Config) { func merge(dst, src interface{}, opts ...func(*Config)) error { if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { - return ErrNonPointerAgument + return ErrNonPointerArgument } var ( vDst, vSrc reflect.Value diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go index 9fe362d..0a721e2 100644 --- a/vendor/github.com/imdario/mergo/mergo.go +++ b/vendor/github.com/imdario/mergo/mergo.go @@ -20,7 +20,7 @@ var ( ErrNotSupported = errors.New("only structs, maps, and slices are supported") ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") - ErrNonPointerAgument = errors.New("dst must be a pointer") + ErrNonPointerArgument = errors.New("dst must be a pointer") ) // During deepMerge, must keep track of checks that are @@ -28,13 +28,13 @@ var ( // checks in progress are true when it reencounters them. // Visited are stored in a map indexed by 17 * a1 + a2; type visit struct { - ptr uintptr typ reflect.Type next *visit + ptr uintptr } // From src/pkg/encoding/json/encode.go. -func isEmptyValue(v reflect.Value) bool { +func isEmptyValue(v reflect.Value, shouldDereference bool) bool { switch v.Kind() { case reflect.Array, reflect.Map, reflect.Slice, reflect.String: return v.Len() == 0 @@ -50,7 +50,10 @@ func isEmptyValue(v reflect.Value) bool { if v.IsNil() { return true } - return isEmptyValue(v.Elem()) + if shouldDereference { + return isEmptyValue(v.Elem(), shouldDereference) + } + return false case reflect.Func: return v.IsNil() case reflect.Invalid: diff --git a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md index cd43074..5e779fe 100644 --- a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md +++ b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md @@ -1,3 +1,312 @@ +## 2.9.2 + +### Maintenance +- Bump github.com/go-task/slim-sprig (#1167) [3fcc5bf] +- Bump github.com/onsi/gomega from 1.27.3 to 1.27.4 (#1163) [6143ffe] + +## 2.9.1 + +### Fixes +This release fixes a longstanding issue where `ginkgo -coverpkg=./...` would not work. This is now resolved and fixes [#1161](https://github.com/onsi/ginkgo/issues/1161) and [#995](https://github.com/onsi/ginkgo/issues/995) +- Support -coverpkg=./... [26ca1b5] +- document coverpkg a bit more clearly [fc44c3b] + +### Maintenance +- bump various dependencies +- Improve Documentation and fix typo (#1158) [93de676] + +## 2.9.0 + +### Features +- AttachProgressReporter is an experimental feature that allows users to provide arbitrary information when a ProgressReport is requested [28801fe] + +- GinkgoT() has been expanded to include several Ginkgo-specific methods [2bd5a3b] + + The intent is to enable the development of third-party libraries that integrate deeply with Ginkgo using `GinkgoT()` to access Ginkgo's functionality. 
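+
+  As a rough sketch of that intent (illustrative only; the `minimalT` interface and `requireEnv` helper below are hypothetical, not part of Ginkgo), a third-party helper can depend on just the `testing.T`-style subset that `GinkgoT()` satisfies:
+
+```go
+// minimalT is the subset of GinkgoT() (and *testing.T) this helper needs.
+type minimalT interface {
+	Helper()
+	Fatalf(format string, args ...interface{})
+}
+
+// requireEnv fails the spec unless the named environment variable is set.
+func requireEnv(t minimalT, key string) string {
+	t.Helper()
+	v := os.Getenv(key)
+	if v == "" {
+		t.Fatalf("required environment variable %s is not set", key)
+	}
+	return v
+}
+
+// Inside a spec: addr := requireEnv(GinkgoT(), "SERVICE_ADDR")
+```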
+
+## 2.8.4
+
+### Features
+- Add OmitSuiteSetupNodes to JunitReportConfig (#1147) [979fbc2]
+- Add a reference to ginkgolinter in docs.index.md (#1143) [8432589]
+
+### Fixes
+- rename tools hack to see if it fixes things for downstream users [a8bb39a]
+
+### Maintenance
+- Bump golang.org/x/text (#1144) [41b2a8a]
+- Bump github.com/onsi/gomega from 1.27.0 to 1.27.1 (#1142) [7c4f583]
+
+## 2.8.3
+
+Released to fix security issue in golang.org/x/net dependency
+
+### Maintenance
+
+- Bump golang.org/x/net from 0.6.0 to 0.7.0 (#1141) [fc1a02e]
+- remove tools.go hack from documentation [0718693]
+
+## 2.8.2
+
+Ginkgo now includes a `tools.go` file in the root directory of the `ginkgo` package. This should allow modules that simply `go get github.com/onsi/ginkgo/v2` to also pull in the CLI dependencies. This obviates the need for consumers of Ginkgo to have their own `tools.go` file and makes it simpler to ensure that the version of the `ginkgo` CLI being used matches the version of the library. You can simply run `go run github.com/onsi/ginkgo/v2/ginkgo` to run the version of the cli associated with your package go.mod.
+
+### Maintenance
+
+- Bump github.com/onsi/gomega from 1.26.0 to 1.27.0 (#1139) [5767b0a]
+- Fix minor typos (#1138) [e1e9723]
+- Fix link in V2 Migration Guide (#1137) [a588f60]
+
+## 2.8.1
+
+### Fixes
+- lock around default report output to avoid triggering the race detector when calling By from goroutines [2d5075a]
+- don't run ReportEntries through sprintf [febbe38]
+
+### Maintenance
+- Bump golang.org/x/tools from 0.5.0 to 0.6.0 (#1135) [11a4860]
+- test: update matrix for Go 1.20 (#1130) [4890a62]
+- Bump golang.org/x/sys from 0.4.0 to 0.5.0 (#1133) [a774638]
+- Bump github.com/onsi/gomega from 1.25.0 to 1.26.0 (#1120) [3f233bd]
+- Bump github-pages from 227 to 228 in /docs (#1131) [f9b8649]
+- Bump activesupport from 6.0.6 to 6.0.6.1 in /docs (#1127) [6f8c042]
+- Update index.md with instructions on how to upgrade Ginkgo [833a75e]
+
+## 2.8.0
+
+### Features
+
+- Introduce GinkgoHelper() to track and exclude helper functions from potential CodeLocations [e19f556]
+
+Modeled after `testing.T.Helper()`. Now, rather than write code like:
+
+```go
+func helper(model Model) {
+	Expect(model).WithOffset(1).To(BeValid())
+	Expect(model.SerialNumber).WithOffset(1).To(MatchRegexp(`[a-f0-9]*`))
+}
+```
+
+you can stop tracking offsets (which makes nesting and composing helpers nearly impossible) and simply write:
+
+```go
+func helper(model Model) {
+	GinkgoHelper()
+	Expect(model).To(BeValid())
+	Expect(model.SerialNumber).To(MatchRegexp(`[a-f0-9]*`))
+}
+```
+
+- Introduce GinkgoLabelFilter() and Label().MatchesLabelFilter() to make it possible to programmatically match filters (fixes #1119) [2f6597c]
+
+You can now write code like this:
+
+```go
+BeforeSuite(func() {
+	if Label("slow").MatchesLabelFilter(GinkgoLabelFilter()) {
+		// do slow setup
+	}
+
+	if Label("fast").MatchesLabelFilter(GinkgoLabelFilter()) {
+		// do fast setup
+	}
+})
+```
+
+to programmatically check whether a given set of labels will match the configured `--label-filter`.
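+
+  As a side note (an illustrative sketch, not from the changelog; the filter strings are invented), `MatchesLabelFilter` accepts the same expression syntax as `--label-filter`, so a set of labels can also be checked against a filter expression directly:
+
+```go
+labels := Label("fast", "network")
+labels.MatchesLabelFilter("fast && !slow")   // true
+labels.MatchesLabelFilter("slow || offline") // false
+```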
+
+### Maintenance
+
+- Bump webrick from 1.7.0 to 1.8.1 in /docs (#1125) [ea4966e]
+- cdeql: add ruby language (#1124) [9dd275b]
+- dependabot: add bundler package-ecosystem for docs (#1123) [14e7bdd]
+
+## 2.7.1
+
+### Fixes
+- Bring back SuiteConfig.EmitSpecProgress to avoid compilation issue for consumers that set it manually [d2a1cb0]
+
+### Maintenance
+- Bump github.com/onsi/gomega from 1.24.2 to 1.25.0 (#1118) [cafece6]
+- Bump golang.org/x/tools from 0.4.0 to 0.5.0 (#1111) [eda66c2]
+- Bump golang.org/x/sys from 0.3.0 to 0.4.0 (#1112) [ac5ccaa]
+- Bump github.com/onsi/gomega from 1.24.1 to 1.24.2 (#1097) [eee6480]
+
+## 2.7.0
+
+### Features
+- Introduce ContinueOnFailure for Ordered containers [e0123ca] - Ordered containers that are also decorated with ContinueOnFailure will not stop running specs after the first spec fails.
+- Support for bootstrap commands to use custom data for templates (#1110) [7a2b242]
+- Support for labels and pending decorator in ginkgo outline output (#1113) [e6e3b98]
+- Color aliases for custom color support (#1101) [49fab7a]
+
+### Fixes
+- correctly ensure deterministic spec order, even if specs are generated by iterating over a map [89dda20]
+- Fix a bug where timed-out specs were not correctly treated as failures when determining whether or not to run AfterAlls in an Ordered container.
+- Ensure go test coverprofile outputs to the expected location (#1105) [b0bd77b]
+
+## 2.6.1
+
+### Features
+- Override formatter colors from envvars - this is a new feature but an alternative approach involving config files might be taken in the future (#1095) [60240d1]
+
+### Fixes
+- GinkgoRecover now supports ignoring panics that match a specific, hidden, interface [301f3e2]
+
+### Maintenance
+- Bump github.com/onsi/gomega from 1.24.0 to 1.24.1 (#1077) [3643823]
+- Bump golang.org/x/tools from 0.2.0 to 0.4.0 (#1090) [f9f856e]
+- Bump nokogiri from 1.13.9 to 1.13.10 in /docs (#1091) [0d7087e]
+
+## 2.6.0
+
+### Features
+- `ReportBeforeSuite` provides access to the suite report before the suite begins.
+- Add junit config option for omitting leafnodetype (#1088) [956e6d2]
+- Add support to customize junit report config to omit spec labels (#1087) [de44005]
+
+### Fixes
+- Fix stack trace pruning so that it has a chance of working on windows [2165648]
+
+## 2.5.1
+
+### Fixes
+- skipped tests only show as 'S' when running with -v [3ab38ae]
+- Fix typo in docs/index.md (#1082) [55fc58d]
+- Fix typo in docs/index.md (#1081) [8a14f1f]
+- Fix link notation in docs/index.md (#1080) [2669612]
+- Fix typo in `--progress` deprecation message (#1076) [b4b7edc]
+
+### Maintenance
+- chore: Included githubactions in the dependabot config (#976) [baea341]
+- Bump golang.org/x/sys from 0.1.0 to 0.2.0 (#1075) [9646297]
+
+## 2.5.0
+
+### Ginkgo output now includes a timeline-view of the spec
+
+This commit changes Ginkgo's default output. Spec details are now
+presented as a **timeline** that includes events that occur during the spec
+lifecycle interleaved with any GinkgoWriter content. This makes it much easier
+to understand the flow of a spec and where a given failure occurs.
+
+The --progress, --slow-spec-threshold, --always-emit-ginkgo-writer flags
+and the SuppressProgressReporting decorator have all been deprecated. Instead
+the existing -v and -vv flags better capture the level of verbosity to display. However,
+a new --show-node-events flag is added to include node `> Enter` and `< Exit` events
+in the spec timeline.
+
+In addition, JUnit reports now include the timeline (rendered with -vv) and custom JUnit
+reports can be configured and generated using
+`GenerateJUnitReportWithConfig(report types.Report, dst string, config JunitReportConfig)`
+
+Code should continue to work unchanged with this version of Ginkgo - however if you have tooling that
+was relying on the specific output format of Ginkgo you _may_ run into issues. Ginkgo's console output is not guaranteed to be stable for tooling and automation purposes. You should, instead, use Ginkgo's JSON format
+to build tooling on top of as it has stronger guarantees to be stable from version to version.
+
+### Features
+- Provide details about which timeout expired [0f2fa27]
+
+### Fixes
+- Add Support Policy to docs [c70867a]
+
+### Maintenance
+- Bump github.com/onsi/gomega from 1.22.1 to 1.23.0 (#1070) [bb3b4e2]
+
+## 2.4.0
+
+### Features
+
+- DeferCleanup supports functions with multiple-return values [5e33c75]
+- Add GinkgoLogr (#1067) [bf78c28]
+- Introduction of 'MustPassRepeatedly' decorator (#1051) [047c02f]
+
+### Fixes
+- correcting some typos (#1064) [1403d3c]
+- fix flaky internal_integration interrupt specs [2105ba3]
+- Correct busted link in README [be6b5b9]
+
+### Maintenance
+- Bump actions/checkout from 2 to 3 (#1062) [8a2f483]
+- Bump golang.org/x/tools from 0.1.12 to 0.2.0 (#1065) [529c4e8]
+- Bump github/codeql-action from 1 to 2 (#1061) [da09146]
+- Bump actions/setup-go from 2 to 3 (#1060) [918040d]
+- Bump github.com/onsi/gomega from 1.22.0 to 1.22.1 (#1053) [2098e4d]
+- Bump nokogiri from 1.13.8 to 1.13.9 in /docs (#1066) [1d74122]
+- Add GHA to dependabot config [4442772]
+
+## 2.3.1
+
+## Fixes
+Several users were invoking `ginkgo` by installing the latest version of the cli via `go install github.com/onsi/ginkgo/v2/ginkgo@latest`. When 2.3.0 was released this resulted in an influx of issues as CI systems failed due to a change in the internal contract between the Ginkgo CLI and the Ginkgo library. Ginkgo only supports running the same version of the library as the cli (which is why both are packaged in the same repository).
+
+With this patch release, the ginkgo CLI can now identify a version mismatch and emit a helpful error message.
+
+- Ginkgo cli can identify version mismatches and emit a helpful error message [bc4ae2f]
+- further emphasize that a version match is required when running Ginkgo on CI and/or locally [2691dd8]
+
+### Maintenance
+- bump gomega to v1.22.0 [822a937]
+
+## 2.3.0
+
+### Interruptible Nodes and Timeouts
+
+Ginkgo now supports per-node and per-spec timeouts on interruptible nodes. Check out the [documentation for all the details](https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes) but the gist is you can now write specs like this:
+
+```go
+It("is interruptible", func(ctx SpecContext) { // or context.Context instead of SpecContext, both are valid.
+	// do things until `ctx.Done()` is closed, for example:
+	req, err := http.NewRequestWithContext(ctx, "POST", "/build-widgets", nil)
+	Expect(err).NotTo(HaveOccurred())
+	_, err = http.DefaultClient.Do(req)
+	Expect(err).NotTo(HaveOccurred())
+
+	Eventually(client.WidgetCount).WithContext(ctx).Should(Equal(17))
+}, NodeTimeout(time.Second*20), GracePeriod(5*time.Second))
+```
+
+and have Ginkgo ensure that the node completes before the timeout elapses. If it does elapse, or if an external interrupt is received (e.g. `^C`) then Ginkgo will cancel the context and wait for the Grace Period for the node to exit before proceeding with any cleanup nodes associated with the spec. The `ctx` provided by Ginkgo can also be passed down to Gomega's `Eventually` to have all assertions within the node governed by a single deadline.
+
+### Features
+
+- Ginkgo now records any additional failures that occur during the cleanup of a failed spec. In prior versions this information was quietly discarded, but the introduction of a more rigorous approach to timeouts and interruptions allows Ginkgo to better track subsequent failures.
+- `SpecContext` also provides a mechanism for third-party libraries to provide additional information when a Progress Report is generated. Gomega uses this to provide the current state of an `Eventually().WithContext()` assertion when a Progress Report is requested.
+- DescribeTable now exits with an error if it is not passed any Entries [a4c9865]
+
+## Fixes
+- fixes crashes on newer Ruby 3 installations by upgrading github-pages gem dependency [92c88d5]
+- Make the outline command able to use the DSL import [1be2427]
+
+## Maintenance
+- chore(docs): delete no meaning d [57c373c]
+- chore(docs): Fix hyperlinks [30526d5]
+- chore(docs): fix code blocks without language settings [cf611c4]
+- fix intra-doc link [b541bcb]
+
+## 2.2.0
+
+### Generate real-time Progress Reports [f91377c]
+
+Ginkgo can now generate Progress Reports to point users at the current running line of code (including a preview of the actual source code) and a best guess at the most relevant subroutines.
+
+These Progress Reports allow users to debug stuck or slow tests without exiting the Ginkgo process. A Progress Report can be generated at any time by sending Ginkgo a `SIGINFO` (`^T` on MacOS/BSD) or `SIGUSR1`.
+
+In addition, the user can specify `--poll-progress-after` and `--poll-progress-interval` to have Ginkgo start periodically emitting progress reports if a given node takes too long. These can be overridden/set on a per-node basis with the `PollProgressAfter` and `PollProgressInterval` decorators.
+
+Progress Reports are emitted to stdout, and also stored in the machine-readable report formats that Ginkgo supports.
+
+Ginkgo also uses this progress reporting infrastructure under the hood when handling timeouts and interrupts. This yields much more focused, useful, and informative stack traces than previously.
+
+### Features
+- `BeforeSuite`, `AfterSuite`, `SynchronizedBeforeSuite`, `SynchronizedAfterSuite`, and `ReportAfterSuite` now support (the relevant subset of) decorators. These can be passed in _after_ the callback functions that are usually passed into these nodes.
+
+  As a result the **signature of these methods has changed** and now includes a trailing `args ...interface{}`. For most users simply using the DSL, this change is transparent. However if you were assigning one of these functions to a custom variable (or passing it around) then your code may need to change to reflect the new signature.
+
+### Maintenance
+- Modernize the invocation of Ginkgo in github actions [0ffde58]
+- Update recommended CI settings in docs [896bbb9]
+- Speed up unnecessarily slow integration test [6d3a90e]
+
 ## 2.1.6
 
 ### Fixes
@@ -77,7 +386,7 @@ See [https://onsi.github.io/ginkgo/MIGRATING_TO_V2](https://onsi.github.io/ginkg
 
 Ginkgo 2.0 now has a Release Candidate. 1.16.5 advertises the existence of the RC.
1.16.5 deprecates GinkgoParallelNode in favor of GinkgoParallelProcess
 
-You can silence the RC advertisement by setting an `ACK_GINKG_RC=true` environment variable or creating a file in your home directory called `.ack-ginkgo-rc`
+You can silence the RC advertisement by setting an `ACK_GINKGO_RC=true` environment variable or creating a file in your home directory called `.ack-ginkgo-rc`
 
 ## 1.16.4
@@ -184,7 +493,7 @@ You can silence the RC advertisement by setting an `ACK_GINKG_RC=true` environme
 - replace tail package with maintained one. this fixes go get errors (#667) [4ba33d4]
 - improve ginkgo performance - makes progress on #644 [a14f98e]
 - fix convert integration tests [1f8ba69]
-- fix typo succesful -> successful (#663) [1ea49cf]
+- fix typo successful -> successful (#663) [1ea49cf]
 - Fix invalid link (#658) [b886136]
 - convert utility : Include comments from source (#657) [1077c6d]
 - Explain what BDD means [d79e7fb]
@@ -278,7 +587,7 @@ You can silence the RC advertisement by setting an `ACK_GINKG_RC=true` environme
 - Make generated Junit file compatible with "Maven Surefire" (#488) [e51bee6]
 - all: gofmt [000d317]
 - Increase eventually timeout to 30s [c73579c]
-- Clarify asynchronous test behaviour [294d8f4]
+- Clarify asynchronous test behavior [294d8f4]
 - Travis badge should only show master [26d2143]
 
 ## 1.5.0 5/10/2018
@@ -296,13 +605,13 @@ You can silence the RC advertisement by setting an `ACK_GINKG_RC=true` environme
 - When running a test and calculating the coverage using the `-coverprofile` and `-outputdir` flags, Ginkgo fails with an error if the directory does not exist. This is due to an [issue in go 1.10](https://github.com/golang/go/issues/24588) (#446) [b36a6e0]
 - `unfocus` command ignores vendor folder (#459) [e5e551c, c556e43, a3b6351, 9a820dd]
 - Ignore packages whose tests are all ignored by go (#456) [7430ca7, 6d8be98]
-- Increase the threshold when checking time measuments (#455) [2f714bf, 68f622c]
+- Increase the threshold when checking time measurements (#455) [2f714bf, 68f622c]
 - Fix race condition in coverage tests (#423) [a5a8ff7, ab9c08b]
 - Add an extra new line after reporting spec run completion for test2json [874520d]
 - added name name field to junit reported testsuite [ae61c63]
 - Do not set the run time of a spec when the dryRun flag is used (#438) [457e2d9, ba8e856]
 - Process FWhen and FSpecify when unfocusing (#434) [9008c7b, ee65bd, df87dfe]
-- Synchronise the access to the state of specs to avoid race conditions (#430) [7d481bc, ae6829d]
+- Synchronize the access to the state of specs to avoid race conditions (#430) [7d481bc, ae6829d]
 - Added Duration on GinkgoTestDescription (#383) [5f49dad, 528417e, 0747408, 329d7ed]
 - Fix Ginkgo stack trace on failure for Specify (#415) [b977ede, 65ca40e, 6c46eb8]
 - Update README with Go 1.6+, Golang -> Go (#409) [17f6b97, bc14b66, 20d1598]
diff --git a/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md b/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md
index 1507940..1da92fe 100644
--- a/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md
+++ b/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md
@@ -8,6 +8,6 @@ Your contributions to Ginkgo are essential for its long-term maintenance and imp
 - When adding to the Ginkgo CLI, note that there are very few unit tests. Please add an integration test.
 - Make sure all the tests succeed via `ginkgo -r -p`
 - Vet your changes via `go vet ./...`
-- Update the documentation. Ginko uses `godoc` comments and documentation in `docs/index.md`.
You can run `bundle exec jekyll serve` in the `docs` directory to preview your changes.
+- Update the documentation. Ginkgo uses `godoc` comments and documentation in `docs/index.md`. You can run `bundle exec jekyll serve` in the `docs` directory to preview your changes.
 
 Thanks for supporting Ginkgo!
\ No newline at end of file
diff --git a/vendor/github.com/onsi/ginkgo/v2/README.md b/vendor/github.com/onsi/ginkgo/v2/README.md
index 58507c3..d0473a4 100644
--- a/vendor/github.com/onsi/ginkgo/v2/README.md
+++ b/vendor/github.com/onsi/ginkgo/v2/README.md
@@ -4,11 +4,7 @@
 
 ---
 
-# Ginkgo 2.0 is now Generally Available!
-
-You can learn more about 2.0 in the [Migration Guide](https://onsi.github.io/ginkgo/MIGRATING_TO_V2)!
-
----
+# Ginkgo
 
 Ginkgo is a mature testing framework for Go designed to help you write expressive specs. Ginkgo builds on top of Go's `testing` foundation and is complemented by the [Gomega](https://github.com/onsi/gomega) matcher library. Together, Ginkgo and Gomega let you express the intent behind your specs clearly:
 
@@ -33,53 +29,53 @@ Describe("Checking books out of the library", Label("library"), func() {
 	})
 
 	When("the library has the book in question", func() {
-		BeforeEach(func() {
-			Expect(library.Store(book)).To(Succeed())
+		BeforeEach(func(ctx SpecContext) {
+			Expect(library.Store(ctx, book)).To(Succeed())
 		})
 
 		Context("and the book is available", func() {
-			It("lends it to the reader", func() {
-				Expect(valjean.Checkout(library, "Les Miserables")).To(Succeed())
+			It("lends it to the reader", func(ctx SpecContext) {
+				Expect(valjean.Checkout(ctx, library, "Les Miserables")).To(Succeed())
 				Expect(valjean.Books()).To(ContainElement(book))
-				Expect(library.UserWithBook(book)).To(Equal(valjean))
-			})
+				Expect(library.UserWithBook(ctx, book)).To(Equal(valjean))
+			}, SpecTimeout(time.Second * 5))
 		})
 
 		Context("but the book has already been checked out", func() {
 			var javert *users.User
-			BeforeEach(func() {
+			BeforeEach(func(ctx SpecContext) {
 				javert = users.NewUser("Javert")
-				Expect(javert.Checkout(library, "Les Miserables")).To(Succeed())
+				Expect(javert.Checkout(ctx, library, "Les Miserables")).To(Succeed())
 			})
 
-			It("tells the user", func() {
-				err := valjean.Checkout(library, "Les Miserables")
+			It("tells the user", func(ctx SpecContext) {
+				err := valjean.Checkout(ctx, library, "Les Miserables")
 				Expect(err).To(MatchError("Les Miserables is currently checked out"))
-			})
+			}, SpecTimeout(time.Second * 5))
 
-			It("lets the user place a hold and get notified later", func() {
-				Expect(valjean.Hold(library, "Les Miserables")).To(Succeed())
-				Expect(valjean.Holds()).To(ContainElement(book))
+			It("lets the user place a hold and get notified later", func(ctx SpecContext) {
+				Expect(valjean.Hold(ctx, library, "Les Miserables")).To(Succeed())
+				Expect(valjean.Holds(ctx)).To(ContainElement(book))
 
 				By("when Javert returns the book")
-				Expect(javert.Return(library, book)).To(Succeed())
+				Expect(javert.Return(ctx, library, book)).To(Succeed())
 
 				By("it eventually informs Valjean")
 				notification := "Les Miserables is ready for pick up"
-				Eventually(valjean.Notifications).Should(ContainElement(notification))
+				Eventually(ctx, valjean.Notifications).Should(ContainElement(notification))
 
-				Expect(valjean.Checkout(library, "Les Miserables")).To(Succeed())
-				Expect(valjean.Books()).To(ContainElement(book))
-				Expect(valjean.Holds()).To(BeEmpty())
-			})
+				Expect(valjean.Checkout(ctx, library, "Les Miserables")).To(Succeed())
+				Expect(valjean.Books(ctx)).To(ContainElement(book))
+				Expect(valjean.Holds(ctx)).To(BeEmpty())
+			}, SpecTimeout(time.Second * 10))
 		})
 	})
 
 	When("the library does not have the book in question", func() {
-		It("tells the reader the book is unavailable", func() {
-			err := valjean.Checkout(library, "Les Miserables")
+		It("tells the reader the book is unavailable", func(ctx SpecContext) {
+			err := valjean.Checkout(ctx, library, "Les Miserables")
 			Expect(err).To(MatchError("Les Miserables is not in the library catalog"))
-		})
+		}, SpecTimeout(time.Second * 5))
 	})
 })
 ```
 
@@ -90,9 +86,9 @@ If you have a question, comment, bug report, feature request, etc. please open a
 
 ## Capabilities
 
-Whether writing basic unit specs, complex integration specs, or even performance specs - Ginkgo gives you an expressive Domain-Specific Language (DSL) that will be familiar to users coming from frameworks such as [Quick](https://github.com/Quick/Quick), [RSpec](https://rspec.info), [Jasmine](https://jasmine.github.io), and [Busted](https://olivinelabs.com/busted/). This style of testing is sometimes referred to as "Behavior-Driven Development" (BDD) though Ginkgo's utility extends beyond acceptance-level testing.
+Whether writing basic unit specs, complex integration specs, or even performance specs - Ginkgo gives you an expressive Domain-Specific Language (DSL) that will be familiar to users coming from frameworks such as [Quick](https://github.com/Quick/Quick), [RSpec](https://rspec.info), [Jasmine](https://jasmine.github.io), and [Busted](https://lunarmodules.github.io/busted/). This style of testing is sometimes referred to as "Behavior-Driven Development" (BDD) though Ginkgo's utility extends beyond acceptance-level testing.
 
-With Ginkgo's DSL you can use nestable [`Describe`, `Context` and `When` container nodes](https://onsi.github.io/ginkgo/#organizing-specs-with-container-nodes) to help you organize your specs. [`BeforeEach` and `AfterEach` setup nodes](https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and cleanup. [`It` and `Specify` subject nodes](https://onsi.github.io/ginkgo/#spec-subjects-it) that hold your assertions. [`BeforeSuite` and `AfterSuite` nodes](https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite) to prep for and cleanup after a suite... and [much more!](https://onsi.github.io/ginkgo/#writing-specs)
+With Ginkgo's DSL you can use nestable [`Describe`, `Context` and `When` container nodes](https://onsi.github.io/ginkgo/#organizing-specs-with-container-nodes) to help you organize your specs. [`BeforeEach` and `AfterEach` setup nodes](https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and cleanup. [`It` and `Specify` subject nodes](https://onsi.github.io/ginkgo/#spec-subjects-it) that hold your assertions. [`BeforeSuite` and `AfterSuite` nodes](https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite) to prep for and cleanup after a suite... and [much more!](https://onsi.github.io/ginkgo/#writing-specs).
 
 At runtime, Ginkgo can run your specs in reproducibly [random order](https://onsi.github.io/ginkgo/#spec-randomization) and has sophisticated support for [spec parallelization](https://onsi.github.io/ginkgo/#spec-parallelization).
In fact, running specs in parallel is as easy as @@ -100,7 +96,7 @@ At runtime, Ginkgo can run your specs in reproducibly [random order](https://ons ginkgo -p ``` -By following [established patterns for writing parallel specs](https://onsi.github.io/ginkgo/#patterns-for-parallel-integration-specs) you can build even large, complex integration suites that parallelize cleanly and run performantly. +By following [established patterns for writing parallel specs](https://onsi.github.io/ginkgo/#patterns-for-parallel-integration-specs) you can build even large, complex integration suites that parallelize cleanly and run performantly. And you don't have to worry about your spec suite hanging or leaving a mess behind - Ginkgo provides a per-node `context.Context` and the capability to interrupt the spec after a set period of time - and then clean up. As your suites grow Ginkgo helps you keep your specs organized with [labels](https://onsi.github.io/ginkgo/#spec-labels) and lets you easily run [subsets of specs](https://onsi.github.io/ginkgo/#filtering-specs), either [programmatically](https://onsi.github.io/ginkgo/#focused-specs) or on the [command line](https://onsi.github.io/ginkgo/#combining-filters). And Ginkgo's reporting infrastructure generates machine-readable output in a [variety of formats](https://onsi.github.io/ginkgo/#generating-machine-readable-reports) _and_ allows you to build your own [custom reporting infrastructure](https://onsi.github.io/ginkgo/#generating-reports-programmatically). diff --git a/vendor/github.com/onsi/ginkgo/v2/RELEASING.md b/vendor/github.com/onsi/ginkgo/v2/RELEASING.md index 0c80f66..363815d 100644 --- a/vendor/github.com/onsi/ginkgo/v2/RELEASING.md +++ b/vendor/github.com/onsi/ginkgo/v2/RELEASING.md @@ -1,7 +1,13 @@ A Ginkgo release is a tagged git sha and a GitHub release. To cut a release: 1. Ensure CHANGELOG.md is up to date. 
- - Use `git log --pretty=format:'- %s [%h]' HEAD...vX.X.X` to list all the commits since the last release + - Use + ```bash + LAST_VERSION=$(git tag --sort=version:refname | tail -n1) + CHANGES=$(git log --pretty=format:'- %s [%h]' HEAD...$LAST_VERSION) + echo -e "## NEXT\n\n$CHANGES\n\n### Features\n\n### Fixes\n\n### Maintenance\n\n$(cat CHANGELOG.md)" > CHANGELOG.md + ``` + to update the changelog - Categorize the changes into - Breaking Changes (requires a major version) - New Features (minor version) diff --git a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go index aaede5c..a244bdc 100644 --- a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go @@ -21,8 +21,8 @@ import ( "os" "path/filepath" "strings" - "time" + "github.com/go-logr/logr" "github.com/onsi/ginkgo/v2/formatter" "github.com/onsi/ginkgo/v2/internal" "github.com/onsi/ginkgo/v2/internal/global" @@ -46,7 +46,9 @@ func init() { var err error flagSet, err = types.BuildTestSuiteFlagSet(&suiteConfig, &reporterConfig) exitIfErr(err) - GinkgoWriter = internal.NewWriter(os.Stdout) + writer := internal.NewWriter(os.Stdout) + GinkgoWriter = writer + GinkgoLogr = internal.GinkgoLogrFunc(writer) } func exitIfErr(err error) { @@ -77,7 +79,7 @@ func exitIfErrors(errors []error) { } } -//The interface implemented by GinkgoWriter +// The interface implemented by GinkgoWriter type GinkgoWriterInterface interface { io.Writer @@ -89,6 +91,15 @@ type GinkgoWriterInterface interface { ClearTeeWriters() } +/* +SpecContext is the context object passed into nodes that are subject to a timeout or need to be notified of an interrupt. It implements the standard context.Context interface but also contains additional helpers to provide an extensibility point for Ginkgo. (As an example, Gomega's Eventually can use the methods defined on SpecContext to provide deeper integration with Ginkgo). + +You can do anything with SpecContext that you do with a typical context.Context including wrapping it with any of the context.With* methods. + +Ginkgo will cancel the SpecContext when a node is interrupted (e.g. by the user sending an interrupt signal) or when a node has exceeded its allowed run-time. Note, however, that even in cases where a node has a deadline, SpecContext will not return a deadline via .Deadline(). This is because Ginkgo does not use a WithDeadline() context to model node deadlines as Ginkgo needs control over the precise timing of the context cancellation to ensure it can provide an accurate progress report at the moment of cancellation. +*/ +type SpecContext = internal.SpecContext + /* GinkgoWriter implements a GinkgoWriterInterface and io.Writer @@ -103,7 +114,12 @@ You can learn more at https://onsi.github.io/ginkgo/#logging-output */ var GinkgoWriter GinkgoWriterInterface -//The interface by which Ginkgo receives *testing.T +/* +GinkgoLogr is a logr.Logger that writes to GinkgoWriter +*/ +var GinkgoLogr logr.Logger + +// The interface by which Ginkgo receives *testing.T type GinkgoTestingT interface { Fail() } @@ -147,6 +163,29 @@ func GinkgoParallelProcess() int { return suiteConfig.ParallelProcess } +/* +GinkgoHelper marks the function it's called in as a test helper. When a failure occurs inside a helper function, Ginkgo will skip the helper when analyzing the stack trace to identify where the failure occurred. + +This is an alternative, simpler, mechanism to passing in a skip offset when calling Fail or using Gomega. 
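+
+For example (an illustrative sketch; expectRecent is a hypothetical helper, not part of Ginkgo):
+
+	func expectRecent(t time.Time) {
+		GinkgoHelper()
+		// A failure here is attributed to expectRecent's caller, not to this line.
+		Expect(time.Since(t)).To(BeNumerically("<", time.Minute))
+	}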
+*/
+func GinkgoHelper() {
+	types.MarkAsHelper(1)
+}
+
+/*
+GinkgoLabelFilter() returns the label filter configured for this suite via `--label-filter`.
+
+You can use this to manually check if a set of labels would satisfy the filter via:
+
+	if (Label("cat", "dog").MatchesLabelFilter(GinkgoLabelFilter())) {
+		//...
+	}
+*/
+func GinkgoLabelFilter() string {
+	suiteConfig, _ := GinkgoConfiguration()
+	return suiteConfig.LabelFilter
+}
+
 /*
 PauseOutputInterception() pauses Ginkgo's output interception. This is only relevant
 when running in parallel and output to stdout/stderr is being intercepted. You generally
@@ -168,7 +207,7 @@ func PauseOutputInterception() {
 	outputInterceptor.PauseIntercepting()
 }
 
-//ResumeOutputInterception() - see docs for PauseOutputInterception()
+// ResumeOutputInterception() - see docs for PauseOutputInterception()
 func ResumeOutputInterception() {
 	if outputInterceptor == nil {
 		return
@@ -259,7 +298,7 @@ func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool {
 	}
 
 	writer := GinkgoWriter.(*internal.Writer)
-	if reporterConfig.Verbose && suiteConfig.ParallelTotal == 1 {
+	if reporterConfig.Verbosity().GTE(types.VerbosityLevelVerbose) && suiteConfig.ParallelTotal == 1 {
 		writer.SetMode(internal.WriterModeStreamAndBuffer)
 	} else {
 		writer.SetMode(internal.WriterModeBufferOnly)
@@ -277,7 +316,7 @@ func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool {
 	suitePath, err = filepath.Abs(suitePath)
 	exitIfErr(err)
 
-	passed, hasFocusedTests := global.Suite.Run(description, suiteLabels, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(suiteConfig.Timeout, client), client, suiteConfig)
+	passed, hasFocusedTests := global.Suite.Run(description, suiteLabels, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(client), client, internal.RegisterForProgressSignal, suiteConfig)
 	outputInterceptor.Shutdown()
 
 	flagSet.ValidateDeprecations(deprecationTracker)
@@ -353,6 +392,12 @@ func AbortSuite(message string, callerSkip ...int) {
 	panic(types.GinkgoErrors.UncaughtGinkgoPanic(cl))
 }
 
+/*
+ignorablePanic is used by Gomega to signal to GinkgoRecover that Gomega is handling
+the error associated with this panic. It is used when Eventually/Consistently are passed a func(g Gomega) and the resulting function launches a goroutine that makes a failed assertion. That failed assertion is registered by Gomega and then panics. Ordinarily the panic is captured by Gomega. In the case of a goroutine Gomega can't capture the panic - so we piggy back on GinkgoRecover so users have a single defer GinkgoRecover() pattern to follow. To do that we need to tell Ginkgo to ignore this panic and not register it as a panic on the global Failer.
+*/
+type ignorablePanic interface{ GinkgoRecoverShouldIgnoreThisPanic() }
+
 /*
 GinkgoRecover should be deferred at the top of any spawned goroutine that (may) call `Fail`
 Since Gomega assertions call fail, you should throw a `defer GinkgoRecover()` at the top of any goroutine that
@@ -368,6 +413,9 @@ You can learn more about how Ginkgo manages failures here: https://onsi.github.i
 func GinkgoRecover() {
 	e := recover()
 	if e != nil {
+		if _, ok := e.(ignorablePanic); ok {
+			return
+		}
 		global.Failer.Panic(types.NewCodeLocationWithStackTrace(1), e)
 	}
 }
@@ -444,6 +492,8 @@ It nodes are Subject nodes that contain your spec code and assertions. Each It
 node corresponds to an individual Ginkgo spec.
You cannot nest any other Ginkgo nodes within an It node's closure. +You can pass It nodes bare functions (func() {}) or functions that receive a SpecContext or context.Context: func(ctx SpecContext) {} and func (ctx context.Context) {}. If the function takes a context then the It is deemed interruptible and Ginkgo will cancel the context in the event of a timeout (configured via the SpecTimeout() or NodeTimeout() decorators) or of an interrupt signal. + You can learn more at https://onsi.github.io/ginkgo/#spec-subjects-it In addition, subject nodes can be decorated with a variety of decorators. You can learn more here: https://onsi.github.io/ginkgo/#decorator-reference */ @@ -490,30 +540,11 @@ and will simply log the passed in text to the GinkgoWriter. If By is handed a f By will also generate and attach a ReportEntry to the spec. This will ensure that By annotations appear in Ginkgo's machine-readable reports. -Note that By does not generate a new Ginkgo node - rather it is simply synctactic sugar around GinkgoWriter and AddReportEntry +Note that By does not generate a new Ginkgo node - rather it is simply syntactic sugar around GinkgoWriter and AddReportEntry You can learn more about By here: https://onsi.github.io/ginkgo/#documenting-complex-specs-by */ func By(text string, callback ...func()) { - if !global.Suite.InRunPhase() { - exitIfErr(types.GinkgoErrors.ByNotDuringRunPhase(types.NewCodeLocation(1))) - } - value := struct { - Text string - Duration time.Duration - }{ - Text: text, - } - t := time.Now() - AddReportEntry("By Step", ReportEntryVisibilityNever, Offset(1), &value, t) - formatter := formatter.NewWithNoColorBool(reporterConfig.NoColor) - GinkgoWriter.Println(formatter.F("{{bold}}STEP:{{/}} %s {{gray}}%s{{/}}", text, t.Format(types.GINKGO_TIME_FORMAT))) - if len(callback) == 1 { - callback[0]() - value.Duration = time.Since(t) - } - if len(callback) > 1 { - panic("just one callback per By, please") - } + exitIfErr(global.Suite.By(text, callback...)) } /* @@ -522,11 +553,15 @@ When running in parallel, each parallel process will call BeforeSuite. You may only register *one* BeforeSuite handler per test suite. You typically do so in your bootstrap file at the top level. +BeforeSuite can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body. + You cannot nest any other Ginkgo nodes within a BeforeSuite node's closure. You can learn more here: https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite */ -func BeforeSuite(body func()) bool { - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeSuite, "", body)) +func BeforeSuite(body interface{}, args ...interface{}) bool { + combinedArgs := []interface{}{body} + combinedArgs = append(combinedArgs, args...) + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeSuite, "", combinedArgs...)) } /* @@ -537,11 +572,15 @@ When running in parallel, each parallel process will call AfterSuite. You may only register *one* AfterSuite handler per test suite. You typically do so in your bootstrap file at the top level. +AfterSuite can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body. + You cannot nest any other Ginkgo nodes within an AfterSuite node's closure. 
You can learn more here: https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite
 */
-func AfterSuite(body func()) bool {
-	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterSuite, "", body))
+func AfterSuite(body interface{}, args ...interface{}) bool {
+	combinedArgs := []interface{}{body}
+	combinedArgs = append(combinedArgs, args...)
+	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterSuite, "", combinedArgs...))
 }
 
 /*
@@ -552,19 +591,34 @@ information from that setup to all parallel processes.
 
 SynchronizedBeforeSuite accomplishes this by taking *two* function arguments and passing data between them. The first function is only run on parallel process #1. The second is run on all processes, but *only* after the first function completes successfully. The functions have the following signatures:
 
-The first function (which only runs on process #1) has the signature:
+The first function (which only runs on process #1) can have any of the following signatures:
+
+	func()
+	func(ctx context.Context)
+	func(ctx SpecContext)
 	func() []byte
+	func(ctx context.Context) []byte
+	func(ctx SpecContext) []byte
 
-The byte array returned by the first function is then passed to the second function, which has the signature:
+The byte array returned by the first function (if present) is then passed to the second function, which can have any of the following signatures:
+
+	func()
+	func(ctx context.Context)
+	func(ctx SpecContext)
 	func(data []byte)
+	func(ctx context.Context, data []byte)
+	func(ctx SpecContext, data []byte)
+
+If either function receives a context.Context/SpecContext it is considered interruptible.
 
 You cannot nest any other Ginkgo nodes within a SynchronizedBeforeSuite node's closure.
 You can learn more, and see some examples, here: https://onsi.github.io/ginkgo/#parallel-suite-setup-and-cleanup-synchronizedbeforesuite-and-synchronizedaftersuite
 */
-func SynchronizedBeforeSuite(process1Body func() []byte, allProcessBody func([]byte)) bool {
-	return pushNode(internal.NewSynchronizedBeforeSuiteNode(process1Body, allProcessBody, types.NewCodeLocation(1)))
+func SynchronizedBeforeSuite(process1Body interface{}, allProcessBody interface{}, args ...interface{}) bool {
+	combinedArgs := []interface{}{process1Body, allProcessBody}
+	combinedArgs = append(combinedArgs, args...)
+
+	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeSynchronizedBeforeSuite, "", combinedArgs...))
 }
 
 /*
@@ -573,21 +627,26 @@ and a piece that must only run once - on process #1.
 
 SynchronizedAfterSuite accomplishes this by taking *two* function arguments. The first runs on all processes. The second runs only on parallel process #1 and *only* after all other processes have finished and exited. This ensures that process #1, and any resources it is managing, remain alive until
-all other processes are finished.
+all other processes are finished. These two functions can be bare functions (func()) or interruptible (func(context.Context)/func(SpecContext)).
 
 Note that you can also use DeferCleanup() in SynchronizedBeforeSuite to accomplish similar results.
 
 You cannot nest any other Ginkgo nodes within a SynchronizedAfterSuite node's closure.
You can learn more, and see some examples, here: https://onsi.github.io/ginkgo/#parallel-suite-setup-and-cleanup-synchronizedbeforesuite-and-synchronizedaftersuite */ -func SynchronizedAfterSuite(allProcessBody func(), process1Body func()) bool { - return pushNode(internal.NewSynchronizedAfterSuiteNode(allProcessBody, process1Body, types.NewCodeLocation(1))) +func SynchronizedAfterSuite(allProcessBody interface{}, process1Body interface{}, args ...interface{}) bool { + combinedArgs := []interface{}{allProcessBody, process1Body} + combinedArgs = append(combinedArgs, args...) + + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeSynchronizedAfterSuite, "", combinedArgs...)) } /* BeforeEach nodes are Setup nodes whose closures run before It node closures. When multiple BeforeEach nodes are defined in nested Container nodes the outermost BeforeEach node closures are run first. +BeforeEach can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body. + You cannot nest any other Ginkgo nodes within a BeforeEach node's closure. You can learn more here: https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach */ @@ -599,6 +658,8 @@ func BeforeEach(args ...interface{}) bool { JustBeforeEach nodes are similar to BeforeEach nodes, however they are guaranteed to run *after* all BeforeEach node closures - just before the It node closure. This can allow you to separate configuration from creation of resources for a spec. +JustBeforeEach can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body. + You cannot nest any other Ginkgo nodes within a JustBeforeEach node's closure. You can learn more and see some examples here: https://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach */ @@ -612,6 +673,8 @@ are defined in nested Container nodes the innermost AfterEach node closures are Note that you can also use DeferCleanup() in other Setup or Subject nodes to accomplish similar results. +AfterEach can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body. + You cannot nest any other Ginkgo nodes within an AfterEach node's closure. You can learn more here: https://onsi.github.io/ginkgo/#spec-cleanup-aftereach-and-defercleanup */ @@ -622,6 +685,8 @@ func AfterEach(args ...interface{}) bool { /* JustAfterEach nodes are similar to AfterEach nodes, however they are guaranteed to run *before* all AfterEach node closures - just after the It node closure. This can allow you to separate diagnostics collection from teardown for a spec. +JustAfterEach can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body. + You cannot nest any other Ginkgo nodes within a JustAfterEach node's closure. You can learn more and see some examples here: https://onsi.github.io/ginkgo/#separating-diagnostics-collection-and-teardown-justaftereach */ @@ -634,6 +699,8 @@ BeforeAll nodes are Setup nodes that can occur inside Ordered containers. They Multiple BeforeAll nodes can be defined in a given Ordered container however they cannot be nested inside any other container. +BeforeAll can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body. + You cannot nest any other Ginkgo nodes within a BeforeAll node's closure. 
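+
+For example, a minimal sketch of a BeforeAll inside an Ordered container (openConn and conn are illustrative, not part of Ginkgo):
+
+	var _ = Describe("the repository", Ordered, func() {
+		var conn *Conn
+		BeforeAll(func(ctx SpecContext) {
+			// runs once, before the first spec in this Ordered container
+			conn = openConn(ctx)
+		})
+		It("writes a record", func() { ... })
+		It("reads it back", func() { ... })
+	})
+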
You can learn more about Ordered Containers at: https://onsi.github.io/ginkgo/#ordered-containers And you can learn more about BeforeAll at: https://onsi.github.io/ginkgo/#setup-in-ordered-containers-beforeall-and-afterall @@ -649,6 +716,8 @@ Multiple AfterAll nodes can be defined in a given Ordered container however they Note that you can also use DeferCleanup() in a BeforeAll node to accomplish similar behavior. +AfterAll can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body. + You cannot nest any other Ginkgo nodes within an AfterAll node's closure. You can learn more about Ordered Containers at: https://onsi.github.io/ginkgo/#ordered-containers And you can learn more about AfterAll at: https://onsi.github.io/ginkgo/#setup-in-ordered-containers-beforeall-and-afterall @@ -662,15 +731,32 @@ DeferCleanup can be called within any Setup or Subject node to register a cleanu DeferCleanup can be passed: 1. A function that takes no arguments and returns no values. -2. A function that returns an error (in which case it will assert that the returned error was nil, or it will fail the spec). -3. A function that takes arguments (and optionally returns an error) followed by a list of arguments to passe to the function. For example: +2. A function that returns multiple values. `DeferCleanup` will ignore all these return values except for the last one. If this last return value is a non-nil error `DeferCleanup` will fail the spec. +3. A function that takes a context.Context or SpecContext (and optionally returns multiple values). The resulting cleanup node is deemed interruptible and the passed-in context will be cancelled in the event of a timeout or interrupt. +4. A function that takes arguments (and optionally returns multiple values) followed by a list of arguments to pass to the function. +5. A function that takes SpecContext and a list of arguments (and optionally returns multiple values) followed by a list of arguments to pass to the function. - BeforeEach(func() { - DeferCleanup(os.SetEnv, "FOO", os.GetEnv("FOO")) - os.SetEnv("FOO", "BAR") - }) +For example: -will register a cleanup handler that will set the environment variable "FOO" to it's current value (obtained by os.GetEnv("FOO")) after the spec runs and then sets the environment variable "FOO" to "BAR" for the current spec. + BeforeEach(func() { + DeferCleanup(os.SetEnv, "FOO", os.GetEnv("FOO")) + os.SetEnv("FOO", "BAR") + }) + +will register a cleanup handler that will set the environment variable "FOO" to its current value (obtained by os.GetEnv("FOO")) after the spec runs and then sets the environment variable "FOO" to "BAR" for the current spec. + +Similarly: + + BeforeEach(func() { + DeferCleanup(func(ctx SpecContext, path string) { + req, err := http.NewRequestWithContext(ctx, "POST", path, nil) + Expect(err).NotTo(HaveOccurred()) + _, err = http.DefaultClient.Do(req) + Expect(err).NotTo(HaveOccurred()) + }, "example.com/cleanup", NodeTimeout(time.Second*3)) + }) + +will register a cleanup handler that will have three seconds to successfully complete a request to the specified path. Note that we do not specify a context in the list of arguments passed to DeferCleanup - only in the signature of the function we pass in. Ginkgo will detect the requested context and supply a SpecContext when it invokes the cleanup node. If you want to pass in your own context in addition to the Ginkgo-provided SpecContext you must specify the SpecContext as the first argument (e.g.
func(ctx SpecContext, otherCtx context.Context)). When DeferCleanup is called in BeforeEach, JustBeforeEach, It, AfterEach, or JustAfterEach the registered callback will be invoked when the spec completes (i.e. it will behave like an AfterEach node) When DeferCleanup is called in BeforeAll or AfterAll the registered callback will be invoked when the ordered container completes (i.e. it will behave like an AfterAll node) @@ -683,5 +769,26 @@ func DeferCleanup(args ...interface{}) { fail := func(message string, cl types.CodeLocation) { global.Failer.Fail(message, cl) } - pushNode(internal.NewCleanupNode(fail, args...)) + pushNode(internal.NewCleanupNode(deprecationTracker, fail, args...)) +} + +/* +AttachProgressReporter allows you to register a function that will be called whenever Ginkgo generates a Progress Report. The contents returned by the function will be included in the report. + +**This is an experimental feature and the public-facing interface may change in a future minor version of Ginkgo** + +Progress Reports are generated: +- whenever the user explicitly requests one (via `SIGINFO` or `SIGUSR1`) +- on nodes decorated with PollProgressAfter +- on suites run with --poll-progress-after +- whenever a test times out + +Ginkgo uses Progress Reports to convey the current state of the test suite, including any running goroutines. By attaching a progress reporter you are able to supplement these reports with additional information. + +# AttachProgressReporter returns a function that can be called to detach the progress reporter + +You can learn more about AttachProgressReporter here: https://onsi.github.io/ginkgo/#attaching-additional-information-to-progress-reports +*/ +func AttachProgressReporter(reporter func() string) func() { + return global.Suite.AttachProgressReporter(reporter) } diff --git a/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go b/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go index 6ef4b75..c65af4c 100644 --- a/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go @@ -13,13 +13,21 @@ You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorat type Offset = internal.Offset /* -FlakeAttempts(uint N) is a decorator that allows you to mark individual specs or spec containers as flaky. Ginkgo will run them up to `N` times until they pass. +FlakeAttempts(uint N) is a decorator that allows you to mark individual specs or spec containers as flaky. Ginkgo will run them up to `N` times until they pass. -You can learn more here: https://onsi.github.io/ginkgo/#repeating-spec-runs-and-managing-flaky-specs +You can learn more here: https://onsi.github.io/ginkgo/#the-flakeattempts-decorator You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference */ type FlakeAttempts = internal.FlakeAttempts +/* +MustPassRepeatedly(uint N) is a decorator that allows you to repeat the execution of individual specs or spec containers. Ginkgo will run them up to `N` times until they fail. + +You can learn more here: https://onsi.github.io/ginkgo/#the-mustpassrepeatedly-decorator +You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference +*/ +type MustPassRepeatedly = internal.MustPassRepeatedly + /* Focus is a decorator that allows you to mark a spec or container as focused. Identical to FIt and FDescribe. @@ -38,7 +46,7 @@ const Pending = internal.Pending /* Serial is a decorator that allows you to mark a spec or container as serial. 
These specs will never run in parallel with other specs. -Tests in ordered containers cannot be marked as serial - mark the ordered container instead. +Specs in ordered containers cannot be marked as serial - mark the ordered container instead. You can learn more here: https://onsi.github.io/ginkgo/#serial-specs You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference @@ -46,7 +54,7 @@ You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorat */ const Serial = internal.Serial /* -Ordered is a decorator that allows you to mark a container as ordered. Tests in the container will always run in the order they appear. +Ordered is a decorator that allows you to mark a container as ordered. Specs in the container will always run in the order they appear. They will never be randomized and they will never run in parallel with one another, though they may run in parallel with other specs. You can learn more here: https://onsi.github.io/ginkgo/#ordered-containers @@ -54,12 +62,22 @@ You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorat */ const Ordered = internal.Ordered +/* +ContinueOnFailure is a decorator that allows you to mark an Ordered container to continue running specs even if failures occur. Ordinarily an ordered container will stop running specs after the first failure occurs. Note that if a BeforeAll or a BeforeEach/JustBeforeEach annotated with OncePerOrdered fails then no specs will run as the precondition for the Ordered container will be considered to have failed. + +ContinueOnFailure only applies to the outermost Ordered container. Attempting to place ContinueOnFailure in a nested container will result in an error. + +You can learn more here: https://onsi.github.io/ginkgo/#ordered-containers +You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference +*/ +const ContinueOnFailure = internal.ContinueOnFailure + /* OncePerOrdered is a decorator that allows you to mark outer BeforeEach, AfterEach, JustBeforeEach, and JustAfterEach setup nodes to run once per ordered context. Normally these setup nodes run around each individual spec, with OncePerOrdered they will run once around the set of specs in an ordered container. The behavior for non-Ordered containers/specs is unchanged. -You can learh more here: https://onsi.github.io/ginkgo/#setup-around-ordered-containers-the-onceperordered-decorator +You can learn more here: https://onsi.github.io/ginkgo/#setup-around-ordered-containers-the-onceperordered-decorator You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference */ const OncePerOrdered = internal.OncePerOrdered @@ -81,6 +99,43 @@ You can learn more here: https://onsi.github.io/ginkgo/#spec-labels */ type Labels = internal.Labels +/* +PollProgressAfter allows you to override the configured value for --poll-progress-after for a particular node. + +Ginkgo will start emitting node progress if the node is still running after a duration of PollProgressAfter. This allows you to get quicker feedback about the state of a long-running spec. +*/ +type PollProgressAfter = internal.PollProgressAfter + +/* +PollProgressInterval allows you to override the configured value for --poll-progress-interval for a particular node. + +Once a node has been running for longer than PollProgressAfter Ginkgo will emit node progress periodically at an interval of PollProgressInterval.
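+
+For example, a minimal sketch decorating a single node with both overrides:
+
+	It("converges eventually", func() {
+		// ...long-running work...
+	}, PollProgressAfter(10*time.Second), PollProgressInterval(time.Minute))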
+*/ +type PollProgressInterval = internal.PollProgressInterval + +/* +NodeTimeout allows you to specify a timeout for an individual node. The node cannot be a container and must be interruptible (i.e. it must be passed a function that accepts a SpecContext or context.Context). + +If the node does not exit within the specified NodeTimeout its context will be cancelled. The node will then have a period of time controlled by the GracePeriod decorator (or global --grace-period command-line argument) to exit. If the node does not exit within GracePeriod Ginkgo will leak the node and proceed to any clean-up nodes associated with the current spec. +*/ +type NodeTimeout = internal.NodeTimeout + +/* +SpecTimeout allows you to specify a timeout for an individual spec. SpecTimeout can only decorate interruptible It nodes. + +All nodes associated with the It node will need to complete before the SpecTimeout has elapsed. Individual nodes (e.g. BeforeEach) may be decorated with different NodeTimeouts - but these can only serve to provide a more stringent deadline for the node in question; they cannot extend the deadline past the SpecTimeout. + +If the spec does not complete within the specified SpecTimeout the currently running node will have its context cancelled. The node will then have a period of time controlled by that node's GracePeriod decorator (or global --grace-period command-line argument) to exit. If the node does not exit within GracePeriod Ginkgo will leak the node and proceed to any clean-up nodes associated with the current spec. +*/ +type SpecTimeout = internal.SpecTimeout + +/* +GracePeriod denotes the period of time Ginkgo will wait for an interruptible node to exit once an interruption (whether due to a timeout or a user-invoked signal) has occurred. If both the global --grace-period cli flag and a GracePeriod decorator are specified the value in the decorator will take precedence. + +Nodes that do not finish within a GracePeriod will be leaked and Ginkgo will proceed to run subsequent nodes. In the event of a timeout, such leaks will be reported to the user. +*/ +type GracePeriod = internal.GracePeriod + /* SuppressProgressReporting is a decorator that allows you to disable progress reporting of a particular node. This is useful if `ginkgo -v -progress` is generating too much noise; particularly if you have a `ReportAfterEach` node that is running for every skipped spec and is generating lots of progress reports. diff --git a/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go b/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go index d20e5a8..f912bbe 100644 --- a/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go @@ -13,7 +13,7 @@ import ( Deprecated: Done Channel for asynchronous testing The Done channel pattern is no longer supported in Ginkgo 2.0.
-See here for better patterns for asynchronouse testing: https://onsi.github.io/ginkgo/#patterns-for-asynchronous-testing +See here for better patterns for asynchronous testing: https://onsi.github.io/ginkgo/#patterns-for-asynchronous-testing For a migration guide see: https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-async-testing */ diff --git a/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go b/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go index 43b1621..743555d 100644 --- a/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go +++ b/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go @@ -4,6 +4,7 @@ import ( "fmt" "os" "regexp" + "strconv" "strings" ) @@ -50,6 +51,37 @@ func NewWithNoColorBool(noColor bool) Formatter { } func New(colorMode ColorMode) Formatter { + colorAliases := map[string]int{ + "black": 0, + "red": 1, + "green": 2, + "yellow": 3, + "blue": 4, + "magenta": 5, + "cyan": 6, + "white": 7, + } + for colorAlias, n := range colorAliases { + colorAliases[fmt.Sprintf("bright-%s", colorAlias)] = n + 8 + } + + getColor := func(color, defaultEscapeCode string) string { + color = strings.ToUpper(strings.ReplaceAll(color, "-", "_")) + envVar := fmt.Sprintf("GINKGO_CLI_COLOR_%s", color) + envVarColor := os.Getenv(envVar) + if envVarColor == "" { + return defaultEscapeCode + } + if colorCode, ok := colorAliases[envVarColor]; ok { + return fmt.Sprintf("\x1b[38;5;%dm", colorCode) + } + colorCode, err := strconv.Atoi(envVarColor) + if err != nil || colorCode < 0 || colorCode > 255 { + return defaultEscapeCode + } + return fmt.Sprintf("\x1b[38;5;%dm", colorCode) + } + f := Formatter{ ColorMode: colorMode, colors: map[string]string{ @@ -57,18 +89,18 @@ func New(colorMode ColorMode) Formatter { "bold": "\x1b[1m", "underline": "\x1b[4m", - "red": "\x1b[38;5;9m", - "orange": "\x1b[38;5;214m", - "coral": "\x1b[38;5;204m", - "magenta": "\x1b[38;5;13m", - "green": "\x1b[38;5;10m", - "dark-green": "\x1b[38;5;28m", - "yellow": "\x1b[38;5;11m", - "light-yellow": "\x1b[38;5;228m", - "cyan": "\x1b[38;5;14m", - "gray": "\x1b[38;5;243m", - "light-gray": "\x1b[38;5;246m", - "blue": "\x1b[38;5;12m", + "red": getColor("red", "\x1b[38;5;9m"), + "orange": getColor("orange", "\x1b[38;5;214m"), + "coral": getColor("coral", "\x1b[38;5;204m"), + "magenta": getColor("magenta", "\x1b[38;5;13m"), + "green": getColor("green", "\x1b[38;5;10m"), + "dark-green": getColor("dark-green", "\x1b[38;5;28m"), + "yellow": getColor("yellow", "\x1b[38;5;11m"), + "light-yellow": getColor("light-yellow", "\x1b[38;5;228m"), + "cyan": getColor("cyan", "\x1b[38;5;14m"), + "gray": getColor("gray", "\x1b[38;5;243m"), + "light-gray": getColor("light-gray", "\x1b[38;5;246m"), + "blue": getColor("blue", "\x1b[38;5;12m"), }, } colors := []string{} @@ -88,7 +120,10 @@ func (f Formatter) Fi(indentation uint, format string, args ...interface{}) stri } func (f Formatter) Fiw(indentation uint, maxWidth uint, format string, args ...interface{}) string { - out := fmt.Sprintf(f.style(format), args...) + out := f.style(format) + if len(args) > 0 { + out = fmt.Sprintf(out, args...) 
+ } if indentation == 0 && maxWidth == 0 { return out diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go new file mode 100644 index 0000000..5db5d1a --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go @@ -0,0 +1,63 @@ +package build + +import ( + "fmt" + + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/types" +) + +func BuildBuildCommand() command.Command { + var cliConfig = types.NewDefaultCLIConfig() + var goFlagsConfig = types.NewDefaultGoFlagsConfig() + + flags, err := types.BuildBuildCommandFlagSet(&cliConfig, &goFlagsConfig) + if err != nil { + panic(err) + } + + return command.Command{ + Name: "build", + Flags: flags, + Usage: "ginkgo build <PACKAGES>", + ShortDoc: "Build the passed in <PACKAGES> (or the package in the current directory if left blank).", + DocLink: "precompiling-suites", + Command: func(args []string, _ []string) { + var errors []error + cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig) + command.AbortIfErrors("Ginkgo detected configuration issues:", errors) + + buildSpecs(args, cliConfig, goFlagsConfig) + }, + } +} + +func buildSpecs(args []string, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig) { + suites := internal.FindSuites(args, cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter) + if len(suites) == 0 { + command.AbortWith("Found no test suites") + } + + internal.VerifyCLIAndFrameworkVersion(suites) + + opc := internal.NewOrderedParallelCompiler(cliConfig.ComputedNumCompilers()) + opc.StartCompiling(suites, goFlagsConfig) + + for { + suiteIdx, suite := opc.Next() + if suiteIdx >= len(suites) { + break + } + suites[suiteIdx] = suite + if suite.State.Is(internal.TestSuiteStateFailedToCompile) { + fmt.Println(suite.CompilationError.Error()) + } else { + fmt.Printf("Compiled %s.test\n", suite.PackageName) + } + } + + if suites.CountWithState(internal.TestSuiteStateFailedToCompile) > 0 { + command.AbortWith("Failed to compile all tests") + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go new file mode 100644 index 0000000..2efd286 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go @@ -0,0 +1,61 @@ +package command + +import "fmt" + +type AbortDetails struct { + ExitCode int + Error error + EmitUsage bool +} + +func Abort(details AbortDetails) { + panic(details) +} + +func AbortGracefullyWith(format string, args ...interface{}) { + Abort(AbortDetails{ + ExitCode: 0, + Error: fmt.Errorf(format, args...), + EmitUsage: false, + }) +} + +func AbortWith(format string, args ...interface{}) { + Abort(AbortDetails{ + ExitCode: 1, + Error: fmt.Errorf(format, args...), + EmitUsage: false, + }) +} + +func AbortWithUsage(format string, args ...interface{}) { + Abort(AbortDetails{ + ExitCode: 1, + Error: fmt.Errorf(format, args...), + EmitUsage: true, + }) +} + +func AbortIfError(preamble string, err error) { + if err != nil { + Abort(AbortDetails{ + ExitCode: 1, + Error: fmt.Errorf("%s\n%s", preamble, err.Error()), + EmitUsage: false, + }) + } +} + +func AbortIfErrors(preamble string, errors []error) { + if len(errors) > 0 { + out := "" + for _, err := range errors { + out += err.Error() + } + Abort(AbortDetails{ + ExitCode: 1, + Error: fmt.Errorf("%s\n%s", preamble, out), + EmitUsage: false, + }) + } +} diff --git 
a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go new file mode 100644 index 0000000..12e0e56 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go @@ -0,0 +1,50 @@ +package command + +import ( + "fmt" + "io" + "strings" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/types" +) + +type Command struct { + Name string + Flags types.GinkgoFlagSet + Usage string + ShortDoc string + Documentation string + DocLink string + Command func(args []string, additionalArgs []string) +} + +func (c Command) Run(args []string, additionalArgs []string) { + args, err := c.Flags.Parse(args) + if err != nil { + AbortWithUsage(err.Error()) + } + + c.Command(args, additionalArgs) +} + +func (c Command) EmitUsage(writer io.Writer) { + fmt.Fprintln(writer, formatter.F("{{bold}}"+c.Usage+"{{/}}")) + fmt.Fprintln(writer, formatter.F("{{gray}}%s{{/}}", strings.Repeat("-", len(c.Usage)))) + if c.ShortDoc != "" { + fmt.Fprintln(writer, formatter.Fiw(0, formatter.COLS, c.ShortDoc)) + fmt.Fprintln(writer, "") + } + if c.Documentation != "" { + fmt.Fprintln(writer, formatter.Fiw(0, formatter.COLS, c.Documentation)) + fmt.Fprintln(writer, "") + } + if c.DocLink != "" { + fmt.Fprintln(writer, formatter.Fi(0, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}http://onsi.github.io/ginkgo/#%s{{/}}", c.DocLink)) + fmt.Fprintln(writer, "") + } + flagUsage := c.Flags.Usage() + if flagUsage != "" { + fmt.Fprintf(writer, formatter.F(flagUsage)) + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go new file mode 100644 index 0000000..88dd8d6 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go @@ -0,0 +1,182 @@ +package command + +import ( + "fmt" + "io" + "os" + "strings" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/types" +) + +type Program struct { + Name string + Heading string + Commands []Command + DefaultCommand Command + DeprecatedCommands []DeprecatedCommand + + //For testing - leave as nil in production + OutWriter io.Writer + ErrWriter io.Writer + Exiter func(code int) +} + +type DeprecatedCommand struct { + Name string + Deprecation types.Deprecation +} + +func (p Program) RunAndExit(osArgs []string) { + var command Command + deprecationTracker := types.NewDeprecationTracker() + if p.Exiter == nil { + p.Exiter = os.Exit + } + if p.OutWriter == nil { + p.OutWriter = formatter.ColorableStdOut + } + if p.ErrWriter == nil { + p.ErrWriter = formatter.ColorableStdErr + } + + defer func() { + exitCode := 0 + + if r := recover(); r != nil { + details, ok := r.(AbortDetails) + if !ok { + panic(r) + } + + if details.Error != nil { + fmt.Fprintln(p.ErrWriter, formatter.F("{{red}}{{bold}}%s %s{{/}} {{red}}failed{{/}}", p.Name, command.Name)) + fmt.Fprintln(p.ErrWriter, formatter.Fi(1, details.Error.Error())) + } + if details.EmitUsage { + if details.Error != nil { + fmt.Fprintln(p.ErrWriter, "") + } + command.EmitUsage(p.ErrWriter) + } + exitCode = details.ExitCode + } + + command.Flags.ValidateDeprecations(deprecationTracker) + if deprecationTracker.DidTrackDeprecations() { + fmt.Fprintln(p.ErrWriter, deprecationTracker.DeprecationsReport()) + } + p.Exiter(exitCode) + return + }() + + args, additionalArgs := []string{}, []string{} + + foundDelimiter := false + for _, arg := range osArgs[1:] { + if !foundDelimiter { + if arg == "--" { + foundDelimiter = true + continue + } + } + 
+ if foundDelimiter { + additionalArgs = append(additionalArgs, arg) + } else { + args = append(args, arg) + } + } + + command = p.DefaultCommand + if len(args) > 0 { + p.handleHelpRequestsAndExit(p.OutWriter, args) + if command.Name == args[0] { + args = args[1:] + } else { + for _, deprecatedCommand := range p.DeprecatedCommands { + if deprecatedCommand.Name == args[0] { + deprecationTracker.TrackDeprecation(deprecatedCommand.Deprecation) + return + } + } + for _, tryCommand := range p.Commands { + if tryCommand.Name == args[0] { + command, args = tryCommand, args[1:] + break + } + } + } + } + + command.Run(args, additionalArgs) +} + +func (p Program) handleHelpRequestsAndExit(writer io.Writer, args []string) { + if len(args) == 0 { + return + } + + matchesHelpFlag := func(args ...string) bool { + for _, arg := range args { + if arg == "--help" || arg == "-help" || arg == "-h" || arg == "--h" { + return true + } + } + return false + } + if len(args) == 1 { + if args[0] == "help" || matchesHelpFlag(args[0]) { + p.EmitUsage(writer) + Abort(AbortDetails{}) + } + } else { + var name string + if args[0] == "help" || matchesHelpFlag(args[0]) { + name = args[1] + } else if matchesHelpFlag(args[1:]...) { + name = args[0] + } else { + return + } + + if p.DefaultCommand.Name == name || p.Name == name { + p.DefaultCommand.EmitUsage(writer) + Abort(AbortDetails{}) + } + for _, command := range p.Commands { + if command.Name == name { + command.EmitUsage(writer) + Abort(AbortDetails{}) + } + } + + fmt.Fprintln(writer, formatter.F("{{red}}Unknown Command: {{bold}}%s{{/}}", name)) + fmt.Fprintln(writer, "") + p.EmitUsage(writer) + Abort(AbortDetails{ExitCode: 1}) + } + return +} + +func (p Program) EmitUsage(writer io.Writer) { + fmt.Fprintln(writer, formatter.F(p.Heading)) + fmt.Fprintln(writer, formatter.F("{{gray}}%s{{/}}", strings.Repeat("-", len(p.Heading)))) + fmt.Fprintln(writer, formatter.F("For usage information for a command, run {{bold}}%s help COMMAND{{/}}.", p.Name)) + fmt.Fprintln(writer, formatter.F("For usage information for the default command, run {{bold}}%s help %s{{/}} or {{bold}}%s help %s{{/}}.", p.Name, p.Name, p.Name, p.DefaultCommand.Name)) + fmt.Fprintln(writer, "") + fmt.Fprintln(writer, formatter.F("The following commands are available:")) + + fmt.Fprintln(writer, formatter.Fi(1, "{{bold}}%s{{/}} or %s {{bold}}%s{{/}} - {{gray}}%s{{/}}", p.Name, p.Name, p.DefaultCommand.Name, p.DefaultCommand.Usage)) + if p.DefaultCommand.ShortDoc != "" { + fmt.Fprintln(writer, formatter.Fi(2, p.DefaultCommand.ShortDoc)) + } + + for _, command := range p.Commands { + fmt.Fprintln(writer, formatter.Fi(1, "{{bold}}%s{{/}} - {{gray}}%s{{/}}", command.Name, command.Usage)) + if command.ShortDoc != "" { + fmt.Fprintln(writer, formatter.Fi(2, command.ShortDoc)) + } + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go new file mode 100644 index 0000000..a367a1f --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go @@ -0,0 +1,48 @@ +package generators + +var bootstrapText = `package {{.Package}} + +import ( + "testing" + + {{.GinkgoImport}} + {{.GomegaImport}} +) + +func Test{{.FormattedName}}(t *testing.T) { + {{.GomegaPackage}}RegisterFailHandler({{.GinkgoPackage}}Fail) + {{.GinkgoPackage}}RunSpecs(t, "{{.FormattedName}} Suite") +} +` + +var agoutiBootstrapText = `package {{.Package}} + +import ( + "testing" + + {{.GinkgoImport}} + 
{{.GomegaImport}} + "github.com/sclevine/agouti" +) + +func Test{{.FormattedName}}(t *testing.T) { + {{.GomegaPackage}}RegisterFailHandler({{.GinkgoPackage}}Fail) + {{.GinkgoPackage}}RunSpecs(t, "{{.FormattedName}} Suite") +} + +var agoutiDriver *agouti.WebDriver + +var _ = {{.GinkgoPackage}}BeforeSuite(func() { + // Choose a WebDriver: + + agoutiDriver = agouti.PhantomJS() + // agoutiDriver = agouti.Selenium() + // agoutiDriver = agouti.ChromeDriver() + + {{.GomegaPackage}}Expect(agoutiDriver.Start()).To({{.GomegaPackage}}Succeed()) +}) + +var _ = {{.GinkgoPackage}}AfterSuite(func() { + {{.GomegaPackage}}Expect(agoutiDriver.Stop()).To({{.GomegaPackage}}Succeed()) +}) +` diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go new file mode 100644 index 0000000..73aff0b --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go @@ -0,0 +1,133 @@ +package generators + +import ( + "bytes" + "encoding/json" + "fmt" + "os" + "text/template" + + sprig "github.com/go-task/slim-sprig" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/types" +) + +func BuildBootstrapCommand() command.Command { + conf := GeneratorsConfig{} + flags, err := types.NewGinkgoFlagSet( + types.GinkgoFlags{ + {Name: "agouti", KeyPath: "Agouti", + Usage: "If set, bootstrap will generate a bootstrap file for writing Agouti tests"}, + {Name: "nodot", KeyPath: "NoDot", + Usage: "If set, bootstrap will generate a bootstrap test file that does not dot-import ginkgo and gomega"}, + {Name: "internal", KeyPath: "Internal", + Usage: "If set, bootstrap will generate a bootstrap test file that uses the regular package name (i.e. `package X`, not `package X_test`)"}, + {Name: "template", KeyPath: "CustomTemplate", + UsageArgument: "template-file", + Usage: "If specified, generate will use the contents of the file passed as the bootstrap template"}, + {Name: "template-data", KeyPath: "CustomTemplateData", + UsageArgument: "template-data-file", + Usage: "If specified, generate will use the contents of the file passed as data to be rendered in the bootstrap template"}, + }, + &conf, + types.GinkgoFlagSections{}, + ) + + if err != nil { + panic(err) + } + + return command.Command{ + Name: "bootstrap", + Usage: "ginkgo bootstrap", + ShortDoc: "Bootstrap a test suite for the current package", + Documentation: `Tests written in Ginkgo and Gomega require a small amount of boilerplate to hook into Go's testing infrastructure. + +{{bold}}ginkgo bootstrap{{/}} generates this boilerplate for you in a file named X_suite_test.go where X is the name of the package under test.`, + DocLink: "generators", + Flags: flags, + Command: func(_ []string, _ []string) { + generateBootstrap(conf) + }, + } +} + +type bootstrapData struct { + Package string + FormattedName string + + GinkgoImport string + GomegaImport string + GinkgoPackage string + GomegaPackage string + CustomData map[string]any +} + +func generateBootstrap(conf GeneratorsConfig) { + packageName, bootstrapFilePrefix, formattedName := getPackageAndFormattedName() + + data := bootstrapData{ + Package: determinePackageName(packageName, conf.Internal), + FormattedName: formattedName, + + GinkgoImport: `. "github.com/onsi/ginkgo/v2"`, + GomegaImport: `. 
"github.com/onsi/gomega"`, + GinkgoPackage: "", + GomegaPackage: "", + } + + if conf.NoDot { + data.GinkgoImport = `"github.com/onsi/ginkgo/v2"` + data.GomegaImport = `"github.com/onsi/gomega"` + data.GinkgoPackage = `ginkgo.` + data.GomegaPackage = `gomega.` + } + + targetFile := fmt.Sprintf("%s_suite_test.go", bootstrapFilePrefix) + if internal.FileExists(targetFile) { + command.AbortWith("{{bold}}%s{{/}} already exists", targetFile) + } else { + fmt.Printf("Generating ginkgo test suite bootstrap for %s in:\n\t%s\n", packageName, targetFile) + } + + f, err := os.Create(targetFile) + command.AbortIfError("Failed to create file:", err) + defer f.Close() + + var templateText string + if conf.CustomTemplate != "" { + tpl, err := os.ReadFile(conf.CustomTemplate) + command.AbortIfError("Failed to read custom bootstrap file:", err) + templateText = string(tpl) + if conf.CustomTemplateData != "" { + var tplCustomDataMap map[string]any + tplCustomData, err := os.ReadFile(conf.CustomTemplateData) + command.AbortIfError("Failed to read custom boostrap data file:", err) + if !json.Valid([]byte(tplCustomData)) { + command.AbortWith("Invalid JSON object in custom data file.") + } + //create map from the custom template data + json.Unmarshal(tplCustomData, &tplCustomDataMap) + data.CustomData = tplCustomDataMap + } + } else if conf.Agouti { + templateText = agoutiBootstrapText + } else { + templateText = bootstrapText + } + + //Setting the option to explicitly fail if template is rendered trying to access missing key + bootstrapTemplate, err := template.New("bootstrap").Funcs(sprig.TxtFuncMap()).Option("missingkey=error").Parse(templateText) + command.AbortIfError("Failed to parse bootstrap template:", err) + + buf := &bytes.Buffer{} + //Being explicit about failing sooner during template rendering + //when accessing custom data rather than during the go fmt command + err = bootstrapTemplate.Execute(buf, data) + command.AbortIfError("Failed to render bootstrap template:", err) + + buf.WriteTo(f) + + internal.GoFmt(targetFile) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go new file mode 100644 index 0000000..48d23f9 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go @@ -0,0 +1,259 @@ +package generators + +import ( + "bytes" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + "text/template" + + sprig "github.com/go-task/slim-sprig" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/types" +) + +func BuildGenerateCommand() command.Command { + conf := GeneratorsConfig{} + flags, err := types.NewGinkgoFlagSet( + types.GinkgoFlags{ + {Name: "agouti", KeyPath: "Agouti", + Usage: "If set, generate will create a test file for writing Agouti tests"}, + {Name: "nodot", KeyPath: "NoDot", + Usage: "If set, generate will create a test file that does not dot-import ginkgo and gomega"}, + {Name: "internal", KeyPath: "Internal", + Usage: "If set, generate will create a test file that uses the regular package name (i.e. 
`package X`, not `package X_test`)"}, + {Name: "template", KeyPath: "CustomTemplate", + UsageArgument: "template-file", + Usage: "If specified, generate will use the contents of the file passed as the test file template"}, + {Name: "template-data", KeyPath: "CustomTemplateData", + UsageArgument: "template-data-file", + Usage: "If specified, generate will use the contents of the file passed as data to be rendered in the test file template"}, + }, + &conf, + types.GinkgoFlagSections{}, + ) + + if err != nil { + panic(err) + } + + return command.Command{ + Name: "generate", + Usage: "ginkgo generate <SUBJECT>", + ShortDoc: "Generate a test file named <SUBJECT>_test.go", + Documentation: `If the optional <SUBJECT> argument is omitted, a file named after the package in the current directory will be created. + +You can pass multiple <SUBJECT>s to generate multiple files simultaneously. The resulting files are named <SUBJECT>_test.go. + +You can also pass a <SUBJECT> of the form "file.go" and generate will emit "file_test.go".`, + DocLink: "generators", + Flags: flags, + Command: func(args []string, _ []string) { + generateTestFiles(conf, args) + }, + } +} + +type specData struct { + Package string + Subject string + PackageImportPath string + ImportPackage bool + + GinkgoImport string + GomegaImport string + GinkgoPackage string + GomegaPackage string + CustomData map[string]any +} + +func generateTestFiles(conf GeneratorsConfig, args []string) { + subjects := args + if len(subjects) == 0 { + subjects = []string{""} + } + for _, subject := range subjects { + generateTestFileForSubject(subject, conf) + } +} + +func generateTestFileForSubject(subject string, conf GeneratorsConfig) { + packageName, specFilePrefix, formattedName := getPackageAndFormattedName() + if subject != "" { + specFilePrefix = formatSubject(subject) + formattedName = prettifyName(specFilePrefix) + } + + if conf.Internal { + specFilePrefix = specFilePrefix + "_internal" + } + + data := specData{ + Package: determinePackageName(packageName, conf.Internal), + Subject: formattedName, + PackageImportPath: getPackageImportPath(), + ImportPackage: !conf.Internal, + + GinkgoImport: `. "github.com/onsi/ginkgo/v2"`, + GomegaImport: `. 
"github.com/onsi/gomega"`, + GinkgoPackage: "", + GomegaPackage: "", + } + + if conf.NoDot { + data.GinkgoImport = `"github.com/onsi/ginkgo/v2"` + data.GomegaImport = `"github.com/onsi/gomega"` + data.GinkgoPackage = `ginkgo.` + data.GomegaPackage = `gomega.` + } + + targetFile := fmt.Sprintf("%s_test.go", specFilePrefix) + if internal.FileExists(targetFile) { + command.AbortWith("{{bold}}%s{{/}} already exists", targetFile) + } else { + fmt.Printf("Generating ginkgo test for %s in:\n %s\n", data.Subject, targetFile) + } + + f, err := os.Create(targetFile) + command.AbortIfError("Failed to create test file:", err) + defer f.Close() + + var templateText string + if conf.CustomTemplate != "" { + tpl, err := os.ReadFile(conf.CustomTemplate) + command.AbortIfError("Failed to read custom template file:", err) + templateText = string(tpl) + if conf.CustomTemplateData != "" { + var tplCustomDataMap map[string]any + tplCustomData, err := os.ReadFile(conf.CustomTemplateData) + command.AbortIfError("Failed to read custom template data file:", err) + if !json.Valid([]byte(tplCustomData)) { + command.AbortWith("Invalid JSON object in custom data file.") + } + //create map from the custom template data + json.Unmarshal(tplCustomData, &tplCustomDataMap) + data.CustomData = tplCustomDataMap + } + } else if conf.Agouti { + templateText = agoutiSpecText + } else { + templateText = specText + } + + //Setting the option to explicitly fail if template is rendered trying to access missing key + specTemplate, err := template.New("spec").Funcs(sprig.TxtFuncMap()).Option("missingkey=error").Parse(templateText) + command.AbortIfError("Failed to read parse test template:", err) + + //Being explicit about failing sooner during template rendering + //when accessing custom data rather than during the go fmt command + err = specTemplate.Execute(f, data) + command.AbortIfError("Failed to render bootstrap template:", err) + internal.GoFmt(targetFile) +} + +func formatSubject(name string) string { + name = strings.ReplaceAll(name, "-", "_") + name = strings.ReplaceAll(name, " ", "_") + name = strings.Split(name, ".go")[0] + name = strings.Split(name, "_test")[0] + return name +} + +// moduleName returns module name from go.mod from given module root directory +func moduleName(modRoot string) string { + modFile, err := os.Open(filepath.Join(modRoot, "go.mod")) + if err != nil { + return "" + } + + mod := make([]byte, 128) + _, err = modFile.Read(mod) + if err != nil { + return "" + } + + slashSlash := []byte("//") + moduleStr := []byte("module") + + for len(mod) > 0 { + line := mod + mod = nil + if i := bytes.IndexByte(line, '\n'); i >= 0 { + line, mod = line[:i], line[i+1:] + } + if i := bytes.Index(line, slashSlash); i >= 0 { + line = line[:i] + } + line = bytes.TrimSpace(line) + if !bytes.HasPrefix(line, moduleStr) { + continue + } + line = line[len(moduleStr):] + n := len(line) + line = bytes.TrimSpace(line) + if len(line) == n || len(line) == 0 { + continue + } + + if line[0] == '"' || line[0] == '`' { + p, err := strconv.Unquote(string(line)) + if err != nil { + return "" // malformed quoted string or multiline module path + } + return p + } + + return string(line) + } + + return "" // missing module path +} + +func findModuleRoot(dir string) (root string) { + dir = filepath.Clean(dir) + + // Look for enclosing go.mod. 
+ for { + if fi, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil && !fi.IsDir() { + return dir + } + d := filepath.Dir(dir) + if d == dir { + break + } + dir = d + } + return "" +} + +func getPackageImportPath() string { + workingDir, err := os.Getwd() + if err != nil { + panic(err.Error()) + } + + sep := string(filepath.Separator) + + // Try go.mod file first + modRoot := findModuleRoot(workingDir) + if modRoot != "" { + modName := moduleName(modRoot) + if modName != "" { + cd := strings.ReplaceAll(workingDir, modRoot, "") + cd = strings.ReplaceAll(cd, sep, "/") + return modName + cd + } + } + + // Fallback to GOPATH structure + paths := strings.Split(workingDir, sep+"src"+sep) + if len(paths) == 1 { + fmt.Printf("\nCouldn't identify package import path.\n\n\tginkgo generate\n\nMust be run within a package directory under $GOPATH/src/...\nYou're going to have to change UNKNOWN_PACKAGE_PATH in the generated file...\n\n") + return "UNKNOWN_PACKAGE_PATH" + } + return filepath.ToSlash(paths[len(paths)-1]) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go new file mode 100644 index 0000000..c3470ad --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go @@ -0,0 +1,41 @@ +package generators + +var specText = `package {{.Package}} + +import ( + {{.GinkgoImport}} + {{.GomegaImport}} + + {{if .ImportPackage}}"{{.PackageImportPath}}"{{end}} +) + +var _ = {{.GinkgoPackage}}Describe("{{.Subject}}", func() { + +}) +` + +var agoutiSpecText = `package {{.Package}} + +import ( + {{.GinkgoImport}} + {{.GomegaImport}} + "github.com/sclevine/agouti" + . "github.com/sclevine/agouti/matchers" + + {{if .ImportPackage}}"{{.PackageImportPath}}"{{end}} +) + +var _ = {{.GinkgoPackage}}Describe("{{.Subject}}", func() { + var page *agouti.Page + + {{.GinkgoPackage}}BeforeEach(func() { + var err error + page, err = agoutiDriver.NewPage() + {{.GomegaPackage}}Expect(err).NotTo({{.GomegaPackage}}HaveOccurred()) + }) + + {{.GinkgoPackage}}AfterEach(func() { + {{.GomegaPackage}}Expect(page.Destroy()).To({{.GomegaPackage}}Succeed()) + }) +}) +` diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go new file mode 100644 index 0000000..3046a44 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go @@ -0,0 +1,64 @@ +package generators + +import ( + "go/build" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/onsi/ginkgo/v2/ginkgo/command" +) + +type GeneratorsConfig struct { + Agouti, NoDot, Internal bool + CustomTemplate string + CustomTemplateData string +} + +func getPackageAndFormattedName() (string, string, string) { + path, err := os.Getwd() + command.AbortIfError("Could not get current working directory:", err) + + dirName := strings.ReplaceAll(filepath.Base(path), "-", "_") + dirName = strings.ReplaceAll(dirName, " ", "_") + + pkg, err := build.ImportDir(path, 0) + packageName := pkg.Name + if err != nil { + packageName = ensureLegalPackageName(dirName) + } + + formattedName := prettifyName(filepath.Base(path)) + return packageName, dirName, formattedName +} + +func ensureLegalPackageName(name string) string { + if name == "_" { + return "underscore" + } + if len(name) == 0 { + return "empty" + } + n, isDigitErr := strconv.Atoi(string(name[0])) + if isDigitErr == nil { + return []string{"zero", "one", "two", 
"three", "four", "five", "six", "seven", "eight", "nine"}[n] + name[1:] + } + return name +} + +func prettifyName(name string) string { + name = strings.ReplaceAll(name, "-", " ") + name = strings.ReplaceAll(name, "_", " ") + name = strings.Title(name) + name = strings.ReplaceAll(name, " ", "") + return name +} + +func determinePackageName(name string, internal bool) string { + if internal { + return name + } + + return name + "_test" +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go new file mode 100644 index 0000000..86da734 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go @@ -0,0 +1,161 @@ +package internal + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + "sync" + + "github.com/onsi/ginkgo/v2/types" +) + +func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig) TestSuite { + if suite.PathToCompiledTest != "" { + return suite + } + + suite.CompilationError = nil + + path, err := filepath.Abs(filepath.Join(suite.Path, suite.PackageName+".test")) + if err != nil { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to compute compilation target path:\n%s", err.Error()) + return suite + } + + ginkgoInvocationPath, _ := os.Getwd() + ginkgoInvocationPath, _ = filepath.Abs(ginkgoInvocationPath) + packagePath := suite.AbsPath() + pathToInvocationPath, err := filepath.Rel(packagePath, ginkgoInvocationPath) + if err != nil { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to get relative path from package to the current working directory:\n%s", err.Error()) + return suite + } + args, err := types.GenerateGoTestCompileArgs(goFlagsConfig, path, "./", pathToInvocationPath) + if err != nil { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to generate go test compile flags:\n%s", err.Error()) + return suite + } + + cmd := exec.Command("go", args...) 
+ cmd.Dir = suite.Path + output, err := cmd.CombinedOutput() + if err != nil { + if len(output) > 0 { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to compile %s:\n\n%s", suite.PackageName, output) + } else { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to compile %s\n%s", suite.PackageName, err.Error()) + } + return suite + } + + if strings.Contains(string(output), "[no test files]") { + suite.State = TestSuiteStateSkippedDueToEmptyCompilation + return suite + } + + if len(output) > 0 { + fmt.Println(string(output)) + } + + if !FileExists(path) { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to compile %s:\nOutput file %s could not be found", suite.PackageName, path) + return suite + } + + suite.State = TestSuiteStateCompiled + suite.PathToCompiledTest = path + return suite +} + +func Cleanup(goFlagsConfig types.GoFlagsConfig, suites ...TestSuite) { + if goFlagsConfig.BinaryMustBePreserved() { + return + } + for _, suite := range suites { + if !suite.Precompiled { + os.Remove(suite.PathToCompiledTest) + } + } +} + +type parallelSuiteBundle struct { + suite TestSuite + compiled chan TestSuite +} + +type OrderedParallelCompiler struct { + mutex *sync.Mutex + stopped bool + numCompilers int + + idx int + numSuites int + completionChannels []chan TestSuite +} + +func NewOrderedParallelCompiler(numCompilers int) *OrderedParallelCompiler { + return &OrderedParallelCompiler{ + mutex: &sync.Mutex{}, + numCompilers: numCompilers, + } +} + +func (opc *OrderedParallelCompiler) StartCompiling(suites TestSuites, goFlagsConfig types.GoFlagsConfig) { + opc.stopped = false + opc.idx = 0 + opc.numSuites = len(suites) + opc.completionChannels = make([]chan TestSuite, opc.numSuites) + + toCompile := make(chan parallelSuiteBundle, opc.numCompilers) + for compiler := 0; compiler < opc.numCompilers; compiler++ { + go func() { + for bundle := range toCompile { + c, suite := bundle.compiled, bundle.suite + opc.mutex.Lock() + stopped := opc.stopped + opc.mutex.Unlock() + if !stopped { + suite = CompileSuite(suite, goFlagsConfig) + } + c <- suite + } + }() + } + + for idx, suite := range suites { + opc.completionChannels[idx] = make(chan TestSuite, 1) + toCompile <- parallelSuiteBundle{suite, opc.completionChannels[idx]} + if idx == 0 { //compile first suite serially + suite = <-opc.completionChannels[0] + opc.completionChannels[0] <- suite + } + } + + close(toCompile) +} + +func (opc *OrderedParallelCompiler) Next() (int, TestSuite) { + if opc.idx >= opc.numSuites { + return opc.numSuites, TestSuite{} + } + + idx := opc.idx + suite := <-opc.completionChannels[idx] + opc.idx = opc.idx + 1 + + return idx, suite +} + +func (opc *OrderedParallelCompiler) StopAndDrain() { + opc.mutex.Lock() + opc.stopped = true + opc.mutex.Unlock() +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go new file mode 100644 index 0000000..bd3c6d0 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go @@ -0,0 +1,237 @@ +package internal + +import ( + "bytes" + "fmt" + "os" + "os/exec" + "path/filepath" + "regexp" + "strconv" + + "github.com/google/pprof/profile" + "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +func AbsPathForGeneratedAsset(assetName string, suite TestSuite, cliConfig types.CLIConfig, process int) string { + 
suffix := "" + if process != 0 { + suffix = fmt.Sprintf(".%d", process) + } + if cliConfig.OutputDir == "" { + return filepath.Join(suite.AbsPath(), assetName+suffix) + } + outputDir, _ := filepath.Abs(cliConfig.OutputDir) + return filepath.Join(outputDir, suite.NamespacedName()+"_"+assetName+suffix) +} + +func FinalizeProfilesAndReportsForSuites(suites TestSuites, cliConfig types.CLIConfig, suiteConfig types.SuiteConfig, reporterConfig types.ReporterConfig, goFlagsConfig types.GoFlagsConfig) ([]string, error) { + messages := []string{} + suitesWithProfiles := suites.WithState(TestSuiteStatePassed, TestSuiteStateFailed) //anything else won't have actually run and generated a profile + + // merge cover profiles if need be + if goFlagsConfig.Cover && !cliConfig.KeepSeparateCoverprofiles { + coverProfiles := []string{} + for _, suite := range suitesWithProfiles { + if !suite.HasProgrammaticFocus { + coverProfiles = append(coverProfiles, AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0)) + } + } + + if len(coverProfiles) > 0 { + dst := goFlagsConfig.CoverProfile + if cliConfig.OutputDir != "" { + dst = filepath.Join(cliConfig.OutputDir, goFlagsConfig.CoverProfile) + } + err := MergeAndCleanupCoverProfiles(coverProfiles, dst) + if err != nil { + return messages, err + } + coverage, err := GetCoverageFromCoverProfile(dst) + if err != nil { + return messages, err + } + if coverage == 0 { + messages = append(messages, "composite coverage: [no statements]") + } else if suitesWithProfiles.AnyHaveProgrammaticFocus() { + messages = append(messages, fmt.Sprintf("composite coverage: %.1f%% of statements however some suites did not contribute because they included programatically focused specs", coverage)) + } else { + messages = append(messages, fmt.Sprintf("composite coverage: %.1f%% of statements", coverage)) + } + } else { + messages = append(messages, "no composite coverage computed: all suites included programatically focused specs") + } + } + + // copy binaries if need be + for _, suite := range suitesWithProfiles { + if goFlagsConfig.BinaryMustBePreserved() && cliConfig.OutputDir != "" { + src := suite.PathToCompiledTest + dst := filepath.Join(cliConfig.OutputDir, suite.NamespacedName()+".test") + if suite.Precompiled { + if err := CopyFile(src, dst); err != nil { + return messages, err + } + } else { + if err := os.Rename(src, dst); err != nil { + return messages, err + } + } + } + } + + type reportFormat struct { + ReportName string + GenerateFunc func(types.Report, string) error + MergeFunc func([]string, string) ([]string, error) + } + reportFormats := []reportFormat{} + if reporterConfig.JSONReport != "" { + reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.JSONReport, GenerateFunc: reporters.GenerateJSONReport, MergeFunc: reporters.MergeAndCleanupJSONReports}) + } + if reporterConfig.JUnitReport != "" { + reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.JUnitReport, GenerateFunc: reporters.GenerateJUnitReport, MergeFunc: reporters.MergeAndCleanupJUnitReports}) + } + if reporterConfig.TeamcityReport != "" { + reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.TeamcityReport, GenerateFunc: reporters.GenerateTeamcityReport, MergeFunc: reporters.MergeAndCleanupTeamcityReports}) + } + + // Generate reports for suites that failed to run + reportableSuites := suites.ThatAreGinkgoSuites() + for _, suite := range reportableSuites.WithState(TestSuiteStateFailedToCompile, 
TestSuiteStateFailedDueToTimeout, TestSuiteStateSkippedDueToPriorFailures, TestSuiteStateSkippedDueToEmptyCompilation) { + report := types.Report{ + SuitePath: suite.AbsPath(), + SuiteConfig: suiteConfig, + SuiteSucceeded: false, + } + switch suite.State { + case TestSuiteStateFailedToCompile: + report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, suite.CompilationError.Error()) + case TestSuiteStateFailedDueToTimeout: + report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, TIMEOUT_ELAPSED_FAILURE_REASON) + case TestSuiteStateSkippedDueToPriorFailures: + report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, PRIOR_FAILURES_FAILURE_REASON) + case TestSuiteStateSkippedDueToEmptyCompilation: + report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, EMPTY_SKIP_FAILURE_REASON) + report.SuiteSucceeded = true + } + + for _, format := range reportFormats { + format.GenerateFunc(report, AbsPathForGeneratedAsset(format.ReportName, suite, cliConfig, 0)) + } + } + + // Merge reports unless we've been asked to keep them separate + if !cliConfig.KeepSeparateReports { + for _, format := range reportFormats { + reports := []string{} + for _, suite := range reportableSuites { + reports = append(reports, AbsPathForGeneratedAsset(format.ReportName, suite, cliConfig, 0)) + } + dst := format.ReportName + if cliConfig.OutputDir != "" { + dst = filepath.Join(cliConfig.OutputDir, format.ReportName) + } + mergeMessages, err := format.MergeFunc(reports, dst) + messages = append(messages, mergeMessages...) + if err != nil { + return messages, err + } + } + } + + return messages, nil +} + +//loads each profile, combines them, deletes them, stores them in destination +func MergeAndCleanupCoverProfiles(profiles []string, destination string) error { + combined := &bytes.Buffer{} + modeRegex := regexp.MustCompile(`^mode: .*\n`) + for i, profile := range profiles { + contents, err := os.ReadFile(profile) + if err != nil { + return fmt.Errorf("Unable to read coverage file %s:\n%s", profile, err.Error()) + } + os.Remove(profile) + + // remove the cover mode line from every file + // except the first one + if i > 0 { + contents = modeRegex.ReplaceAll(contents, []byte{}) + } + + _, err = combined.Write(contents) + + // Add a newline to the end of every file if missing. 
+ if err == nil && len(contents) > 0 && contents[len(contents)-1] != '\n' { + _, err = combined.Write([]byte("\n")) + } + + if err != nil { + return fmt.Errorf("Unable to append to coverprofile:\n%s", err.Error()) + } + } + + err := os.WriteFile(destination, combined.Bytes(), 0666) + if err != nil { + return fmt.Errorf("Unable to create combined cover profile:\n%s", err.Error()) + } + return nil +} + +func GetCoverageFromCoverProfile(profile string) (float64, error) { + cmd := exec.Command("go", "tool", "cover", "-func", profile) + output, err := cmd.CombinedOutput() + if err != nil { + return 0, fmt.Errorf("Could not process Coverprofile %s: %s", profile, err.Error()) + } + re := regexp.MustCompile(`total:\s*\(statements\)\s*(\d*\.\d*)\%`) + matches := re.FindStringSubmatch(string(output)) + if matches == nil { + return 0, fmt.Errorf("Could not parse Coverprofile to compute coverage percentage") + } + coverageString := matches[1] + coverage, err := strconv.ParseFloat(coverageString, 64) + if err != nil { + return 0, fmt.Errorf("Could not parse Coverprofile to compute coverage percentage: %s", err.Error()) + } + + return coverage, nil +} + +func MergeProfiles(profilePaths []string, destination string) error { + profiles := []*profile.Profile{} + for _, profilePath := range profilePaths { + proFile, err := os.Open(profilePath) + if err != nil { + return fmt.Errorf("Could not open profile: %s\n%s", profilePath, err.Error()) + } + prof, err := profile.Parse(proFile) + if err != nil { + return fmt.Errorf("Could not parse profile: %s\n%s", profilePath, err.Error()) + } + profiles = append(profiles, prof) + os.Remove(profilePath) + } + + mergedProfile, err := profile.Merge(profiles) + if err != nil { + return fmt.Errorf("Could not merge profiles:\n%s", err.Error()) + } + + outFile, err := os.Create(destination) + if err != nil { + return fmt.Errorf("Could not create merged profile %s:\n%s", destination, err.Error()) + } + err = mergedProfile.Write(outFile) + if err != nil { + return fmt.Errorf("Could not write merged profile %s:\n%s", destination, err.Error()) + } + err = outFile.Close() + if err != nil { + return fmt.Errorf("Could not close merged profile %s:\n%s", destination, err.Error()) + } + + return nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go new file mode 100644 index 0000000..41052ea --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go @@ -0,0 +1,355 @@ +package internal + +import ( + "bytes" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" + "syscall" + "time" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/internal/parallel_support" + "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +func RunCompiledSuite(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite { + suite.State = TestSuiteStateFailed + suite.HasProgrammaticFocus = false + + if suite.PathToCompiledTest == "" { + return suite + } + + if suite.IsGinkgo && cliConfig.ComputedProcs() > 1 { + suite = runParallel(suite, ginkgoConfig, reporterConfig, cliConfig, goFlagsConfig, additionalArgs) + } else if suite.IsGinkgo { + suite = runSerial(suite, ginkgoConfig, reporterConfig, cliConfig, goFlagsConfig, additionalArgs) + } else { + suite = runGoTest(suite, cliConfig, 
goFlagsConfig) + } + runAfterRunHook(cliConfig.AfterRunHook, reporterConfig.NoColor, suite) + return suite +} + +func buildAndStartCommand(suite TestSuite, args []string, pipeToStdout bool) (*exec.Cmd, *bytes.Buffer) { + buf := &bytes.Buffer{} + cmd := exec.Command(suite.PathToCompiledTest, args...) + cmd.Dir = suite.Path + if pipeToStdout { + cmd.Stderr = io.MultiWriter(os.Stdout, buf) + cmd.Stdout = os.Stdout + } else { + cmd.Stderr = buf + cmd.Stdout = buf + } + err := cmd.Start() + command.AbortIfError("Failed to start test suite", err) + + return cmd, buf +} + +func checkForNoTestsWarning(buf *bytes.Buffer) bool { + if strings.Contains(buf.String(), "warning: no tests to run") { + fmt.Fprintf(os.Stderr, `Found no test suites, did you forget to run "ginkgo bootstrap"?`) + return true + } + return false +} + +func runGoTest(suite TestSuite, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig) TestSuite { + // As we run the go test from the suite directory, make sure the cover profile is absolute + // and placed into the expected output directory when one is configured. + if goFlagsConfig.Cover && !filepath.IsAbs(goFlagsConfig.CoverProfile) { + goFlagsConfig.CoverProfile = AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0) + } + + args, err := types.GenerateGoTestRunArgs(goFlagsConfig) + command.AbortIfError("Failed to generate test run arguments", err) + cmd, buf := buildAndStartCommand(suite, args, true) + + cmd.Wait() + + exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() + passed := (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE) + passed = !(checkForNoTestsWarning(buf) && cliConfig.RequireSuite) && passed + if passed { + suite.State = TestSuiteStatePassed + } else { + suite.State = TestSuiteStateFailed + } + + return suite +} + +func runSerial(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite { + if goFlagsConfig.Cover { + goFlagsConfig.CoverProfile = AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0) + } + if goFlagsConfig.BlockProfile != "" { + goFlagsConfig.BlockProfile = AbsPathForGeneratedAsset(goFlagsConfig.BlockProfile, suite, cliConfig, 0) + } + if goFlagsConfig.CPUProfile != "" { + goFlagsConfig.CPUProfile = AbsPathForGeneratedAsset(goFlagsConfig.CPUProfile, suite, cliConfig, 0) + } + if goFlagsConfig.MemProfile != "" { + goFlagsConfig.MemProfile = AbsPathForGeneratedAsset(goFlagsConfig.MemProfile, suite, cliConfig, 0) + } + if goFlagsConfig.MutexProfile != "" { + goFlagsConfig.MutexProfile = AbsPathForGeneratedAsset(goFlagsConfig.MutexProfile, suite, cliConfig, 0) + } + if reporterConfig.JSONReport != "" { + reporterConfig.JSONReport = AbsPathForGeneratedAsset(reporterConfig.JSONReport, suite, cliConfig, 0) + } + if reporterConfig.JUnitReport != "" { + reporterConfig.JUnitReport = AbsPathForGeneratedAsset(reporterConfig.JUnitReport, suite, cliConfig, 0) + } + if reporterConfig.TeamcityReport != "" { + reporterConfig.TeamcityReport = AbsPathForGeneratedAsset(reporterConfig.TeamcityReport, suite, cliConfig, 0) + } + + args, err := types.GenerateGinkgoTestRunArgs(ginkgoConfig, reporterConfig, goFlagsConfig) + command.AbortIfError("Failed to generate test run arguments", err) + args = append([]string{"--test.timeout=0"}, args...) + args = append(args, additionalArgs...) 
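+ // start the suite binary with its output piped straight through to the CLI's stdout, then block until it exits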
+ + cmd, buf := buildAndStartCommand(suite, args, true) + + cmd.Wait() + + exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() + suite.HasProgrammaticFocus = (exitStatus == types.GINKGO_FOCUS_EXIT_CODE) + passed := (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE) + passed = !(checkForNoTestsWarning(buf) && cliConfig.RequireSuite) && passed + if passed { + suite.State = TestSuiteStatePassed + } else { + suite.State = TestSuiteStateFailed + } + + if suite.HasProgrammaticFocus { + if goFlagsConfig.Cover { + fmt.Fprintln(os.Stdout, "coverage: no coverfile was generated because specs are programmatically focused") + } + if goFlagsConfig.BlockProfile != "" { + fmt.Fprintln(os.Stdout, "no block profile was generated because specs are programmatically focused") + } + if goFlagsConfig.CPUProfile != "" { + fmt.Fprintln(os.Stdout, "no cpu profile was generated because specs are programmatically focused") + } + if goFlagsConfig.MemProfile != "" { + fmt.Fprintln(os.Stdout, "no mem profile was generated because specs are programmatically focused") + } + if goFlagsConfig.MutexProfile != "" { + fmt.Fprintln(os.Stdout, "no mutex profile was generated because specs are programmatically focused") + } + } + + return suite +} + +func runParallel(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite { + type procResult struct { + passed bool + hasProgrammaticFocus bool + } + + numProcs := cliConfig.ComputedProcs() + procOutput := make([]*bytes.Buffer, numProcs) + coverProfiles := []string{} + + blockProfiles := []string{} + cpuProfiles := []string{} + memProfiles := []string{} + mutexProfiles := []string{} + + procResults := make(chan procResult) + + server, err := parallel_support.NewServer(numProcs, reporters.NewDefaultReporter(reporterConfig, formatter.ColorableStdOut)) + command.AbortIfError("Failed to start parallel spec server", err) + server.Start() + defer server.Close() + + if reporterConfig.JSONReport != "" { + reporterConfig.JSONReport = AbsPathForGeneratedAsset(reporterConfig.JSONReport, suite, cliConfig, 0) + } + if reporterConfig.JUnitReport != "" { + reporterConfig.JUnitReport = AbsPathForGeneratedAsset(reporterConfig.JUnitReport, suite, cliConfig, 0) + } + if reporterConfig.TeamcityReport != "" { + reporterConfig.TeamcityReport = AbsPathForGeneratedAsset(reporterConfig.TeamcityReport, suite, cliConfig, 0) + } + + for proc := 1; proc <= numProcs; proc++ { + procGinkgoConfig := ginkgoConfig + procGinkgoConfig.ParallelProcess, procGinkgoConfig.ParallelTotal, procGinkgoConfig.ParallelHost = proc, numProcs, server.Address() + + procGoFlagsConfig := goFlagsConfig + if goFlagsConfig.Cover { + procGoFlagsConfig.CoverProfile = AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, proc) + coverProfiles = append(coverProfiles, procGoFlagsConfig.CoverProfile) + } + if goFlagsConfig.BlockProfile != "" { + procGoFlagsConfig.BlockProfile = AbsPathForGeneratedAsset(goFlagsConfig.BlockProfile, suite, cliConfig, proc) + blockProfiles = append(blockProfiles, procGoFlagsConfig.BlockProfile) + } + if goFlagsConfig.CPUProfile != "" { + procGoFlagsConfig.CPUProfile = AbsPathForGeneratedAsset(goFlagsConfig.CPUProfile, suite, cliConfig, proc) + cpuProfiles = append(cpuProfiles, procGoFlagsConfig.CPUProfile) + } + if goFlagsConfig.MemProfile != "" { + procGoFlagsConfig.MemProfile = AbsPathForGeneratedAsset(goFlagsConfig.MemProfile, suite, 
cliConfig, proc) + memProfiles = append(memProfiles, procGoFlagsConfig.MemProfile) + } + if goFlagsConfig.MutexProfile != "" { + procGoFlagsConfig.MutexProfile = AbsPathForGeneratedAsset(goFlagsConfig.MutexProfile, suite, cliConfig, proc) + mutexProfiles = append(mutexProfiles, procGoFlagsConfig.MutexProfile) + } + + args, err := types.GenerateGinkgoTestRunArgs(procGinkgoConfig, reporterConfig, procGoFlagsConfig) + command.AbortIfError("Failed to generate test run arguments", err) + args = append([]string{"--test.timeout=0"}, args...) + args = append(args, additionalArgs...) + + cmd, buf := buildAndStartCommand(suite, args, false) + procOutput[proc-1] = buf + server.RegisterAlive(proc, func() bool { return cmd.ProcessState == nil || !cmd.ProcessState.Exited() }) + + go func() { + cmd.Wait() + exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() + procResults <- procResult{ + passed: (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE), + hasProgrammaticFocus: exitStatus == types.GINKGO_FOCUS_EXIT_CODE, + } + }() + } + + passed := true + for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ { + result := <-procResults + passed = passed && result.passed + suite.HasProgrammaticFocus = suite.HasProgrammaticFocus || result.hasProgrammaticFocus + } + if passed { + suite.State = TestSuiteStatePassed + } else { + suite.State = TestSuiteStateFailed + } + + select { + case <-server.GetSuiteDone(): + fmt.Println("") + case <-time.After(time.Second): + //one of the nodes never finished reporting to the server. Something must have gone wrong. + fmt.Fprint(formatter.ColorableStdErr, formatter.F("\n{{bold}}{{red}}Ginkgo timed out waiting for all parallel procs to report back{{/}}\n")) + fmt.Fprint(formatter.ColorableStdErr, formatter.F("{{gray}}Test suite:{{/}} %s (%s)\n\n", suite.PackageName, suite.Path)) + fmt.Fprint(formatter.ColorableStdErr, formatter.Fiw(0, formatter.COLS, "This occurs if a parallel process exits before it reports its results to the Ginkgo CLI. The CLI will now print out all the stdout/stderr output it's collected from the running processes. 
However you may not see anything useful in these logs because the individual test processes usually intercept output to stdout/stderr in order to capture it in the spec reports.\n\nYou may want to try rerunning your test suite with {{light-gray}}--output-interceptor-mode=none{{/}} to see additional output here and debug your suite.\n")) + fmt.Fprintln(formatter.ColorableStdErr, " ") + for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ { + fmt.Fprintf(formatter.ColorableStdErr, formatter.F("{{bold}}Output from proc %d:{{/}}\n", proc)) + fmt.Fprintln(os.Stderr, formatter.Fi(1, "%s", procOutput[proc-1].String())) + } + fmt.Fprintf(os.Stderr, "** End **") + } + + for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ { + output := procOutput[proc-1].String() + if proc == 1 && checkForNoTestsWarning(procOutput[0]) && cliConfig.RequireSuite { + suite.State = TestSuiteStateFailed + } + if strings.Contains(output, "deprecated Ginkgo functionality") { + fmt.Fprintln(os.Stderr, output) + } + } + + if len(coverProfiles) > 0 { + if suite.HasProgrammaticFocus { + fmt.Fprintln(os.Stdout, "coverage: no coverfile was generated because specs are programmatically focused") + } else { + coverProfile := AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0) + err := MergeAndCleanupCoverProfiles(coverProfiles, coverProfile) + command.AbortIfError("Failed to combine cover profiles", err) + + coverage, err := GetCoverageFromCoverProfile(coverProfile) + command.AbortIfError("Failed to compute coverage", err) + if coverage == 0 { + fmt.Fprintln(os.Stdout, "coverage: [no statements]") + } else { + fmt.Fprintf(os.Stdout, "coverage: %.1f%% of statements\n", coverage) + } + } + } + if len(blockProfiles) > 0 { + if suite.HasProgrammaticFocus { + fmt.Fprintln(os.Stdout, "no block profile was generated because specs are programmatically focused") + } else { + blockProfile := AbsPathForGeneratedAsset(goFlagsConfig.BlockProfile, suite, cliConfig, 0) + err := MergeProfiles(blockProfiles, blockProfile) + command.AbortIfError("Failed to combine blockprofiles", err) + } + } + if len(cpuProfiles) > 0 { + if suite.HasProgrammaticFocus { + fmt.Fprintln(os.Stdout, "no cpu profile was generated because specs are programmatically focused") + } else { + cpuProfile := AbsPathForGeneratedAsset(goFlagsConfig.CPUProfile, suite, cliConfig, 0) + err := MergeProfiles(cpuProfiles, cpuProfile) + command.AbortIfError("Failed to combine cpuprofiles", err) + } + } + if len(memProfiles) > 0 { + if suite.HasProgrammaticFocus { + fmt.Fprintln(os.Stdout, "no mem profile was generated because specs are programmatically focused") + } else { + memProfile := AbsPathForGeneratedAsset(goFlagsConfig.MemProfile, suite, cliConfig, 0) + err := MergeProfiles(memProfiles, memProfile) + command.AbortIfError("Failed to combine memprofiles", err) + } + } + if len(mutexProfiles) > 0 { + if suite.HasProgrammaticFocus { + fmt.Fprintln(os.Stdout, "no mutex profile was generated because specs are programmatically focused") + } else { + mutexProfile := AbsPathForGeneratedAsset(goFlagsConfig.MutexProfile, suite, cliConfig, 0) + err := MergeProfiles(mutexProfiles, mutexProfile) + command.AbortIfError("Failed to combine mutexprofiles", err) + } + } + + return suite +} + +func runAfterRunHook(command string, noColor bool, suite TestSuite) { + if command == "" { + return + } + f := formatter.NewWithNoColorBool(noColor) + + // Allow for string replacement to pass input to the command + passed := "[FAIL]" + if suite.State.Is(TestSuiteStatePassed) { + 
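+ // the suite passed; the (ginkgo-suite-passed) placeholder below will be replaced with [PASS]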
passed = "[PASS]" + } + command = strings.ReplaceAll(command, "(ginkgo-suite-passed)", passed) + command = strings.ReplaceAll(command, "(ginkgo-suite-name)", suite.PackageName) + + // Must break command into parts + splitArgs := regexp.MustCompile(`'.+'|".+"|\S+`) + parts := splitArgs.FindAllString(command, -1) + + output, err := exec.Command(parts[0], parts[1:]...).CombinedOutput() + if err != nil { + fmt.Fprintln(formatter.ColorableStdOut, f.Fi(0, "{{red}}{{bold}}After-run-hook failed:{{/}}")) + fmt.Fprintln(formatter.ColorableStdOut, f.Fi(1, "{{red}}%s{{/}}", output)) + } else { + fmt.Fprintln(formatter.ColorableStdOut, f.Fi(0, "{{green}}{{bold}}After-run-hook succeeded:{{/}}")) + fmt.Fprintln(formatter.ColorableStdOut, f.Fi(1, "{{green}}%s{{/}}", output)) + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go new file mode 100644 index 0000000..64dcb1b --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go @@ -0,0 +1,283 @@ +package internal + +import ( + "errors" + "math/rand" + "os" + "path" + "path/filepath" + "regexp" + "strings" + + "github.com/onsi/ginkgo/v2/types" +) + +const TIMEOUT_ELAPSED_FAILURE_REASON = "Suite did not run because the timeout elapsed" +const PRIOR_FAILURES_FAILURE_REASON = "Suite did not run because prior suites failed and --keep-going is not set" +const EMPTY_SKIP_FAILURE_REASON = "Suite did not run go test reported that no test files were found" + +type TestSuiteState uint + +const ( + TestSuiteStateInvalid TestSuiteState = iota + + TestSuiteStateUncompiled + TestSuiteStateCompiled + + TestSuiteStatePassed + + TestSuiteStateSkippedDueToEmptyCompilation + TestSuiteStateSkippedByFilter + TestSuiteStateSkippedDueToPriorFailures + + TestSuiteStateFailed + TestSuiteStateFailedDueToTimeout + TestSuiteStateFailedToCompile +) + +var TestSuiteStateFailureStates = []TestSuiteState{TestSuiteStateFailed, TestSuiteStateFailedDueToTimeout, TestSuiteStateFailedToCompile} + +func (state TestSuiteState) Is(states ...TestSuiteState) bool { + for _, suiteState := range states { + if suiteState == state { + return true + } + } + + return false +} + +type TestSuite struct { + Path string + PackageName string + IsGinkgo bool + + Precompiled bool + PathToCompiledTest string + CompilationError error + + HasProgrammaticFocus bool + State TestSuiteState +} + +func (ts TestSuite) AbsPath() string { + path, _ := filepath.Abs(ts.Path) + return path +} + +func (ts TestSuite) NamespacedName() string { + name := relPath(ts.Path) + name = strings.TrimLeft(name, "."+string(filepath.Separator)) + name = strings.ReplaceAll(name, string(filepath.Separator), "_") + name = strings.ReplaceAll(name, " ", "_") + if name == "" { + return ts.PackageName + } + return name +} + +type TestSuites []TestSuite + +func (ts TestSuites) AnyHaveProgrammaticFocus() bool { + for _, suite := range ts { + if suite.HasProgrammaticFocus { + return true + } + } + + return false +} + +func (ts TestSuites) ThatAreGinkgoSuites() TestSuites { + out := TestSuites{} + for _, suite := range ts { + if suite.IsGinkgo { + out = append(out, suite) + } + } + return out +} + +func (ts TestSuites) CountWithState(states ...TestSuiteState) int { + n := 0 + for _, suite := range ts { + if suite.State.Is(states...) { + n += 1 + } + } + + return n +} + +func (ts TestSuites) WithState(states ...TestSuiteState) TestSuites { + out := TestSuites{} + for _, suite := range ts { + if suite.State.Is(states...) 
{ + out = append(out, suite) + } + } + + return out +} + +func (ts TestSuites) WithoutState(states ...TestSuiteState) TestSuites { + out := TestSuites{} + for _, suite := range ts { + if !suite.State.Is(states...) { + out = append(out, suite) + } + } + + return out +} + +func (ts TestSuites) ShuffledCopy(seed int64) TestSuites { + out := make(TestSuites, len(ts)) + permutation := rand.New(rand.NewSource(seed)).Perm(len(ts)) + for i, j := range permutation { + out[i] = ts[j] + } + return out +} + +func FindSuites(args []string, cliConfig types.CLIConfig, allowPrecompiled bool) TestSuites { + suites := TestSuites{} + + if len(args) > 0 { + for _, arg := range args { + if allowPrecompiled { + suite, err := precompiledTestSuite(arg) + if err == nil { + suites = append(suites, suite) + continue + } + } + recurseForSuite := cliConfig.Recurse + if strings.HasSuffix(arg, "/...") && arg != "/..." { + arg = arg[:len(arg)-4] + recurseForSuite = true + } + suites = append(suites, suitesInDir(arg, recurseForSuite)...) + } + } else { + suites = suitesInDir(".", cliConfig.Recurse) + } + + if cliConfig.SkipPackage != "" { + skipFilters := strings.Split(cliConfig.SkipPackage, ",") + for idx := range suites { + for _, skipFilter := range skipFilters { + if strings.Contains(suites[idx].Path, skipFilter) { + suites[idx].State = TestSuiteStateSkippedByFilter + break + } + } + } + } + + return suites +} + +func precompiledTestSuite(path string) (TestSuite, error) { + info, err := os.Stat(path) + if err != nil { + return TestSuite{}, err + } + + if info.IsDir() { + return TestSuite{}, errors.New("this is a directory, not a file") + } + + if filepath.Ext(path) != ".test" && filepath.Ext(path) != ".exe" { + return TestSuite{}, errors.New("this is not a .test binary") + } + + if filepath.Ext(path) == ".test" && info.Mode()&0111 == 0 { + return TestSuite{}, errors.New("this is not executable") + } + + dir := relPath(filepath.Dir(path)) + packageName := strings.TrimSuffix(filepath.Base(path), ".exe") + packageName = strings.TrimSuffix(packageName, ".test") + + path, err = filepath.Abs(path) + if err != nil { + return TestSuite{}, err + } + + return TestSuite{ + Path: dir, + PackageName: packageName, + IsGinkgo: true, + Precompiled: true, + PathToCompiledTest: path, + State: TestSuiteStateCompiled, + }, nil +} + +func suitesInDir(dir string, recurse bool) TestSuites { + suites := TestSuites{} + + if path.Base(dir) == "vendor" { + return suites + } + + files, _ := os.ReadDir(dir) + re := regexp.MustCompile(`^[^._].*_test\.go$`) + for _, file := range files { + if !file.IsDir() && re.Match([]byte(file.Name())) { + suite := TestSuite{ + Path: relPath(dir), + PackageName: packageNameForSuite(dir), + IsGinkgo: filesHaveGinkgoSuite(dir, files), + State: TestSuiteStateUncompiled, + } + suites = append(suites, suite) + break + } + } + + if recurse { + re = regexp.MustCompile(`^[._]`) + for _, file := range files { + if file.IsDir() && !re.Match([]byte(file.Name())) { + suites = append(suites, suitesInDir(dir+"/"+file.Name(), recurse)...) + } + } + } + + return suites +} + +func relPath(dir string) string { + dir, _ = filepath.Abs(dir) + cwd, _ := os.Getwd() + dir, _ = filepath.Rel(cwd, filepath.Clean(dir)) + + if string(dir[0]) != "." { + dir = "." 
+ string(filepath.Separator) + dir + } + + return dir +} + +func packageNameForSuite(dir string) string { + path, _ := filepath.Abs(dir) + return filepath.Base(path) +} + +func filesHaveGinkgoSuite(dir string, files []os.DirEntry) bool { + reTestFile := regexp.MustCompile(`_test\.go$`) + reGinkgo := regexp.MustCompile(`package ginkgo|\/ginkgo"|\/ginkgo\/v2"|\/ginkgo\/v2/dsl/`) + + for _, file := range files { + if !file.IsDir() && reTestFile.Match([]byte(file.Name())) { + contents, _ := os.ReadFile(dir + "/" + file.Name()) + if reGinkgo.Match(contents) { + return true + } + } + } + + return false +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go new file mode 100644 index 0000000..bd9ca7d --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go @@ -0,0 +1,86 @@ +package internal + +import ( + "fmt" + "io" + "os" + "os/exec" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/ginkgo/command" +) + +func FileExists(path string) bool { + _, err := os.Stat(path) + return err == nil +} + +func CopyFile(src string, dest string) error { + srcFile, err := os.Open(src) + if err != nil { + return err + } + + srcStat, err := srcFile.Stat() + if err != nil { + return err + } + + if _, err := os.Stat(dest); err == nil { + os.Remove(dest) + } + + destFile, err := os.OpenFile(dest, os.O_WRONLY|os.O_CREATE, srcStat.Mode()) + if err != nil { + return err + } + + _, err = io.Copy(destFile, srcFile) + if err != nil { + return err + } + + if err := srcFile.Close(); err != nil { + return err + } + return destFile.Close() +} + +func GoFmt(path string) { + out, err := exec.Command("go", "fmt", path).CombinedOutput() + if err != nil { + command.AbortIfError(fmt.Sprintf("Could not fmt:\n%s\n", string(out)), err) + } +} + +func PluralizedWord(singular, plural string, count int) string { + if count == 1 { + return singular + } + return plural +} + +func FailedSuitesReport(suites TestSuites, f formatter.Formatter) string { + out := "" + out += "There were failures detected in the following suites:\n" + + maxPackageNameLength := 0 + for _, suite := range suites.WithState(TestSuiteStateFailureStates...) 
{ + if len(suite.PackageName) > maxPackageNameLength { + maxPackageNameLength = len(suite.PackageName) + } + } + + packageNameFormatter := fmt.Sprintf("%%%ds", maxPackageNameLength) + for _, suite := range suites { + switch suite.State { + case TestSuiteStateFailed: + out += f.Fi(1, "{{red}}"+packageNameFormatter+" {{gray}}%s{{/}}\n", suite.PackageName, suite.Path) + case TestSuiteStateFailedToCompile: + out += f.Fi(1, "{{red}}"+packageNameFormatter+" {{gray}}%s {{magenta}}[Compilation failure]{{/}}\n", suite.PackageName, suite.Path) + case TestSuiteStateFailedDueToTimeout: + out += f.Fi(1, "{{red}}"+packageNameFormatter+" {{gray}}%s {{orange}}[%s]{{/}}\n", suite.PackageName, suite.Path, TIMEOUT_ELAPSED_FAILURE_REASON) + } + } + return out +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/verify_version.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/verify_version.go new file mode 100644 index 0000000..9da1bab --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/verify_version.go @@ -0,0 +1,54 @@ +package internal + +import ( + "fmt" + "os/exec" + "regexp" + "strings" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/types" +) + +var versionRe = regexp.MustCompile(`v(\d+\.\d+\.\d+)`) + +func VerifyCLIAndFrameworkVersion(suites TestSuites) { + cliVersion := types.VERSION + mismatches := map[string][]string{} + + for _, suite := range suites { + cmd := exec.Command("go", "list", "-m", "github.com/onsi/ginkgo/v2") + cmd.Dir = suite.Path + output, err := cmd.CombinedOutput() + if err != nil { + continue + } + components := strings.Split(string(output), " ") + if len(components) != 2 { + continue + } + matches := versionRe.FindStringSubmatch(components[1]) + if matches == nil || len(matches) != 2 { + continue + } + libraryVersion := matches[1] + if cliVersion != libraryVersion { + mismatches[libraryVersion] = append(mismatches[libraryVersion], suite.PackageName) + } + } + + if len(mismatches) == 0 { + return + } + + fmt.Println(formatter.F("{{red}}{{bold}}Ginkgo detected a version mismatch between the Ginkgo CLI and the version of Ginkgo imported by your packages:{{/}}")) + + fmt.Println(formatter.Fi(1, "Ginkgo CLI Version:")) + fmt.Println(formatter.Fi(2, "{{bold}}%s{{/}}", cliVersion)) + fmt.Println(formatter.Fi(1, "Mismatched package versions found:")) + for version, packages := range mismatches { + fmt.Println(formatter.Fi(2, "{{bold}}%s{{/}} used by %s", version, strings.Join(packages, ", "))) + } + fmt.Println("") + fmt.Println(formatter.Fiw(1, formatter.COLS, "{{gray}}Ginkgo will continue to attempt to run, but you may see errors (including flag parsing errors) and should either update your go.mod or your version of the Ginkgo CLI to match.\n\nTo install the matching version of the CLI run\n {{bold}}go install github.com/onsi/ginkgo/v2/ginkgo{{/}}{{gray}}\nfrom a path that contains a go.mod file.
Alternatively you can use\n {{bold}}go run github.com/onsi/ginkgo/v2/ginkgo{{/}}{{gray}}\nfrom a path that contains a go.mod file to invoke the matching version of the Ginkgo CLI.\n\nIf you are attempting to test multiple packages that each have a different version of the Ginkgo library with a single Ginkgo CLI that is currently unsupported.\n{{/}}")) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go new file mode 100644 index 0000000..6c61f09 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go @@ -0,0 +1,123 @@ +package labels + +import ( + "fmt" + "go/ast" + "go/parser" + "go/token" + "sort" + "strconv" + "strings" + + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/types" + "golang.org/x/tools/go/ast/inspector" +) + +func BuildLabelsCommand() command.Command { + var cliConfig = types.NewDefaultCLIConfig() + + flags, err := types.BuildLabelsCommandFlagSet(&cliConfig) + if err != nil { + panic(err) + } + + return command.Command{ + Name: "labels", + Usage: "ginkgo labels ", + Flags: flags, + ShortDoc: "List labels detected in the passed-in packages (or the package in the current directory if left blank).", + DocLink: "spec-labels", + Command: func(args []string, _ []string) { + ListLabels(args, cliConfig) + }, + } +} + +func ListLabels(args []string, cliConfig types.CLIConfig) { + suites := internal.FindSuites(args, cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter) + if len(suites) == 0 { + command.AbortWith("Found no test suites") + } + for _, suite := range suites { + labels := fetchLabelsFromPackage(suite.Path) + if len(labels) == 0 { + fmt.Printf("%s: No labels found\n", suite.PackageName) + } else { + fmt.Printf("%s: [%s]\n", suite.PackageName, strings.Join(labels, ", ")) + } + } +} + +func fetchLabelsFromPackage(packagePath string) []string { + fset := token.NewFileSet() + parsedPackages, err := parser.ParseDir(fset, packagePath, nil, 0) + command.AbortIfError("Failed to parse package source:", err) + + files := []*ast.File{} + hasTestPackage := false + for key, pkg := range parsedPackages { + if strings.HasSuffix(key, "_test") { + hasTestPackage = true + for _, file := range pkg.Files { + files = append(files, file) + } + } + } + if !hasTestPackage { + for _, pkg := range parsedPackages { + for _, file := range pkg.Files { + files = append(files, file) + } + } + } + + seen := map[string]bool{} + labels := []string{} + ispr := inspector.New(files) + ispr.Preorder([]ast.Node{&ast.CallExpr{}}, func(n ast.Node) { + potentialLabels := fetchLabels(n.(*ast.CallExpr)) + for _, label := range potentialLabels { + if !seen[label] { + seen[label] = true + labels = append(labels, strconv.Quote(label)) + } + } + }) + + sort.Strings(labels) + return labels +} + +func fetchLabels(callExpr *ast.CallExpr) []string { + out := []string{} + switch expr := callExpr.Fun.(type) { + case *ast.Ident: + if expr.Name != "Label" { + return out + } + case *ast.SelectorExpr: + if expr.Sel.Name != "Label" { + return out + } + default: + return out + } + for _, arg := range callExpr.Args { + switch expr := arg.(type) { + case *ast.BasicLit: + if expr.Kind == token.STRING { + unquoted, err := strconv.Unquote(expr.Value) + if err != nil { + unquoted = expr.Value + } + validated, err := types.ValidateAndCleanupLabel(unquoted, types.CodeLocation{}) + if err == nil { + out = append(out, validated) + } 
+ } + } + } + return out +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go new file mode 100644 index 0000000..e9abb27 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go @@ -0,0 +1,58 @@ +package main + +import ( + "fmt" + "os" + + "github.com/onsi/ginkgo/v2/ginkgo/build" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/generators" + "github.com/onsi/ginkgo/v2/ginkgo/labels" + "github.com/onsi/ginkgo/v2/ginkgo/outline" + "github.com/onsi/ginkgo/v2/ginkgo/run" + "github.com/onsi/ginkgo/v2/ginkgo/unfocus" + "github.com/onsi/ginkgo/v2/ginkgo/watch" + "github.com/onsi/ginkgo/v2/types" +) + +var program command.Program + +func GenerateCommands() []command.Command { + return []command.Command{ + watch.BuildWatchCommand(), + build.BuildBuildCommand(), + generators.BuildBootstrapCommand(), + generators.BuildGenerateCommand(), + labels.BuildLabelsCommand(), + outline.BuildOutlineCommand(), + unfocus.BuildUnfocusCommand(), + BuildVersionCommand(), + } +} + +func main() { + program = command.Program{ + Name: "ginkgo", + Heading: fmt.Sprintf("Ginkgo Version %s", types.VERSION), + Commands: GenerateCommands(), + DefaultCommand: run.BuildRunCommand(), + DeprecatedCommands: []command.DeprecatedCommand{ + {Name: "convert", Deprecation: types.Deprecations.Convert()}, + {Name: "blur", Deprecation: types.Deprecations.Blur()}, + {Name: "nodot", Deprecation: types.Deprecations.Nodot()}, + }, + } + + program.RunAndExit(os.Args) +} + +func BuildVersionCommand() command.Command { + return command.Command{ + Name: "version", + Usage: "ginkgo version", + ShortDoc: "Print Ginkgo's version", + Command: func(_ []string, _ []string) { + fmt.Printf("Ginkgo Version %s\n", types.VERSION) + }, + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go new file mode 100644 index 0000000..0b9b19f --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go @@ -0,0 +1,302 @@ +package outline + +import ( + "github.com/onsi/ginkgo/v2/types" + "go/ast" + "go/token" + "strconv" +) + +const ( + // undefinedTextAlt is used if the spec/container text cannot be derived + undefinedTextAlt = "undefined" +) + +// ginkgoMetadata holds useful bits of information for every entry in the outline +type ginkgoMetadata struct { + // Name is the spec or container function name, e.g. 
`Describe` or `It` + Name string `json:"name"` + + // Text is the `text` argument passed to specs, and some containers + Text string `json:"text"` + + // Start is the position of first character of the spec or container block + Start int `json:"start"` + + // End is the position of first character immediately after the spec or container block + End int `json:"end"` + + Spec bool `json:"spec"` + Focused bool `json:"focused"` + Pending bool `json:"pending"` + Labels []string `json:"labels"` +} + +// ginkgoNode is used to construct the outline as a tree +type ginkgoNode struct { + ginkgoMetadata + Nodes []*ginkgoNode `json:"nodes"` +} + +type walkFunc func(n *ginkgoNode) + +func (n *ginkgoNode) PreOrder(f walkFunc) { + f(n) + for _, m := range n.Nodes { + m.PreOrder(f) + } +} + +func (n *ginkgoNode) PostOrder(f walkFunc) { + for _, m := range n.Nodes { + m.PostOrder(f) + } + f(n) +} + +func (n *ginkgoNode) Walk(pre, post walkFunc) { + pre(n) + for _, m := range n.Nodes { + m.Walk(pre, post) + } + post(n) +} + +// PropagateInheritedProperties propagates the Pending and Focused properties +// through the subtree rooted at n. +func (n *ginkgoNode) PropagateInheritedProperties() { + n.PreOrder(func(thisNode *ginkgoNode) { + for _, descendantNode := range thisNode.Nodes { + if thisNode.Pending { + descendantNode.Pending = true + descendantNode.Focused = false + } + if thisNode.Focused && !descendantNode.Pending { + descendantNode.Focused = true + } + } + }) +} + +// BackpropagateUnfocus propagates the Focused property through the subtree +// rooted at n. It applies the rule described in the Ginkgo docs: +// > Nested programmatically focused specs follow a simple rule: if a +// > leaf-node is marked focused, any of its ancestor nodes that are marked +// > focus will be unfocused. +func (n *ginkgoNode) BackpropagateUnfocus() { + focusedSpecInSubtreeStack := []bool{} + n.PostOrder(func(thisNode *ginkgoNode) { + if thisNode.Spec { + focusedSpecInSubtreeStack = append(focusedSpecInSubtreeStack, thisNode.Focused) + return + } + focusedSpecInSubtree := false + for range thisNode.Nodes { + focusedSpecInSubtree = focusedSpecInSubtree || focusedSpecInSubtreeStack[len(focusedSpecInSubtreeStack)-1] + focusedSpecInSubtreeStack = focusedSpecInSubtreeStack[0 : len(focusedSpecInSubtreeStack)-1] + } + focusedSpecInSubtreeStack = append(focusedSpecInSubtreeStack, focusedSpecInSubtree) + if focusedSpecInSubtree { + thisNode.Focused = false + } + }) + +} + +func packageAndIdentNamesFromCallExpr(ce *ast.CallExpr) (string, string, bool) { + switch ex := ce.Fun.(type) { + case *ast.Ident: + return "", ex.Name, true + case *ast.SelectorExpr: + pkgID, ok := ex.X.(*ast.Ident) + if !ok { + return "", "", false + } + // A package identifier is top-level, so Obj must be nil + if pkgID.Obj != nil { + return "", "", false + } + if ex.Sel == nil { + return "", "", false + } + return pkgID.Name, ex.Sel.Name, true + default: + return "", "", false + } +} + +// absoluteOffsetsForNode derives the absolute character offsets of the node start and +// end positions. +func absoluteOffsetsForNode(fset *token.FileSet, n ast.Node) (start, end int) { + return fset.PositionFor(n.Pos(), false).Offset, fset.PositionFor(n.End(), false).Offset +} + +// ginkgoNodeFromCallExpr derives an outline entry from a go AST subtree +// corresponding to a Ginkgo container or spec. 
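+// It reports ok only when the identifier is a recognized Ginkgo node name and the call uses the package identifier under which Ginkgo was imported.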
+func ginkgoNodeFromCallExpr(fset *token.FileSet, ce *ast.CallExpr, ginkgoPackageName *string) (*ginkgoNode, bool) { + packageName, identName, ok := packageAndIdentNamesFromCallExpr(ce) + if !ok { + return nil, false + } + + n := ginkgoNode{} + n.Name = identName + n.Start, n.End = absoluteOffsetsForNode(fset, ce) + n.Nodes = make([]*ginkgoNode, 0) + switch identName { + case "It", "Specify", "Entry": + n.Spec = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + n.Labels = labelFromCallExpr(ce) + n.Pending = pendingFromCallExpr(ce) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "FIt", "FSpecify", "FEntry": + n.Spec = true + n.Focused = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + n.Labels = labelFromCallExpr(ce) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "PIt", "PSpecify", "XIt", "XSpecify", "PEntry", "XEntry": + n.Spec = true + n.Pending = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + n.Labels = labelFromCallExpr(ce) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "Context", "Describe", "When", "DescribeTable": + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + n.Labels = labelFromCallExpr(ce) + n.Pending = pendingFromCallExpr(ce) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "FContext", "FDescribe", "FWhen", "FDescribeTable": + n.Focused = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + n.Labels = labelFromCallExpr(ce) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "PContext", "PDescribe", "PWhen", "XContext", "XDescribe", "XWhen", "PDescribeTable", "XDescribeTable": + n.Pending = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + n.Labels = labelFromCallExpr(ce) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "By": + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "AfterEach", "BeforeEach": + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "JustAfterEach", "JustBeforeEach": + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "AfterSuite", "BeforeSuite": + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "SynchronizedAfterSuite", "SynchronizedBeforeSuite": + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + default: + return nil, false + } +} + +// textOrAltFromCallExpr tries to derive the "text" of a Ginkgo spec or +// container. If it cannot derive it, it returns the alt text. +func textOrAltFromCallExpr(ce *ast.CallExpr, alt string) string { + text, defined := textFromCallExpr(ce) + if !defined { + return alt + } + return text +} + +// textFromCallExpr tries to derive the "text" of a Ginkgo spec or container. If +// it cannot derive it, it returns false. 
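+// The text must be the call's first argument and a basic literal; string and char literals are unquoted before being returned.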
+func textFromCallExpr(ce *ast.CallExpr) (string, bool) { + if len(ce.Args) < 1 { + return "", false + } + text, ok := ce.Args[0].(*ast.BasicLit) + if !ok { + return "", false + } + switch text.Kind { + case token.CHAR, token.STRING: + // For token.CHAR and token.STRING, Value is quoted + unquoted, err := strconv.Unquote(text.Value) + if err != nil { + // If unquoting fails, just use the raw Value + return text.Value, true + } + return unquoted, true + default: + return text.Value, true + } +} + +func labelFromCallExpr(ce *ast.CallExpr) []string { + + labels := []string{} + if len(ce.Args) < 2 { + return labels + } + + for _, arg := range ce.Args[1:] { + switch expr := arg.(type) { + case *ast.CallExpr: + id, ok := expr.Fun.(*ast.Ident) + if !ok { + // to skip over cases where the expr.Fun. is actually *ast.SelectorExpr + continue + } + if id.Name == "Label" { + ls := extractLabels(expr) + for _, label := range ls { + labels = append(labels, label) + } + } + } + } + return labels +} + +func extractLabels(expr *ast.CallExpr) []string { + out := []string{} + for _, arg := range expr.Args { + switch expr := arg.(type) { + case *ast.BasicLit: + if expr.Kind == token.STRING { + unquoted, err := strconv.Unquote(expr.Value) + if err != nil { + unquoted = expr.Value + } + validated, err := types.ValidateAndCleanupLabel(unquoted, types.CodeLocation{}) + if err == nil { + out = append(out, validated) + } + } + } + } + + return out +} + +func pendingFromCallExpr(ce *ast.CallExpr) bool { + + pending := false + if len(ce.Args) < 2 { + return pending + } + + for _, arg := range ce.Args[1:] { + switch expr := arg.(type) { + case *ast.CallExpr: + id, ok := expr.Fun.(*ast.Ident) + if !ok { + // to skip over cases where the expr.Fun. is actually *ast.SelectorExpr + continue + } + if id.Name == "Pending" { + pending = true + } + case *ast.Ident: + if expr.Name == "Pending" { + pending = true + } + } + } + return pending +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go new file mode 100644 index 0000000..67ec5ab --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go @@ -0,0 +1,65 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Most of the required functions were available in the +// "golang.org/x/tools/go/ast/astutil" package, but not exported. +// They were copied from https://github.com/golang/tools/blob/2b0845dc783e36ae26d683f4915a5840ef01ab0f/go/ast/astutil/imports.go + +package outline + +import ( + "go/ast" + "strconv" + "strings" +) + +// packageNameForImport returns the package name for the package. If the package +// is not imported, it returns nil. "Package name" refers to `pkgname` in the +// call expression `pkgname.ExportedIdentifier`. Examples: +// (import path not found) -> nil +// "import example.com/pkg/foo" -> "foo" +// "import fooalias example.com/pkg/foo" -> "fooalias" +// "import . example.com/pkg/foo" -> "" +func packageNameForImport(f *ast.File, path string) *string { + spec := importSpec(f, path) + if spec == nil { + return nil + } + name := spec.Name.String() + if name == "" { + // If the package name is not explicitly specified, + // make an educated guess. This is not guaranteed to be correct. + lastSlash := strings.LastIndex(path, "/") + if lastSlash == -1 { + name = path + } else { + name = path[lastSlash+1:] + } + } + if name == "." 
{ + name = "" + } + return &name +} + +// importSpec returns the import spec if f imports path, +// or nil otherwise. +func importSpec(f *ast.File, path string) *ast.ImportSpec { + for _, s := range f.Imports { + if strings.HasPrefix(importPath(s), path) { + return s + } + } + return nil +} + +// importPath returns the unquoted import path of s, +// or "" if the path is not properly quoted. +func importPath(s *ast.ImportSpec) string { + t, err := strconv.Unquote(s.Path.Value) + if err != nil { + return "" + } + return t +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go new file mode 100644 index 0000000..c2327cd --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go @@ -0,0 +1,110 @@ +package outline + +import ( + "encoding/json" + "fmt" + "go/ast" + "go/token" + "strings" + + "golang.org/x/tools/go/ast/inspector" +) + +const ( + // ginkgoImportPath is the well-known ginkgo import path + ginkgoImportPath = "github.com/onsi/ginkgo/v2" +) + +// FromASTFile returns an outline for a Ginkgo test source file +func FromASTFile(fset *token.FileSet, src *ast.File) (*outline, error) { + ginkgoPackageName := packageNameForImport(src, ginkgoImportPath) + if ginkgoPackageName == nil { + return nil, fmt.Errorf("file does not import %q", ginkgoImportPath) + } + + root := ginkgoNode{} + stack := []*ginkgoNode{&root} + ispr := inspector.New([]*ast.File{src}) + ispr.Nodes([]ast.Node{(*ast.CallExpr)(nil)}, func(node ast.Node, push bool) bool { + if push { + // Pre-order traversal + ce, ok := node.(*ast.CallExpr) + if !ok { + // Because `Nodes` calls this function only when the node is an + // ast.CallExpr, this should never happen + panic(fmt.Errorf("node starting at %d, ending at %d is not an *ast.CallExpr", node.Pos(), node.End())) + } + gn, ok := ginkgoNodeFromCallExpr(fset, ce, ginkgoPackageName) + if !ok { + // Node is not a Ginkgo spec or container, continue + return true + } + parent := stack[len(stack)-1] + parent.Nodes = append(parent.Nodes, gn) + stack = append(stack, gn) + return true + } + // Post-order traversal + start, end := absoluteOffsetsForNode(fset, node) + lastVisitedGinkgoNode := stack[len(stack)-1] + if start != lastVisitedGinkgoNode.Start || end != lastVisitedGinkgoNode.End { + // Node is not a Ginkgo spec or container, so it was not pushed onto the stack, continue + return true + } + stack = stack[0 : len(stack)-1] + return true + }) + if len(root.Nodes) == 0 { + return &outline{[]*ginkgoNode{}}, nil + } + + // Derive the final focused property for all nodes. This must be done + // _before_ propagating the inherited focused property. + root.BackpropagateUnfocus() + // Now, propagate inherited properties, including focused and pending. + root.PropagateInheritedProperties() + + return &outline{root.Nodes}, nil +} + +type outline struct { + Nodes []*ginkgoNode `json:"nodes"` +} + +func (o *outline) MarshalJSON() ([]byte, error) { + return json.Marshal(o.Nodes) +} + +// String returns a CSV-formatted outline. Spec or container are output in +// depth-first order. +func (o *outline) String() string { + return o.StringIndent(0) +} + +// StringIndent returns a CSV-formated outline, but every line is indented by +// one 'width' of spaces for every level of nesting. 
+func (o *outline) StringIndent(width int) string { + var b strings.Builder + b.WriteString("Name,Text,Start,End,Spec,Focused,Pending,Labels\n") + + currentIndent := 0 + pre := func(n *ginkgoNode) { + b.WriteString(fmt.Sprintf("%*s", currentIndent, "")) + var labels string + if len(n.Labels) == 1 { + labels = n.Labels[0] + } else { + labels = strings.Join(n.Labels, ", ") + } + // enclose labels in a double-quoted, comma-separated list so that when imported into a CSV app the Labels column holds the comma-separated strings + b.WriteString(fmt.Sprintf("%s,%s,%d,%d,%t,%t,%t,\"%s\"\n", n.Name, n.Text, n.Start, n.End, n.Spec, n.Focused, n.Pending, labels)) + currentIndent += width + } + post := func(n *ginkgoNode) { + currentIndent -= width + } + for _, n := range o.Nodes { + n.Walk(pre, post) + } + return b.String() +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go new file mode 100644 index 0000000..36698d4 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go @@ -0,0 +1,98 @@ +package outline + +import ( + "encoding/json" + "fmt" + "go/parser" + "go/token" + "os" + + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/types" +) + +const ( + // indentWidth is the width used by the 'indent' output + indentWidth = 4 + // stdinAlias is a portable alias for stdin. This convention is used in + // other CLIs, e.g., kubectl. + stdinAlias = "-" + usageCommand = "ginkgo outline <filename>" +) + +type outlineConfig struct { + Format string +} + +func BuildOutlineCommand() command.Command { + conf := outlineConfig{ + Format: "csv", + } + flags, err := types.NewGinkgoFlagSet( + types.GinkgoFlags{ + {Name: "format", KeyPath: "Format", + Usage: "Format of outline", + UsageArgument: "one of 'csv', 'indent', or 'json'", + UsageDefaultValue: conf.Format, + }, + }, + &conf, + types.GinkgoFlagSections{}, + ) + if err != nil { + panic(err) + } + + return command.Command{ + Name: "outline", + Usage: "ginkgo outline <filename>", + ShortDoc: "Create an outline of Ginkgo symbols for a file", + Documentation: "To read from stdin, use: `ginkgo outline -`", + DocLink: "creating-an-outline-of-specs", + Flags: flags, + Command: func(args []string, _ []string) { + outlineFile(args, conf.Format) + }, + } +} + +func outlineFile(args []string, format string) { + if len(args) != 1 { + command.AbortWithUsage("outline expects exactly one argument") + } + + filename := args[0] + var src *os.File + if filename == stdinAlias { + src = os.Stdin + } else { + var err error + src, err = os.Open(filename) + command.AbortIfError("Failed to open file:", err) + } + + fset := token.NewFileSet() + + parsedSrc, err := parser.ParseFile(fset, filename, src, 0) + command.AbortIfError("Failed to parse source:", err) + + o, err := FromASTFile(fset, parsedSrc) + command.AbortIfError("Failed to create outline:", err) + + var oerr error + switch format { + case "csv": + _, oerr = fmt.Print(o) + case "indent": + _, oerr = fmt.Print(o.StringIndent(indentWidth)) + case "json": + b, err := json.Marshal(o) + if err != nil { + println(fmt.Sprintf("error marshalling to json: %s", err)) + } + _, oerr = fmt.Println(string(b)) + default: + command.AbortWith("Format %s not accepted", format) + } + command.AbortIfError("Failed to write outline:", oerr) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go new file mode 100644 index 0000000..aaed4d5 ---
/dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go @@ -0,0 +1,232 @@ +package run + +import ( + "fmt" + "os" + "strings" + "time" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/internal/interrupt_handler" + "github.com/onsi/ginkgo/v2/types" +) + +func BuildRunCommand() command.Command { + var suiteConfig = types.NewDefaultSuiteConfig() + var reporterConfig = types.NewDefaultReporterConfig() + var cliConfig = types.NewDefaultCLIConfig() + var goFlagsConfig = types.NewDefaultGoFlagsConfig() + + flags, err := types.BuildRunCommandFlagSet(&suiteConfig, &reporterConfig, &cliConfig, &goFlagsConfig) + if err != nil { + panic(err) + } + + interruptHandler := interrupt_handler.NewInterruptHandler(nil) + interrupt_handler.SwallowSigQuit() + + return command.Command{ + Name: "run", + Flags: flags, + Usage: "ginkgo run -- ", + ShortDoc: "Run the tests in the passed in (or the package in the current directory if left blank)", + Documentation: "Any arguments after -- will be passed to the test.", + DocLink: "running-tests", + Command: func(args []string, additionalArgs []string) { + var errors []error + cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig) + command.AbortIfErrors("Ginkgo detected configuration issues:", errors) + + runner := &SpecRunner{ + cliConfig: cliConfig, + goFlagsConfig: goFlagsConfig, + suiteConfig: suiteConfig, + reporterConfig: reporterConfig, + flags: flags, + + interruptHandler: interruptHandler, + } + + runner.RunSpecs(args, additionalArgs) + }, + } +} + +type SpecRunner struct { + suiteConfig types.SuiteConfig + reporterConfig types.ReporterConfig + cliConfig types.CLIConfig + goFlagsConfig types.GoFlagsConfig + flags types.GinkgoFlagSet + + interruptHandler *interrupt_handler.InterruptHandler +} + +func (r *SpecRunner) RunSpecs(args []string, additionalArgs []string) { + suites := internal.FindSuites(args, r.cliConfig, true) + skippedSuites := suites.WithState(internal.TestSuiteStateSkippedByFilter) + suites = suites.WithoutState(internal.TestSuiteStateSkippedByFilter) + + internal.VerifyCLIAndFrameworkVersion(suites) + + if len(skippedSuites) > 0 { + fmt.Println("Will skip:") + for _, skippedSuite := range skippedSuites { + fmt.Println(" " + skippedSuite.Path) + } + } + + if len(skippedSuites) > 0 && len(suites) == 0 { + command.AbortGracefullyWith("All tests skipped! 
Exiting...") + } + + if len(suites) == 0 { + command.AbortWith("Found no test suites") + } + + if len(suites) > 1 && !r.flags.WasSet("succinct") && r.reporterConfig.Verbosity().LT(types.VerbosityLevelVerbose) { + r.reporterConfig.Succinct = true + } + + t := time.Now() + var endTime time.Time + if r.suiteConfig.Timeout > 0 { + endTime = t.Add(r.suiteConfig.Timeout) + } + + iteration := 0 +OUTER_LOOP: + for { + if !r.flags.WasSet("seed") { + r.suiteConfig.RandomSeed = time.Now().Unix() + } + if r.cliConfig.RandomizeSuites && len(suites) > 1 { + suites = suites.ShuffledCopy(r.suiteConfig.RandomSeed) + } + + opc := internal.NewOrderedParallelCompiler(r.cliConfig.ComputedNumCompilers()) + opc.StartCompiling(suites, r.goFlagsConfig) + + SUITE_LOOP: + for { + suiteIdx, suite := opc.Next() + if suiteIdx >= len(suites) { + break SUITE_LOOP + } + suites[suiteIdx] = suite + + if r.interruptHandler.Status().Interrupted() { + opc.StopAndDrain() + break OUTER_LOOP + } + + if suites[suiteIdx].State.Is(internal.TestSuiteStateSkippedDueToEmptyCompilation) { + fmt.Printf("Skipping %s (no test files)\n", suite.Path) + continue SUITE_LOOP + } + + if suites[suiteIdx].State.Is(internal.TestSuiteStateFailedToCompile) { + fmt.Println(suites[suiteIdx].CompilationError.Error()) + if !r.cliConfig.KeepGoing { + opc.StopAndDrain() + } + continue SUITE_LOOP + } + + if suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 && !r.cliConfig.KeepGoing { + suites[suiteIdx].State = internal.TestSuiteStateSkippedDueToPriorFailures + opc.StopAndDrain() + continue SUITE_LOOP + } + + if !endTime.IsZero() { + r.suiteConfig.Timeout = endTime.Sub(time.Now()) + if r.suiteConfig.Timeout <= 0 { + suites[suiteIdx].State = internal.TestSuiteStateFailedDueToTimeout + opc.StopAndDrain() + continue SUITE_LOOP + } + } + + suites[suiteIdx] = internal.RunCompiledSuite(suites[suiteIdx], r.suiteConfig, r.reporterConfig, r.cliConfig, r.goFlagsConfig, additionalArgs) + } + + if suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 { + if iteration > 0 { + fmt.Printf("\nTests failed on attempt #%d\n\n", iteration+1) + } + break OUTER_LOOP + } + + if r.cliConfig.UntilItFails { + fmt.Printf("\nAll tests passed...\nWill keep running them until they fail.\nThis was attempt #%d\n%s\n", iteration+1, orcMessage(iteration+1)) + } else if r.cliConfig.Repeat > 0 && iteration < r.cliConfig.Repeat { + fmt.Printf("\nAll tests passed...\nThis was attempt %d of %d.\n", iteration+1, r.cliConfig.Repeat+1) + } else { + break OUTER_LOOP + } + iteration += 1 + } + + internal.Cleanup(r.goFlagsConfig, suites...) + + messages, err := internal.FinalizeProfilesAndReportsForSuites(suites, r.cliConfig, r.suiteConfig, r.reporterConfig, r.goFlagsConfig) + command.AbortIfError("could not finalize profiles:", err) + for _, message := range messages { + fmt.Println(message) + } + + fmt.Printf("\nGinkgo ran %d %s in %s\n", len(suites), internal.PluralizedWord("suite", "suites", len(suites)), time.Since(t)) + + if suites.CountWithState(internal.TestSuiteStateFailureStates...) 
== 0 { + if suites.AnyHaveProgrammaticFocus() && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" { + fmt.Printf("Test Suite Passed\n") + fmt.Printf("Detected Programmatic Focus - setting exit status to %d\n", types.GINKGO_FOCUS_EXIT_CODE) + command.Abort(command.AbortDetails{ExitCode: types.GINKGO_FOCUS_EXIT_CODE}) + } else { + fmt.Printf("Test Suite Passed\n") + command.Abort(command.AbortDetails{}) + } + } else { + fmt.Fprintln(formatter.ColorableStdOut, "") + if len(suites) > 1 && suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 { + fmt.Fprintln(formatter.ColorableStdOut, + internal.FailedSuitesReport(suites, formatter.NewWithNoColorBool(r.reporterConfig.NoColor))) + } + fmt.Printf("Test Suite Failed\n") + command.Abort(command.AbortDetails{ExitCode: 1}) + } +} + +func orcMessage(iteration int) string { + if iteration < 10 { + return "" + } else if iteration < 30 { + return []string{ + "If at first you succeed...", + "...try, try again.", + "Looking good!", + "Still good...", + "I think your tests are fine....", + "Yep, still passing", + "Oh boy, here I go testin' again!", + "Even the gophers are getting bored", + "Did you try -race?", + "Maybe you should stop now?", + "I'm getting tired...", + "What if I just made you a sandwich?", + "Hit ^C, hit ^C, please hit ^C", + "Make it stop. Please!", + "Come on! Enough is enough!", + "Dave, this conversation can serve no purpose anymore. Goodbye.", + "Just what do you think you're doing, Dave? ", + "I, Sisyphus", + "Insanity: doing the same thing over and over again and expecting different results. -Einstein", + "I guess Einstein never tried to churn butter", + }[iteration-10] + "\n" + } else { + return "No, seriously... you can probably stop now.\n" + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go new file mode 100644 index 0000000..7dd2943 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go @@ -0,0 +1,186 @@ +package unfocus + +import ( + "bytes" + "fmt" + "go/ast" + "go/parser" + "go/token" + "io" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/onsi/ginkgo/v2/ginkgo/command" +) + +func BuildUnfocusCommand() command.Command { + return command.Command{ + Name: "unfocus", + Usage: "ginkgo unfocus", + ShortDoc: "Recursively unfocus any focused tests under the current directory", + DocLink: "filtering-specs", + Command: func(_ []string, _ []string) { + unfocusSpecs() + }, + } +} + +func unfocusSpecs() { + fmt.Println("Scanning for focus...") + + goFiles := make(chan string) + go func() { + unfocusDir(goFiles, ".") + close(goFiles) + }() + + const workers = 10 + wg := sync.WaitGroup{} + wg.Add(workers) + + for i := 0; i < workers; i++ { + go func() { + for path := range goFiles { + unfocusFile(path) + } + wg.Done() + }() + } + + wg.Wait() +} + +func unfocusDir(goFiles chan string, path string) { + files, err := os.ReadDir(path) + if err != nil { + fmt.Println(err.Error()) + return + } + + for _, f := range files { + switch { + case f.IsDir() && shouldProcessDir(f.Name()): + unfocusDir(goFiles, filepath.Join(path, f.Name())) + case !f.IsDir() && shouldProcessFile(f.Name()): + goFiles <- filepath.Join(path, f.Name()) + } + } +} + +func shouldProcessDir(basename string) bool { + return basename != "vendor" && !strings.HasPrefix(basename, ".") +} + +func shouldProcessFile(basename string) bool { + return strings.HasSuffix(basename, ".go") +} + +func 
unfocusFile(path string) { + data, err := os.ReadFile(path) + if err != nil { + fmt.Printf("error reading file '%s': %s\n", path, err.Error()) + return + } + + ast, err := parser.ParseFile(token.NewFileSet(), path, bytes.NewReader(data), parser.ParseComments) + if err != nil { + fmt.Printf("error parsing file '%s': %s\n", path, err.Error()) + return + } + + eliminations := scanForFocus(ast) + if len(eliminations) == 0 { + return + } + + fmt.Printf("...updating %s\n", path) + backup, err := writeBackup(path, data) + if err != nil { + fmt.Printf("error creating backup file: %s\n", err.Error()) + return + } + + if err := updateFile(path, data, eliminations); err != nil { + fmt.Printf("error writing file '%s': %s\n", path, err.Error()) + return + } + + os.Remove(backup) +} + +func writeBackup(path string, data []byte) (string, error) { + t, err := os.CreateTemp(filepath.Dir(path), filepath.Base(path)) + + if err != nil { + return "", fmt.Errorf("error creating temporary file: %w", err) + } + defer t.Close() + + if _, err := io.Copy(t, bytes.NewReader(data)); err != nil { + return "", fmt.Errorf("error writing to temporary file: %w", err) + } + + return t.Name(), nil +} + +func updateFile(path string, data []byte, eliminations [][]int64) error { + to, err := os.Create(path) + if err != nil { + return fmt.Errorf("error opening file for writing '%s': %w\n", path, err) + } + defer to.Close() + + from := bytes.NewReader(data) + var cursor int64 + for _, eliminationRange := range eliminations { + positionToEliminate, lengthToEliminate := eliminationRange[0]-1, eliminationRange[1] + if _, err := io.CopyN(to, from, positionToEliminate-cursor); err != nil { + return fmt.Errorf("error copying data: %w", err) + } + + cursor = positionToEliminate + lengthToEliminate + + if _, err := from.Seek(lengthToEliminate, io.SeekCurrent); err != nil { + return fmt.Errorf("error seeking to position in buffer: %w", err) + } + } + + if _, err := io.Copy(to, from); err != nil { + return fmt.Errorf("error copying end data: %w", err) + } + + return nil +} + +func scanForFocus(file *ast.File) (eliminations [][]int64) { + ast.Inspect(file, func(n ast.Node) bool { + if c, ok := n.(*ast.CallExpr); ok { + if i, ok := c.Fun.(*ast.Ident); ok { + if isFocus(i.Name) { + eliminations = append(eliminations, []int64{int64(i.Pos()), 1}) + } + } + } + + if i, ok := n.(*ast.Ident); ok { + if i.Name == "Focus" { + eliminations = append(eliminations, []int64{int64(i.Pos()), 6}) + } + } + + return true + }) + + return eliminations +} + +func isFocus(name string) bool { + switch name { + case "FDescribe", "FContext", "FIt", "FDescribeTable", "FEntry", "FSpecify", "FWhen": + return true + default: + return false + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta.go new file mode 100644 index 0000000..6c485c5 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta.go @@ -0,0 +1,22 @@ +package watch + +import "sort" + +type Delta struct { + ModifiedPackages []string + + NewSuites []*Suite + RemovedSuites []*Suite + modifiedSuites []*Suite +} + +type DescendingByDelta []*Suite + +func (a DescendingByDelta) Len() int { return len(a) } +func (a DescendingByDelta) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a DescendingByDelta) Less(i, j int) bool { return a[i].Delta() > a[j].Delta() } + +func (d Delta) ModifiedSuites() []*Suite { + sort.Sort(DescendingByDelta(d.modifiedSuites)) + return d.modifiedSuites +} diff --git 
a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go new file mode 100644 index 0000000..26418ac --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go @@ -0,0 +1,75 @@ +package watch + +import ( + "fmt" + + "regexp" + + "github.com/onsi/ginkgo/v2/ginkgo/internal" +) + +type SuiteErrors map[internal.TestSuite]error + +type DeltaTracker struct { + maxDepth int + watchRegExp *regexp.Regexp + suites map[string]*Suite + packageHashes *PackageHashes +} + +func NewDeltaTracker(maxDepth int, watchRegExp *regexp.Regexp) *DeltaTracker { + return &DeltaTracker{ + maxDepth: maxDepth, + watchRegExp: watchRegExp, + packageHashes: NewPackageHashes(watchRegExp), + suites: map[string]*Suite{}, + } +} + +func (d *DeltaTracker) Delta(suites internal.TestSuites) (delta Delta, errors SuiteErrors) { + errors = SuiteErrors{} + delta.ModifiedPackages = d.packageHashes.CheckForChanges() + + providedSuitePaths := map[string]bool{} + for _, suite := range suites { + providedSuitePaths[suite.Path] = true + } + + d.packageHashes.StartTrackingUsage() + + for _, suite := range d.suites { + if providedSuitePaths[suite.Suite.Path] { + if suite.Delta() > 0 { + delta.modifiedSuites = append(delta.modifiedSuites, suite) + } + } else { + delta.RemovedSuites = append(delta.RemovedSuites, suite) + } + } + + d.packageHashes.StopTrackingUsageAndPrune() + + for _, suite := range suites { + _, ok := d.suites[suite.Path] + if !ok { + s, err := NewSuite(suite, d.maxDepth, d.packageHashes) + if err != nil { + errors[suite] = err + continue + } + d.suites[suite.Path] = s + delta.NewSuites = append(delta.NewSuites, s) + } + } + + return delta, errors +} + +func (d *DeltaTracker) WillRun(suite internal.TestSuite) error { + s, ok := d.suites[suite.Path] + if !ok { + return fmt.Errorf("unknown suite %s", suite.Path) + } + + return s.MarkAsRunAndRecomputedDependencies(d.maxDepth) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go new file mode 100644 index 0000000..f5ddff3 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go @@ -0,0 +1,92 @@ +package watch + +import ( + "go/build" + "regexp" +) + +var ginkgoAndGomegaFilter = regexp.MustCompile(`github\.com/onsi/ginkgo|github\.com/onsi/gomega`) +var ginkgoIntegrationTestFilter = regexp.MustCompile(`github\.com/onsi/ginkgo/integration`) //allow us to integration test this thing + +type Dependencies struct { + deps map[string]int +} + +func NewDependencies(path string, maxDepth int) (Dependencies, error) { + d := Dependencies{ + deps: map[string]int{}, + } + + if maxDepth == 0 { + return d, nil + } + + err := d.seedWithDepsForPackageAtPath(path) + if err != nil { + return d, err + } + + for depth := 1; depth < maxDepth; depth++ { + n := len(d.deps) + d.addDepsForDepth(depth) + if n == len(d.deps) { + break + } + } + + return d, nil +} + +func (d Dependencies) Dependencies() map[string]int { + return d.deps +} + +func (d Dependencies) seedWithDepsForPackageAtPath(path string) error { + pkg, err := build.ImportDir(path, 0) + if err != nil { + return err + } + + d.resolveAndAdd(pkg.Imports, 1) + d.resolveAndAdd(pkg.TestImports, 1) + d.resolveAndAdd(pkg.XTestImports, 1) + + delete(d.deps, pkg.Dir) + return nil +} + +func (d Dependencies) addDepsForDepth(depth int) { + for dep, depDepth := range d.deps { + if depDepth == depth { + d.addDepsForDep(dep, depth+1) + } + } +} + 
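+// addDepsForDep expands one package: it imports the package found in dep's
+// directory and records that package's direct imports at the supplied depth
+// (addDepsForDepth passes depth+1, so imports land one level deeper).
+// NewDependencies repeats this until maxDepth is reached or the map stops
+// growing; addDepIfNotPresent keeps each package at its shallowest depth.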
+func (d Dependencies) addDepsForDep(dep string, depth int) { + pkg, err := build.ImportDir(dep, 0) + if err != nil { + println(err.Error()) + return + } + d.resolveAndAdd(pkg.Imports, depth) +} + +func (d Dependencies) resolveAndAdd(deps []string, depth int) { + for _, dep := range deps { + pkg, err := build.Import(dep, ".", 0) + if err != nil { + continue + } + if !pkg.Goroot && (!ginkgoAndGomegaFilter.Match([]byte(pkg.Dir)) || ginkgoIntegrationTestFilter.Match([]byte(pkg.Dir))) { + d.addDepIfNotPresent(pkg.Dir, depth) + } + } +} + +func (d Dependencies) addDepIfNotPresent(dep string, depth int) { + _, ok := d.deps[dep] + if !ok { + d.deps[dep] = depth + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go new file mode 100644 index 0000000..e9f7ec0 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go @@ -0,0 +1,108 @@ +package watch + +import ( + "fmt" + "os" + "regexp" + "time" +) + +var goTestRegExp = regexp.MustCompile(`_test\.go$`) + +type PackageHash struct { + CodeModifiedTime time.Time + TestModifiedTime time.Time + Deleted bool + + path string + codeHash string + testHash string + watchRegExp *regexp.Regexp +} + +func NewPackageHash(path string, watchRegExp *regexp.Regexp) *PackageHash { + p := &PackageHash{ + path: path, + watchRegExp: watchRegExp, + } + + p.codeHash, _, p.testHash, _, p.Deleted = p.computeHashes() + + return p +} + +func (p *PackageHash) CheckForChanges() bool { + codeHash, codeModifiedTime, testHash, testModifiedTime, deleted := p.computeHashes() + + if deleted { + if !p.Deleted { + t := time.Now() + p.CodeModifiedTime = t + p.TestModifiedTime = t + } + p.Deleted = true + return true + } + + modified := false + p.Deleted = false + + if p.codeHash != codeHash { + p.CodeModifiedTime = codeModifiedTime + modified = true + } + if p.testHash != testHash { + p.TestModifiedTime = testModifiedTime + modified = true + } + + p.codeHash = codeHash + p.testHash = testHash + return modified +} + +func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Time, testHash string, testModifiedTime time.Time, deleted bool) { + entries, err := os.ReadDir(p.path) + + if err != nil { + deleted = true + return + } + + for _, entry := range entries { + if entry.IsDir() { + continue + } + + info, err := entry.Info() + if err != nil { + continue + } + + if goTestRegExp.Match([]byte(info.Name())) { + testHash += p.hashForFileInfo(info) + if info.ModTime().After(testModifiedTime) { + testModifiedTime = info.ModTime() + } + continue + } + + if p.watchRegExp.Match([]byte(info.Name())) { + codeHash += p.hashForFileInfo(info) + if info.ModTime().After(codeModifiedTime) { + codeModifiedTime = info.ModTime() + } + } + } + + testHash += codeHash + if codeModifiedTime.After(testModifiedTime) { + testModifiedTime = codeModifiedTime + } + + return +} + +func (p *PackageHash) hashForFileInfo(info os.FileInfo) string { + return fmt.Sprintf("%s_%d_%d", info.Name(), info.Size(), info.ModTime().UnixNano()) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hashes.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hashes.go new file mode 100644 index 0000000..b4892be --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hashes.go @@ -0,0 +1,85 @@ +package watch + +import ( + "path/filepath" + "regexp" + "sync" +) + +type PackageHashes struct { + PackageHashes map[string]*PackageHash + usedPaths map[string]bool + 
watchRegExp *regexp.Regexp + lock *sync.Mutex +} + +func NewPackageHashes(watchRegExp *regexp.Regexp) *PackageHashes { + return &PackageHashes{ + PackageHashes: map[string]*PackageHash{}, + usedPaths: nil, + watchRegExp: watchRegExp, + lock: &sync.Mutex{}, + } +} + +func (p *PackageHashes) CheckForChanges() []string { + p.lock.Lock() + defer p.lock.Unlock() + + modified := []string{} + + for _, packageHash := range p.PackageHashes { + if packageHash.CheckForChanges() { + modified = append(modified, packageHash.path) + } + } + + return modified +} + +func (p *PackageHashes) Add(path string) *PackageHash { + p.lock.Lock() + defer p.lock.Unlock() + + path, _ = filepath.Abs(path) + _, ok := p.PackageHashes[path] + if !ok { + p.PackageHashes[path] = NewPackageHash(path, p.watchRegExp) + } + + if p.usedPaths != nil { + p.usedPaths[path] = true + } + return p.PackageHashes[path] +} + +func (p *PackageHashes) Get(path string) *PackageHash { + p.lock.Lock() + defer p.lock.Unlock() + + path, _ = filepath.Abs(path) + if p.usedPaths != nil { + p.usedPaths[path] = true + } + return p.PackageHashes[path] +} + +func (p *PackageHashes) StartTrackingUsage() { + p.lock.Lock() + defer p.lock.Unlock() + + p.usedPaths = map[string]bool{} +} + +func (p *PackageHashes) StopTrackingUsageAndPrune() { + p.lock.Lock() + defer p.lock.Unlock() + + for path := range p.PackageHashes { + if !p.usedPaths[path] { + delete(p.PackageHashes, path) + } + } + + p.usedPaths = nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go new file mode 100644 index 0000000..53272df --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go @@ -0,0 +1,87 @@ +package watch + +import ( + "fmt" + "math" + "time" + + "github.com/onsi/ginkgo/v2/ginkgo/internal" +) + +type Suite struct { + Suite internal.TestSuite + RunTime time.Time + Dependencies Dependencies + + sharedPackageHashes *PackageHashes +} + +func NewSuite(suite internal.TestSuite, maxDepth int, sharedPackageHashes *PackageHashes) (*Suite, error) { + deps, err := NewDependencies(suite.Path, maxDepth) + if err != nil { + return nil, err + } + + sharedPackageHashes.Add(suite.Path) + for dep := range deps.Dependencies() { + sharedPackageHashes.Add(dep) + } + + return &Suite{ + Suite: suite, + Dependencies: deps, + + sharedPackageHashes: sharedPackageHashes, + }, nil +} + +func (s *Suite) Delta() float64 { + delta := s.delta(s.Suite.Path, true, 0) * 1000 + for dep, depth := range s.Dependencies.Dependencies() { + delta += s.delta(dep, false, depth) + } + return delta +} + +func (s *Suite) MarkAsRunAndRecomputedDependencies(maxDepth int) error { + s.RunTime = time.Now() + + deps, err := NewDependencies(s.Suite.Path, maxDepth) + if err != nil { + return err + } + + s.sharedPackageHashes.Add(s.Suite.Path) + for dep := range deps.Dependencies() { + s.sharedPackageHashes.Add(dep) + } + + s.Dependencies = deps + + return nil +} + +func (s *Suite) Description() string { + numDeps := len(s.Dependencies.Dependencies()) + pluralizer := "ies" + if numDeps == 1 { + pluralizer = "y" + } + return fmt.Sprintf("%s [%d dependenc%s]", s.Suite.Path, numDeps, pluralizer) +} + +func (s *Suite) delta(packagePath string, includeTests bool, depth int) float64 { + return math.Max(float64(s.dt(packagePath, includeTests)), 0) / float64(depth+1) +} + +func (s *Suite) dt(packagePath string, includeTests bool) time.Duration { + packageHash := s.sharedPackageHashes.Get(packagePath) + var modifiedTime time.Time + if 
includeTests {
+		modifiedTime = packageHash.TestModifiedTime
+	} else {
+		modifiedTime = packageHash.CodeModifiedTime
+	}
+
+	return modifiedTime.Sub(s.RunTime)
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go
new file mode 100644
index 0000000..bde4193
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go
@@ -0,0 +1,192 @@
+package watch
+
+import (
+	"fmt"
+	"regexp"
+	"time"
+
+	"github.com/onsi/ginkgo/v2/formatter"
+	"github.com/onsi/ginkgo/v2/ginkgo/command"
+	"github.com/onsi/ginkgo/v2/ginkgo/internal"
+	"github.com/onsi/ginkgo/v2/internal/interrupt_handler"
+	"github.com/onsi/ginkgo/v2/types"
+)
+
+func BuildWatchCommand() command.Command {
+	var suiteConfig = types.NewDefaultSuiteConfig()
+	var reporterConfig = types.NewDefaultReporterConfig()
+	var cliConfig = types.NewDefaultCLIConfig()
+	var goFlagsConfig = types.NewDefaultGoFlagsConfig()
+
+	flags, err := types.BuildWatchCommandFlagSet(&suiteConfig, &reporterConfig, &cliConfig, &goFlagsConfig)
+	if err != nil {
+		panic(err)
+	}
+	interruptHandler := interrupt_handler.NewInterruptHandler(nil)
+	interrupt_handler.SwallowSigQuit()
+
+	return command.Command{
+		Name:          "watch",
+		Flags:         flags,
+		Usage:         "ginkgo watch <FLAGS> <PACKAGES> -- <PASS-THROUGHS>",
+		ShortDoc:      "Watch the passed in <PACKAGES> and run their tests whenever changes occur.",
+		Documentation: "Any arguments after -- will be passed to the test.",
+		DocLink:       "watching-for-changes",
+		Command: func(args []string, additionalArgs []string) {
+			var errors []error
+			cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig)
+			command.AbortIfErrors("Ginkgo detected configuration issues:", errors)
+
+			watcher := &SpecWatcher{
+				cliConfig:      cliConfig,
+				goFlagsConfig:  goFlagsConfig,
+				suiteConfig:    suiteConfig,
+				reporterConfig: reporterConfig,
+				flags:          flags,
+
+				interruptHandler: interruptHandler,
+			}
+
+			watcher.WatchSpecs(args, additionalArgs)
+		},
+	}
+}
+
+type SpecWatcher struct {
+	suiteConfig    types.SuiteConfig
+	reporterConfig types.ReporterConfig
+	cliConfig      types.CLIConfig
+	goFlagsConfig  types.GoFlagsConfig
+	flags          types.GinkgoFlagSet
+
+	interruptHandler *interrupt_handler.InterruptHandler
+}
+
+func (w *SpecWatcher) WatchSpecs(args []string, additionalArgs []string) {
+	suites := internal.FindSuites(args, w.cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter)
+
+	internal.VerifyCLIAndFrameworkVersion(suites)
+
+	if len(suites) == 0 {
+		command.AbortWith("Found no test suites")
+	}
+
+	fmt.Printf("Identified %d test %s. 
Locating dependencies to a depth of %d (this may take a while)...\n", len(suites), internal.PluralizedWord("suite", "suites", len(suites)), w.cliConfig.Depth) + deltaTracker := NewDeltaTracker(w.cliConfig.Depth, regexp.MustCompile(w.cliConfig.WatchRegExp)) + delta, errors := deltaTracker.Delta(suites) + + fmt.Printf("Watching %d %s:\n", len(delta.NewSuites), internal.PluralizedWord("suite", "suites", len(delta.NewSuites))) + for _, suite := range delta.NewSuites { + fmt.Println(" " + suite.Description()) + } + + for suite, err := range errors { + fmt.Printf("Failed to watch %s: %s\n", suite.PackageName, err) + } + + if len(suites) == 1 { + w.updateSeed() + w.compileAndRun(suites[0], additionalArgs) + } + + ticker := time.NewTicker(time.Second) + + for { + select { + case <-ticker.C: + suites := internal.FindSuites(args, w.cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter) + delta, _ := deltaTracker.Delta(suites) + coloredStream := formatter.ColorableStdOut + + suites = internal.TestSuites{} + + if len(delta.NewSuites) > 0 { + fmt.Fprintln(coloredStream, formatter.F("{{green}}Detected %d new %s:{{/}}", len(delta.NewSuites), internal.PluralizedWord("suite", "suites", len(delta.NewSuites)))) + for _, suite := range delta.NewSuites { + suites = append(suites, suite.Suite) + fmt.Fprintln(coloredStream, formatter.Fi(1, "%s", suite.Description())) + } + } + + modifiedSuites := delta.ModifiedSuites() + if len(modifiedSuites) > 0 { + fmt.Fprintln(coloredStream, formatter.F("{{green}}Detected changes in:{{/}}")) + for _, pkg := range delta.ModifiedPackages { + fmt.Fprintln(coloredStream, formatter.Fi(1, "%s", pkg)) + } + fmt.Fprintln(coloredStream, formatter.F("{{green}}Will run %d %s:{{/}}", len(modifiedSuites), internal.PluralizedWord("suite", "suites", len(modifiedSuites)))) + for _, suite := range modifiedSuites { + suites = append(suites, suite.Suite) + fmt.Fprintln(coloredStream, formatter.Fi(1, "%s", suite.Description())) + } + fmt.Fprintln(coloredStream, "") + } + + if len(suites) == 0 { + break + } + + w.updateSeed() + w.computeSuccinctMode(len(suites)) + for idx := range suites { + if w.interruptHandler.Status().Interrupted() { + return + } + deltaTracker.WillRun(suites[idx]) + suites[idx] = w.compileAndRun(suites[idx], additionalArgs) + } + color := "{{green}}" + if suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 { + color = "{{red}}" + } + fmt.Fprintln(coloredStream, formatter.F(color+"\nDone. 
Resuming watch...{{/}}"))
+
+			messages, err := internal.FinalizeProfilesAndReportsForSuites(suites, w.cliConfig, w.suiteConfig, w.reporterConfig, w.goFlagsConfig)
+			command.AbortIfError("could not finalize profiles:", err)
+			for _, message := range messages {
+				fmt.Println(message)
+			}
+		case <-w.interruptHandler.Status().Channel:
+			return
+		}
+	}
+}
+
+func (w *SpecWatcher) compileAndRun(suite internal.TestSuite, additionalArgs []string) internal.TestSuite {
+	suite = internal.CompileSuite(suite, w.goFlagsConfig)
+	if suite.State.Is(internal.TestSuiteStateFailedToCompile) {
+		fmt.Println(suite.CompilationError.Error())
+		return suite
+	}
+	if w.interruptHandler.Status().Interrupted() {
+		return suite
+	}
+	suite = internal.RunCompiledSuite(suite, w.suiteConfig, w.reporterConfig, w.cliConfig, w.goFlagsConfig, additionalArgs)
+	internal.Cleanup(w.goFlagsConfig, suite)
+	return suite
+}
+
+func (w *SpecWatcher) computeSuccinctMode(numSuites int) {
+	if w.reporterConfig.Verbosity().GTE(types.VerbosityLevelVerbose) {
+		w.reporterConfig.Succinct = false
+		return
+	}
+
+	if w.flags.WasSet("succinct") {
+		return
+	}
+
+	if numSuites == 1 {
+		w.reporterConfig.Succinct = false
+	}
+
+	if numSuites > 1 {
+		w.reporterConfig.Succinct = true
+	}
+}
+
+func (w *SpecWatcher) updateSeed() {
+	if !w.flags.WasSet("seed") {
+		w.suiteConfig.RandomSeed = time.Now().Unix()
+	}
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo_cli_dependencies.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo_cli_dependencies.go
new file mode 100644
index 0000000..8516272
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo_cli_dependencies.go
@@ -0,0 +1,8 @@
+//go:build ginkgoclidependencies
+// +build ginkgoclidependencies
+
+package ginkgo
+
+import (
+	_ "github.com/onsi/ginkgo/v2/ginkgo"
+)
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go
index 1beeb11..1707314 100644
--- a/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go
@@ -1,26 +1,42 @@
 package ginkgo
 
-import "github.com/onsi/ginkgo/v2/internal/testingtproxy"
+import (
+	"github.com/onsi/ginkgo/v2/internal/testingtproxy"
+)
 
 /*
-GinkgoT() implements an interface analogous to *testing.T and can be used with
-third-party libraries that accept *testing.T through an interface.
+GinkgoT() implements an interface that allows third party libraries to integrate with and build on top of Ginkgo.
+
+GinkgoT() is analogous to *testing.T and implements the majority of *testing.T's methods. It can typically be used as a drop-in replacement with third-party libraries that accept *testing.T through an interface.
 
 GinkgoT() takes an optional offset argument that can be used to get the
-correct line number associated with the failure.
+correct line number associated with the failure - though you do not need to use this if you call GinkgoHelper() or GinkgoT().Helper() appropriately.
 
 You can learn more here: https://onsi.github.io/ginkgo/#using-third-party-libraries
 */
-func GinkgoT(optionalOffset ...int) GinkgoTInterface {
+func GinkgoT(optionalOffset ...int) FullGinkgoTInterface {
 	offset := 3
 	if len(optionalOffset) > 0 {
 		offset = optionalOffset[0]
 	}
-	return testingtproxy.New(GinkgoWriter, Fail, Skip, DeferCleanup, CurrentSpecReport, offset)
+	return testingtproxy.New(
+		GinkgoWriter,
+		Fail,
+		Skip,
+		DeferCleanup,
+		CurrentSpecReport,
+		AddReportEntry,
+		GinkgoRecover,
+		AttachProgressReporter,
+		suiteConfig.RandomSeed,
+		suiteConfig.ParallelProcess,
+		suiteConfig.ParallelTotal,
+		reporterConfig.NoColor,
+		offset)
 }
 
 /*
-The interface returned by GinkgoT(). This covers most of the methods in the testing package's T.
+The portion of the interface returned by GinkgoT() that maps onto methods in the testing package's T.
 */
 type GinkgoTInterface interface {
 	Cleanup(func())
@@ -43,3 +59,33 @@ type GinkgoTInterface interface {
 	Skipped() bool
 	TempDir() string
 }
+
+/*
+Additional methods returned by GinkgoT() that provide deeper integration points into Ginkgo.
+*/
+type FullGinkgoTInterface interface {
+	GinkgoTInterface
+
+	AddReportEntryVisibilityAlways(name string, args ...any)
+	AddReportEntryVisibilityFailureOrVerbose(name string, args ...any)
+	AddReportEntryVisibilityNever(name string, args ...any)
+
+	//Prints to the GinkgoWriter
+	Print(a ...interface{})
+	Printf(format string, a ...interface{})
+	Println(a ...interface{})
+
+	//Provides access to Ginkgo's color formatting, correctly configured to match the color settings specified in the invocation of ginkgo
+	F(format string, args ...any) string
+	Fi(indentation uint, format string, args ...any) string
+	Fiw(indentation uint, maxWidth uint, format string, args ...any) string
+
+	GinkgoRecover()
+	DeferCleanup(args ...any)
+
+	RandomSeed() int64
+	ParallelProcess() int
+	ParallelTotal() int
+
+	AttachProgressReporter(func() string) func()
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/group.go b/vendor/github.com/onsi/ginkgo/v2/internal/group.go
index c6546bb..ae1b7b0 100644
--- a/vendor/github.com/onsi/ginkgo/v2/internal/group.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/group.go
@@ -94,15 +94,19 @@ type group struct {
 	runOncePairs   map[uint]runOncePairs
 	runOnceTracker map[runOncePair]types.SpecState
 
-	succeeded bool
+	succeeded              bool
+	failedInARunOnceBefore bool
+	continueOnFailure      bool
 }
 
 func newGroup(suite *Suite) *group {
 	return &group{
-		suite:          suite,
-		runOncePairs:   map[uint]runOncePairs{},
-		runOnceTracker: map[runOncePair]types.SpecState{},
-		succeeded:      true,
+		suite:                  suite,
+		runOncePairs:           map[uint]runOncePairs{},
+		runOnceTracker:         map[runOncePair]types.SpecState{},
+		succeeded:              true,
+		failedInARunOnceBefore: false,
+		continueOnFailure:      false,
 	}
 }
 
@@ -116,8 +120,11 @@ func (g *group) initialReportForSpec(spec Spec) types.SpecReport {
 		LeafNodeText:   spec.FirstNodeWithType(types.NodeTypeIt).Text,
 		LeafNodeLabels: []string(spec.FirstNodeWithType(types.NodeTypeIt).Labels),
 		ParallelProcess: g.suite.config.ParallelProcess,
+		RunningInParallel: g.suite.isRunningInParallel(),
 		IsSerial:             spec.Nodes.HasNodeMarkedSerial(),
 		IsInOrderedContainer: !spec.Nodes.FirstNodeMarkedOrdered().IsZero(),
+		MaxFlakeAttempts:      spec.Nodes.GetMaxFlakeAttempts(),
+		MaxMustPassRepeatedly: spec.Nodes.GetMaxMustPassRepeatedly(),
 	}
 }
 
@@ -128,13 +135,20 @@ func (g *group) 
evaluateSkipStatus(spec Spec) (types.SpecState, types.Failure) { if spec.Skip { return types.SpecStateSkipped, types.Failure{} } - if g.suite.interruptHandler.Status().Interrupted || g.suite.skipAll { + if g.suite.interruptHandler.Status().Interrupted() || g.suite.skipAll { return types.SpecStateSkipped, types.Failure{} } - if !g.succeeded { + if !g.suite.deadline.IsZero() && g.suite.deadline.Before(time.Now()) { + return types.SpecStateSkipped, types.Failure{} + } + if !g.succeeded && !g.continueOnFailure { return types.SpecStateSkipped, g.suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt), "Spec skipped because an earlier spec in an ordered container failed") } + if g.failedInARunOnceBefore && g.continueOnFailure { + return types.SpecStateSkipped, g.suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt), + "Spec skipped because a BeforeAll node failed") + } beforeOncePairs := g.runOncePairs[spec.SubjectID()].withType(types.NodeTypeBeforeAll | types.NodeTypeBeforeEach | types.NodeTypeJustBeforeEach) for _, pair := range beforeOncePairs { if g.runOnceTracker[pair].Is(types.SpecStateSkipped) { @@ -162,9 +176,8 @@ func (g *group) isLastSpecWithPair(specID uint, pair runOncePair) bool { return lastSpecID == specID } -func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) { - interruptStatus := g.suite.interruptHandler.Status() - +func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) bool { + failedInARunOnceBefore := false pairs := g.runOncePairs[spec.SubjectID()] nodes := spec.Nodes.WithType(types.NodeTypeBeforeAll) @@ -173,18 +186,24 @@ func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) { nodes = append(nodes, spec.Nodes.FirstNodeWithType(types.NodeTypeIt)) terminatingNode, terminatingPair := Node{}, runOncePair{} + deadline := time.Time{} + if spec.SpecTimeout() > 0 { + deadline = time.Now().Add(spec.SpecTimeout()) + } + for _, node := range nodes { oncePair := pairs.runOncePairFor(node.ID) if !oncePair.isZero() && g.runOnceTracker[oncePair].Is(types.SpecStatePassed) { continue } - g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure = g.suite.runNode(node, interruptStatus.Channel, spec.Nodes.BestTextFor(node)) + g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure = g.suite.runNode(node, deadline, spec.Nodes.BestTextFor(node)) g.suite.currentSpecReport.RunTime = time.Since(g.suite.currentSpecReport.StartTime) if !oncePair.isZero() { g.runOnceTracker[oncePair] = g.suite.currentSpecReport.State } if g.suite.currentSpecReport.State != types.SpecStatePassed { terminatingNode, terminatingPair = node, oncePair + failedInARunOnceBefore = !terminatingPair.isZero() break } } @@ -207,7 +226,7 @@ func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) { //this node has already been run on this attempt, don't rerun it return false } - pair := runOncePair{} + var pair runOncePair switch node.NodeType { case types.NodeTypeCleanupAfterEach, types.NodeTypeCleanupAfterAll: // check if we were generated in an AfterNode that has already run @@ -237,9 +256,13 @@ func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) { if !terminatingPair.isZero() && terminatingNode.NestingLevel == node.NestingLevel { return true //...or, a run-once node at our nesting level was skipped which means this is our last chance to run } - case types.SpecStateFailed, types.SpecStatePanicked: // the spec has failed... + case types.SpecStateFailed, types.SpecStatePanicked, types.SpecStateTimedout: // the spec has failed... 
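+			// With ContinueOnFailure, a failure does not skip the remaining specs in
+			// the ordered container, so After/cleanup nodes may only run once the
+			// last spec sharing this run-once pair has had its turn. The exception
+			// is a failure in a run-once Before node (failedInARunOnceBefore): the
+			// rest of the group will be skipped, which makes this spec the last one
+			// that actually runs.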
if isFinalAttempt { - return true //...if this was the last attempt then we're the last spec to run and so the AfterNode should run + if g.continueOnFailure { + return isLastSpecWithPair || failedInARunOnceBefore //...we're configured to continue on failures - so we should only run if we're the last spec for this pair or if we failed in a runOnceBefore (which means we _are_ the last spec to run) + } else { + return true //...this was the last attempt and continueOnFailure is false therefore we are the last spec to run and so the AfterNode should run + } } if !terminatingPair.isZero() { // ...and it failed in a run-once. which will be running again if node.NodeType.Is(types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll) { @@ -260,26 +283,33 @@ func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) { for _, node := range nodes { afterNodeWasRun[node.ID] = true - state, failure := g.suite.runNode(node, g.suite.interruptHandler.Status().Channel, spec.Nodes.BestTextFor(node)) + state, failure := g.suite.runNode(node, deadline, spec.Nodes.BestTextFor(node)) g.suite.currentSpecReport.RunTime = time.Since(g.suite.currentSpecReport.StartTime) if g.suite.currentSpecReport.State == types.SpecStatePassed || state == types.SpecStateAborted { g.suite.currentSpecReport.State = state g.suite.currentSpecReport.Failure = failure + } else if state.Is(types.SpecStateFailureStates) { + g.suite.currentSpecReport.AdditionalFailures = append(g.suite.currentSpecReport.AdditionalFailures, types.AdditionalFailure{State: state, Failure: failure}) } } includeDeferCleanups = true } + return failedInARunOnceBefore } func (g *group) run(specs Specs) { g.specs = specs + g.continueOnFailure = specs[0].Nodes.FirstNodeMarkedOrdered().MarkedContinueOnFailure for _, spec := range g.specs { g.runOncePairs[spec.SubjectID()] = runOncePairsForSpec(spec) } for _, spec := range g.specs { + g.suite.selectiveLock.Lock() g.suite.currentSpecReport = g.initialReportForSpec(spec) + g.suite.selectiveLock.Unlock() + g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure = g.evaluateSkipStatus(spec) g.suite.reporter.WillRun(g.suite.currentSpecReport) g.suite.reportEach(spec, types.NodeTypeReportBeforeEach) @@ -287,28 +317,52 @@ func (g *group) run(specs Specs) { skip := g.suite.config.DryRun || g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates|types.SpecStateSkipped|types.SpecStatePending) g.suite.currentSpecReport.StartTime = time.Now() + failedInARunOnceBefore := false if !skip { - maxAttempts := max(1, spec.FlakeAttempts()) - if g.suite.config.FlakeAttempts > 0 { + var maxAttempts = 1 + + if g.suite.currentSpecReport.MaxMustPassRepeatedly > 0 { + maxAttempts = max(1, spec.MustPassRepeatedly()) + } else if g.suite.config.FlakeAttempts > 0 { maxAttempts = g.suite.config.FlakeAttempts + g.suite.currentSpecReport.MaxFlakeAttempts = maxAttempts + } else if g.suite.currentSpecReport.MaxFlakeAttempts > 0 { + maxAttempts = max(1, spec.FlakeAttempts()) } + for attempt := 0; attempt < maxAttempts; attempt++ { g.suite.currentSpecReport.NumAttempts = attempt + 1 g.suite.writer.Truncate() g.suite.outputInterceptor.StartInterceptingOutput() if attempt > 0 { - fmt.Fprintf(g.suite.writer, "\nGinkgo: Attempt #%d Failed. 
Retrying...\n", attempt) + if g.suite.currentSpecReport.MaxMustPassRepeatedly > 0 { + g.suite.handleSpecEvent(types.SpecEvent{SpecEventType: types.SpecEventSpecRepeat, Attempt: attempt}) + } + if g.suite.currentSpecReport.MaxFlakeAttempts > 0 { + g.suite.handleSpecEvent(types.SpecEvent{SpecEventType: types.SpecEventSpecRetry, Attempt: attempt}) + } } - g.attemptSpec(attempt == maxAttempts-1, spec) + failedInARunOnceBefore = g.attemptSpec(attempt == maxAttempts-1, spec) g.suite.currentSpecReport.EndTime = time.Now() g.suite.currentSpecReport.RunTime = g.suite.currentSpecReport.EndTime.Sub(g.suite.currentSpecReport.StartTime) g.suite.currentSpecReport.CapturedGinkgoWriterOutput += string(g.suite.writer.Bytes()) g.suite.currentSpecReport.CapturedStdOutErr += g.suite.outputInterceptor.StopInterceptingAndReturnOutput() - if g.suite.currentSpecReport.State.Is(types.SpecStatePassed | types.SpecStateSkipped | types.SpecStateAborted | types.SpecStateInterrupted) { - break + if g.suite.currentSpecReport.MaxMustPassRepeatedly > 0 { + if g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates | types.SpecStateSkipped) { + break + } + } + if g.suite.currentSpecReport.MaxFlakeAttempts > 0 { + if g.suite.currentSpecReport.State.Is(types.SpecStatePassed | types.SpecStateSkipped | types.SpecStateAborted | types.SpecStateInterrupted) { + break + } else if attempt < maxAttempts-1 { + af := types.AdditionalFailure{State: g.suite.currentSpecReport.State, Failure: g.suite.currentSpecReport.Failure} + af.Failure.Message = fmt.Sprintf("Failure recorded during attempt %d:\n%s", attempt+1, af.Failure.Message) + g.suite.currentSpecReport.AdditionalFailures = append(g.suite.currentSpecReport.AdditionalFailures, af) + } } } } @@ -317,228 +371,10 @@ func (g *group) run(specs Specs) { g.suite.processCurrentSpecReport() if g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates) { g.succeeded = false + g.failedInARunOnceBefore = g.failedInARunOnceBefore || failedInARunOnceBefore } + g.suite.selectiveLock.Lock() g.suite.currentSpecReport = types.SpecReport{} - } -} - -func (g *group) oldRun(specs Specs) { - var suite = g.suite - nodeState := map[uint]types.SpecState{} - groupSucceeded := true - - indexOfLastSpecContainingNodeID := func(id uint) int { - lastIdx := -1 - for idx := range specs { - if specs[idx].Nodes.ContainsNodeID(id) && !specs[idx].Skip { - lastIdx = idx - } - } - return lastIdx - } - - for i, spec := range specs { - suite.currentSpecReport = types.SpecReport{ - ContainerHierarchyTexts: spec.Nodes.WithType(types.NodeTypeContainer).Texts(), - ContainerHierarchyLocations: spec.Nodes.WithType(types.NodeTypeContainer).CodeLocations(), - ContainerHierarchyLabels: spec.Nodes.WithType(types.NodeTypeContainer).Labels(), - LeafNodeLocation: spec.FirstNodeWithType(types.NodeTypeIt).CodeLocation, - LeafNodeType: types.NodeTypeIt, - LeafNodeText: spec.FirstNodeWithType(types.NodeTypeIt).Text, - LeafNodeLabels: []string(spec.FirstNodeWithType(types.NodeTypeIt).Labels), - ParallelProcess: suite.config.ParallelProcess, - IsSerial: spec.Nodes.HasNodeMarkedSerial(), - IsInOrderedContainer: !spec.Nodes.FirstNodeMarkedOrdered().IsZero(), - } - - skip := spec.Skip - if spec.Nodes.HasNodeMarkedPending() { - skip = true - suite.currentSpecReport.State = types.SpecStatePending - } else { - if suite.interruptHandler.Status().Interrupted || suite.skipAll { - skip = true - } - if !groupSucceeded { - skip = true - suite.currentSpecReport.Failure = 
suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt), - "Spec skipped because an earlier spec in an ordered container failed") - } - for _, node := range spec.Nodes.WithType(types.NodeTypeBeforeAll) { - if nodeState[node.ID] == types.SpecStateSkipped { - skip = true - suite.currentSpecReport.Failure = suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt), - "Spec skipped because Skip() was called in BeforeAll") - break - } - } - if skip { - suite.currentSpecReport.State = types.SpecStateSkipped - } - } - - if suite.config.DryRun && !skip { - skip = true - suite.currentSpecReport.State = types.SpecStatePassed - } - - suite.reporter.WillRun(suite.currentSpecReport) - //send the spec report to any attached ReportBeforeEach blocks - this will update suite.currentSpecReport if failures occur in these blocks - suite.reportEach(spec, types.NodeTypeReportBeforeEach) - if suite.currentSpecReport.State.Is(types.SpecStateFailureStates) { - //the reportEach failed, skip this spec - skip = true - } - - suite.currentSpecReport.StartTime = time.Now() - maxAttempts := max(1, spec.FlakeAttempts()) - if suite.config.FlakeAttempts > 0 { - maxAttempts = suite.config.FlakeAttempts - } - - for attempt := 0; !skip && (attempt < maxAttempts); attempt++ { - suite.currentSpecReport.NumAttempts = attempt + 1 - suite.writer.Truncate() - suite.outputInterceptor.StartInterceptingOutput() - if attempt > 0 { - fmt.Fprintf(suite.writer, "\nGinkgo: Attempt #%d Failed. Retrying...\n", attempt) - } - isFinalAttempt := (attempt == maxAttempts-1) - - interruptStatus := suite.interruptHandler.Status() - deepestNestingLevelAttained := -1 - var nodes = spec.Nodes.WithType(types.NodeTypeBeforeAll).Filter(func(n Node) bool { - return nodeState[n.ID] != types.SpecStatePassed - }) - nodes = nodes.CopyAppend(spec.Nodes.WithType(types.NodeTypeBeforeEach)...).SortedByAscendingNestingLevel() - nodes = nodes.CopyAppend(spec.Nodes.WithType(types.NodeTypeJustBeforeEach).SortedByAscendingNestingLevel()...) - nodes = nodes.CopyAppend(spec.Nodes.WithType(types.NodeTypeIt)...) - - var terminatingNode Node - for j := range nodes { - deepestNestingLevelAttained = max(deepestNestingLevelAttained, nodes[j].NestingLevel) - suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(nodes[j], interruptStatus.Channel, spec.Nodes.BestTextFor(nodes[j])) - suite.currentSpecReport.RunTime = time.Since(suite.currentSpecReport.StartTime) - nodeState[nodes[j].ID] = suite.currentSpecReport.State - if suite.currentSpecReport.State != types.SpecStatePassed { - terminatingNode = nodes[j] - break - } - } - - afterAllNodesThatRan := map[uint]bool{} - // pull out some shared code so we aren't repeating ourselves down below. 
this just runs after and cleanup nodes - runAfterAndCleanupNodes := func(nodes Nodes) { - for j := range nodes { - state, failure := suite.runNode(nodes[j], suite.interruptHandler.Status().Channel, spec.Nodes.BestTextFor(nodes[j])) - suite.currentSpecReport.RunTime = time.Since(suite.currentSpecReport.StartTime) - nodeState[nodes[j].ID] = state - if suite.currentSpecReport.State == types.SpecStatePassed || state == types.SpecStateAborted { - suite.currentSpecReport.State = state - suite.currentSpecReport.Failure = failure - if state != types.SpecStatePassed { - terminatingNode = nodes[j] - } - } - if nodes[j].NodeType.Is(types.NodeTypeAfterAll) { - afterAllNodesThatRan[nodes[j].ID] = true - } - } - } - - // pull out a helper that captures the logic of whether or not we should run a given After node. - // there is complexity here stemming from the fact that we allow nested ordered contexts and flakey retries - shouldRunAfterNode := func(n Node) bool { - if n.NodeType.Is(types.NodeTypeAfterEach | types.NodeTypeJustAfterEach) { - return true - } - var id uint - if n.NodeType.Is(types.NodeTypeAfterAll) { - id = n.ID - if afterAllNodesThatRan[id] { //we've already run on this attempt. don't run again. - return false - } - } - if n.NodeType.Is(types.NodeTypeCleanupAfterAll) { - id = n.NodeIDWhereCleanupWasGenerated - } - isLastSpecWithNode := indexOfLastSpecContainingNodeID(id) == i - - switch suite.currentSpecReport.State { - case types.SpecStatePassed: //we've passed so far... - return isLastSpecWithNode //... and we're the last spec with this AfterNode, so we should run it - case types.SpecStateSkipped: //the spec was skipped by the user... - if isLastSpecWithNode { - return true //...we're the last spec, so we should run the AfterNode - } - if terminatingNode.NodeType.Is(types.NodeTypeBeforeAll) && terminatingNode.NestingLevel == n.NestingLevel { - return true //...or, a BeforeAll was skipped and it's at our nesting level, so our subgroup is going to skip - } - case types.SpecStateFailed, types.SpecStatePanicked: // the spec has failed... - if isFinalAttempt { - return true //...if this was the last attempt then we're the last spec to run and so the AfterNode should run - } - if terminatingNode.NodeType.Is(types.NodeTypeBeforeAll) { - //...we'll be rerunning a BeforeAll so we should cleanup after it if... - if n.NodeType.Is(types.NodeTypeAfterAll) && terminatingNode.NestingLevel == n.NestingLevel { - return true //we're at the same nesting level - } - if n.NodeType.Is(types.NodeTypeCleanupAfterAll) && terminatingNode.ID == n.NodeIDWhereCleanupWasGenerated { - return true //we're a DeferCleanup generated by it - } - } - if terminatingNode.NodeType.Is(types.NodeTypeAfterAll) { - //...we'll be rerunning an AfterAll so we should cleanup after it if... - if n.NodeType.Is(types.NodeTypeCleanupAfterAll) && terminatingNode.ID == n.NodeIDWhereCleanupWasGenerated { - return true //we're a DeferCleanup generated by it - } - } - case types.SpecStateInterrupted, types.SpecStateAborted: // ...we've been interrupted and/or aborted - return true //...that means the test run is over and we should clean up the stack. Run the AfterNode - } - return false - } - - // first pass - run all the JustAfterEach, Aftereach, and AfterAlls. Our shoudlRunAfterNode filter function will clean up the AfterAlls for us. 
- afterNodes := spec.Nodes.WithType(types.NodeTypeJustAfterEach).SortedByDescendingNestingLevel() - afterNodes = afterNodes.CopyAppend(spec.Nodes.WithType(types.NodeTypeAfterEach).CopyAppend(spec.Nodes.WithType(types.NodeTypeAfterAll)...).SortedByDescendingNestingLevel()...) - afterNodes = afterNodes.WithinNestingLevel(deepestNestingLevelAttained) - afterNodes = afterNodes.Filter(shouldRunAfterNode) - runAfterAndCleanupNodes(afterNodes) - - // second-pass perhaps we didn't run the AfterAlls but a state change due to an AfterEach now requires us to run the AfterAlls: - afterNodes = spec.Nodes.WithType(types.NodeTypeAfterAll).WithinNestingLevel(deepestNestingLevelAttained).Filter(shouldRunAfterNode) - runAfterAndCleanupNodes(afterNodes) - - // now we run any DeferCleanups - afterNodes = suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterEach).Reverse() - afterNodes = append(afterNodes, suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterAll).Filter(shouldRunAfterNode).Reverse()...) - runAfterAndCleanupNodes(afterNodes) - - // third-pass, perhaps a DeferCleanup failed and now we need to run the AfterAlls. - afterNodes = spec.Nodes.WithType(types.NodeTypeAfterAll).WithinNestingLevel(deepestNestingLevelAttained).Filter(shouldRunAfterNode) - runAfterAndCleanupNodes(afterNodes) - - // and finally - running AfterAlls may have generated some new DeferCleanup nodes, let's run them to finish up - afterNodes = suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterAll).Reverse().Filter(shouldRunAfterNode) - runAfterAndCleanupNodes(afterNodes) - - suite.currentSpecReport.EndTime = time.Now() - suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime) - suite.currentSpecReport.CapturedGinkgoWriterOutput += string(suite.writer.Bytes()) - suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput() - - if suite.currentSpecReport.State.Is(types.SpecStatePassed | types.SpecStateSkipped | types.SpecStateAborted | types.SpecStateInterrupted) { - break - } - } - - //send the spec report to any attached ReportAfterEach blocks - this will update suite.currentSpecReport if failures occur in these blocks - suite.reportEach(spec, types.NodeTypeReportAfterEach) - suite.processCurrentSpecReport() - if suite.currentSpecReport.State.Is(types.SpecStateFailureStates) { - groupSucceeded = false - } - suite.currentSpecReport = types.SpecReport{} + g.suite.selectiveLock.Unlock() } } diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go index aca7d1c..ac6f510 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go @@ -1,39 +1,38 @@ package interrupt_handler import ( - "fmt" "os" "os/signal" - "runtime" "sync" "syscall" "time" - "github.com/onsi/ginkgo/v2/formatter" "github.com/onsi/ginkgo/v2/internal/parallel_support" ) -const TIMEOUT_REPEAT_INTERRUPT_MAXIMUM_DURATION = 30 * time.Second -const TIMEOUT_REPEAT_INTERRUPT_FRACTION_OF_TIMEOUT = 10 const ABORT_POLLING_INTERVAL = 500 * time.Millisecond -const ABORT_REPEAT_INTERRUPT_DURATION = 30 * time.Second type InterruptCause uint const ( InterruptCauseInvalid InterruptCause = iota - InterruptCauseSignal - InterruptCauseTimeout InterruptCauseAbortByOtherProcess ) +type InterruptLevel uint + +const ( + InterruptLevelUninterrupted InterruptLevel 
= iota + InterruptLevelCleanupAndReport + InterruptLevelReportOnly + InterruptLevelBailOut +) + func (ic InterruptCause) String() string { switch ic { case InterruptCauseSignal: return "Interrupted by User" - case InterruptCauseTimeout: - return "Interrupted by Timeout" case InterruptCauseAbortByOtherProcess: return "Interrupted by Other Ginkgo Process" } @@ -41,37 +40,49 @@ func (ic InterruptCause) String() string { } type InterruptStatus struct { - Interrupted bool - Channel chan interface{} - Cause InterruptCause + Channel chan interface{} + Level InterruptLevel + Cause InterruptCause +} + +func (s InterruptStatus) Interrupted() bool { + return s.Level != InterruptLevelUninterrupted +} + +func (s InterruptStatus) Message() string { + return s.Cause.String() +} + +func (s InterruptStatus) ShouldIncludeProgressReport() bool { + return s.Cause != InterruptCauseAbortByOtherProcess } type InterruptHandlerInterface interface { Status() InterruptStatus - SetInterruptPlaceholderMessage(string) - ClearInterruptPlaceholderMessage() - InterruptMessageWithStackTraces() string } type InterruptHandler struct { - c chan interface{} - lock *sync.Mutex - interrupted bool - interruptPlaceholderMessage string - interruptCause InterruptCause - client parallel_support.Client - stop chan interface{} + c chan interface{} + lock *sync.Mutex + level InterruptLevel + cause InterruptCause + client parallel_support.Client + stop chan interface{} + signals []os.Signal } -func NewInterruptHandler(timeout time.Duration, client parallel_support.Client) *InterruptHandler { - handler := &InterruptHandler{ - c: make(chan interface{}), - lock: &sync.Mutex{}, - interrupted: false, - stop: make(chan interface{}), - client: client, +func NewInterruptHandler(client parallel_support.Client, signals ...os.Signal) *InterruptHandler { + if len(signals) == 0 { + signals = []os.Signal{os.Interrupt, syscall.SIGTERM} } - handler.registerForInterrupts(timeout) + handler := &InterruptHandler{ + c: make(chan interface{}), + lock: &sync.Mutex{}, + stop: make(chan interface{}), + client: client, + signals: signals, + } + handler.registerForInterrupts() return handler } @@ -79,30 +90,22 @@ func (handler *InterruptHandler) Stop() { close(handler.stop) } -func (handler *InterruptHandler) registerForInterrupts(timeout time.Duration) { +func (handler *InterruptHandler) registerForInterrupts() { // os signal handling signalChannel := make(chan os.Signal, 1) - signal.Notify(signalChannel, os.Interrupt, syscall.SIGTERM) - - // timeout handling - var timeoutChannel <-chan time.Time - var timeoutTimer *time.Timer - if timeout > 0 { - timeoutTimer = time.NewTimer(timeout) - timeoutChannel = timeoutTimer.C - } + signal.Notify(signalChannel, handler.signals...) 
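+	// Interrupts now escalate instead of repeating: the first signal or abort
+	// moves the handler to cleanup-and-report, the next to report-only, and a
+	// third to bail-out. Each escalation closes and replaces handler.c, so
+	// anything waiting on Status().Channel wakes up once per escalation.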
// cross-process abort handling - var abortChannel chan bool + var abortChannel chan interface{} if handler.client != nil { - abortChannel = make(chan bool) + abortChannel = make(chan interface{}) go func() { pollTicker := time.NewTicker(ABORT_POLLING_INTERVAL) for { select { case <-pollTicker.C: if handler.client.ShouldAbort() { - abortChannel <- true + close(abortChannel) pollTicker.Stop() return } @@ -114,55 +117,37 @@ func (handler *InterruptHandler) registerForInterrupts(timeout time.Duration) { }() } - // listen for any interrupt signals - // note that some (timeouts, cross-process aborts) will only trigger once - // for these we set up a ticker to keep interrupting the suite until it ends - // this ensures any `AfterEach` or `AfterSuite`s that get stuck cleaning up - // get interrupted eventually - go func() { + go func(abortChannel chan interface{}) { var interruptCause InterruptCause - var repeatChannel <-chan time.Time - var repeatTicker *time.Ticker for { select { case <-signalChannel: interruptCause = InterruptCauseSignal - case <-timeoutChannel: - interruptCause = InterruptCauseTimeout - repeatInterruptTimeout := timeout / time.Duration(TIMEOUT_REPEAT_INTERRUPT_FRACTION_OF_TIMEOUT) - if repeatInterruptTimeout > TIMEOUT_REPEAT_INTERRUPT_MAXIMUM_DURATION { - repeatInterruptTimeout = TIMEOUT_REPEAT_INTERRUPT_MAXIMUM_DURATION - } - timeoutTimer.Stop() - repeatTicker = time.NewTicker(repeatInterruptTimeout) - repeatChannel = repeatTicker.C case <-abortChannel: interruptCause = InterruptCauseAbortByOtherProcess - repeatTicker = time.NewTicker(ABORT_REPEAT_INTERRUPT_DURATION) - repeatChannel = repeatTicker.C - case <-repeatChannel: - //do nothing, just interrupt again using the same interruptCause case <-handler.stop: - if timeoutTimer != nil { - timeoutTimer.Stop() - } - if repeatTicker != nil { - repeatTicker.Stop() - } signal.Stop(signalChannel) return } + abortChannel = nil + handler.lock.Lock() - handler.interruptCause = interruptCause - if handler.interruptPlaceholderMessage != "" { - fmt.Println(handler.interruptPlaceholderMessage) + oldLevel := handler.level + handler.cause = interruptCause + if handler.level == InterruptLevelUninterrupted { + handler.level = InterruptLevelCleanupAndReport + } else if handler.level == InterruptLevelCleanupAndReport { + handler.level = InterruptLevelReportOnly + } else if handler.level == InterruptLevelReportOnly { + handler.level = InterruptLevelBailOut + } + if handler.level != oldLevel { + close(handler.c) + handler.c = make(chan interface{}) } - handler.interrupted = true - close(handler.c) - handler.c = make(chan interface{}) handler.lock.Unlock() } - }() + }(abortChannel) } func (handler *InterruptHandler) Status() InterruptStatus { @@ -170,43 +155,8 @@ func (handler *InterruptHandler) Status() InterruptStatus { defer handler.lock.Unlock() return InterruptStatus{ - Interrupted: handler.interrupted, - Channel: handler.c, - Cause: handler.interruptCause, + Level: handler.level, + Channel: handler.c, + Cause: handler.cause, } } - -func (handler *InterruptHandler) SetInterruptPlaceholderMessage(message string) { - handler.lock.Lock() - defer handler.lock.Unlock() - - handler.interruptPlaceholderMessage = message -} - -func (handler *InterruptHandler) ClearInterruptPlaceholderMessage() { - handler.lock.Lock() - defer handler.lock.Unlock() - - handler.interruptPlaceholderMessage = "" -} - -func (handler *InterruptHandler) InterruptMessageWithStackTraces() string { - handler.lock.Lock() - out := fmt.Sprintf("%s\n\n", 
handler.interruptCause.String()) - defer handler.lock.Unlock() - if handler.interruptCause == InterruptCauseAbortByOtherProcess { - return out - } - out += "Here's a stack trace of all running goroutines:\n" - buf := make([]byte, 8192) - for { - n := runtime.Stack(buf, true) - if n < len(buf) { - buf = buf[:n] - break - } - buf = make([]byte, 2*len(buf)) - } - out += formatter.Fi(1, "%s", string(buf)) - return out -} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/node.go b/vendor/github.com/onsi/ginkgo/v2/internal/node.go index dc11129..0869bff 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/node.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/node.go @@ -1,9 +1,11 @@ package internal import ( + "context" "fmt" "reflect" "sort" + "time" "sync" @@ -27,27 +29,38 @@ type Node struct { NodeType types.NodeType Text string - Body func() + Body func(SpecContext) CodeLocation types.CodeLocation NestingLevel int + HasContext bool - SynchronizedBeforeSuiteProc1Body func() []byte - SynchronizedBeforeSuiteAllProcsBody func([]byte) + SynchronizedBeforeSuiteProc1Body func(SpecContext) []byte + SynchronizedBeforeSuiteProc1BodyHasContext bool + SynchronizedBeforeSuiteAllProcsBody func(SpecContext, []byte) + SynchronizedBeforeSuiteAllProcsBodyHasContext bool - SynchronizedAfterSuiteAllProcsBody func() - SynchronizedAfterSuiteProc1Body func() + SynchronizedAfterSuiteAllProcsBody func(SpecContext) + SynchronizedAfterSuiteAllProcsBodyHasContext bool + SynchronizedAfterSuiteProc1Body func(SpecContext) + SynchronizedAfterSuiteProc1BodyHasContext bool - ReportEachBody func(types.SpecReport) - ReportAfterSuiteBody func(types.Report) + ReportEachBody func(types.SpecReport) + ReportSuiteBody func(types.Report) - MarkedFocus bool - MarkedPending bool - MarkedSerial bool - MarkedOrdered bool - MarkedOncePerOrdered bool - MarkedSuppressProgressReporting bool - FlakeAttempts int - Labels Labels + MarkedFocus bool + MarkedPending bool + MarkedSerial bool + MarkedOrdered bool + MarkedContinueOnFailure bool + MarkedOncePerOrdered bool + FlakeAttempts int + MustPassRepeatedly int + Labels Labels + PollProgressAfter time.Duration + PollProgressInterval time.Duration + NodeTimeout time.Duration + SpecTimeout time.Duration + GracePeriod time.Duration NodeIDWhereCleanupWasGenerated uint } @@ -57,6 +70,7 @@ type focusType bool type pendingType bool type serialType bool type orderedType bool +type continueOnFailureType bool type honorsOrderedType bool type suppressProgressReporting bool @@ -64,13 +78,24 @@ const Focus = focusType(true) const Pending = pendingType(true) const Serial = serialType(true) const Ordered = orderedType(true) +const ContinueOnFailure = continueOnFailureType(true) const OncePerOrdered = honorsOrderedType(true) const SuppressProgressReporting = suppressProgressReporting(true) type FlakeAttempts uint +type MustPassRepeatedly uint type Offset uint type Done chan<- interface{} // Deprecated Done Channel for asynchronous testing type Labels []string +type PollProgressInterval time.Duration +type PollProgressAfter time.Duration +type NodeTimeout time.Duration +type SpecTimeout time.Duration +type GracePeriod time.Duration + +func (l Labels) MatchesLabelFilter(query string) bool { + return types.MustParseLabelFilter(query)(l) +} func UnionOfLabels(labels ...Labels) Labels { out := Labels{} @@ -115,14 +140,28 @@ func isDecoration(arg interface{}) bool { return true case t == reflect.TypeOf(Ordered): return true + case t == reflect.TypeOf(ContinueOnFailure): + return true case t == 
reflect.TypeOf(OncePerOrdered): return true case t == reflect.TypeOf(SuppressProgressReporting): return true case t == reflect.TypeOf(FlakeAttempts(0)): return true + case t == reflect.TypeOf(MustPassRepeatedly(0)): + return true case t == reflect.TypeOf(Labels{}): return true + case t == reflect.TypeOf(PollProgressInterval(0)): + return true + case t == reflect.TypeOf(PollProgressAfter(0)): + return true + case t == reflect.TypeOf(NodeTimeout(0)): + return true + case t == reflect.TypeOf(SpecTimeout(0)): + return true + case t == reflect.TypeOf(GracePeriod(0)): + return true case t.Kind() == reflect.Slice && isSliceOfDecorations(arg): return true default: @@ -143,16 +182,23 @@ func isSliceOfDecorations(slice interface{}) bool { return true } +var contextType = reflect.TypeOf(new(context.Context)).Elem() +var specContextType = reflect.TypeOf(new(SpecContext)).Elem() + func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeType, text string, args ...interface{}) (Node, []error) { baseOffset := 2 node := Node{ - ID: UniqueNodeID(), - NodeType: nodeType, - Text: text, - Labels: Labels{}, - CodeLocation: types.NewCodeLocation(baseOffset), - NestingLevel: -1, + ID: UniqueNodeID(), + NodeType: nodeType, + Text: text, + Labels: Labels{}, + CodeLocation: types.NewCodeLocation(baseOffset), + NestingLevel: -1, + PollProgressAfter: -1, + PollProgressInterval: -1, + GracePeriod: -1, } + errors := []error{} appendError := func(err error) { if err != nil { @@ -204,21 +250,53 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy if !nodeType.Is(types.NodeTypeContainer) { appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Ordered")) } + case t == reflect.TypeOf(ContinueOnFailure): + node.MarkedContinueOnFailure = bool(arg.(continueOnFailureType)) + if !nodeType.Is(types.NodeTypeContainer) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "ContinueOnFailure")) + } case t == reflect.TypeOf(OncePerOrdered): node.MarkedOncePerOrdered = bool(arg.(honorsOrderedType)) if !nodeType.Is(types.NodeTypeBeforeEach | types.NodeTypeJustBeforeEach | types.NodeTypeAfterEach | types.NodeTypeJustAfterEach) { appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "OncePerOrdered")) } case t == reflect.TypeOf(SuppressProgressReporting): - node.MarkedSuppressProgressReporting = bool(arg.(suppressProgressReporting)) - if nodeType.Is(types.NodeTypeContainer) { - appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "SuppressProgressReporting")) - } + deprecationTracker.TrackDeprecation(types.Deprecations.SuppressProgressReporting()) case t == reflect.TypeOf(FlakeAttempts(0)): node.FlakeAttempts = int(arg.(FlakeAttempts)) if !nodeType.Is(types.NodeTypesForContainerAndIt) { appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "FlakeAttempts")) } + case t == reflect.TypeOf(MustPassRepeatedly(0)): + node.MustPassRepeatedly = int(arg.(MustPassRepeatedly)) + if !nodeType.Is(types.NodeTypesForContainerAndIt) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "MustPassRepeatedly")) + } + case t == reflect.TypeOf(PollProgressAfter(0)): + node.PollProgressAfter = time.Duration(arg.(PollProgressAfter)) + if nodeType.Is(types.NodeTypeContainer) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "PollProgressAfter")) + } + case t 
== reflect.TypeOf(PollProgressInterval(0)): + node.PollProgressInterval = time.Duration(arg.(PollProgressInterval)) + if nodeType.Is(types.NodeTypeContainer) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "PollProgressInterval")) + } + case t == reflect.TypeOf(NodeTimeout(0)): + node.NodeTimeout = time.Duration(arg.(NodeTimeout)) + if nodeType.Is(types.NodeTypeContainer) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "NodeTimeout")) + } + case t == reflect.TypeOf(SpecTimeout(0)): + node.SpecTimeout = time.Duration(arg.(SpecTimeout)) + if !nodeType.Is(types.NodeTypeIt) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "SpecTimeout")) + } + case t == reflect.TypeOf(GracePeriod(0)): + node.GracePeriod = time.Duration(arg.(GracePeriod)) + if nodeType.Is(types.NodeTypeContainer) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "GracePeriod")) + } case t == reflect.TypeOf(Labels{}): if !nodeType.Is(types.NodeTypesForContainerAndIt) { appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Label")) @@ -232,35 +310,85 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy } } case t.Kind() == reflect.Func: - if nodeType.Is(types.NodeTypeReportBeforeEach | types.NodeTypeReportAfterEach) { - if node.ReportEachBody != nil { + if nodeType.Is(types.NodeTypeContainer) { + if node.Body != nil { appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) trackedFunctionError = true break } - - //we can trust that the function is valid because the compiler has our back here - node.ReportEachBody = arg.(func(types.SpecReport)) - break - } - - if node.Body != nil { - appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) - trackedFunctionError = true - break - } - isValid := (t.NumOut() == 0) && (t.NumIn() <= 1) && (t.NumIn() == 0 || t.In(0) == reflect.TypeOf(make(Done))) - if !isValid { - appendError(types.GinkgoErrors.InvalidBodyType(t, node.CodeLocation, nodeType)) - trackedFunctionError = true - break - } - if t.NumIn() == 0 { - node.Body = arg.(func()) + if t.NumOut() > 0 || t.NumIn() > 0 { + appendError(types.GinkgoErrors.InvalidBodyTypeForContainer(t, node.CodeLocation, nodeType)) + trackedFunctionError = true + break + } + body := arg.(func()) + node.Body = func(SpecContext) { body() } + } else if nodeType.Is(types.NodeTypeReportBeforeEach | types.NodeTypeReportAfterEach) { + if node.ReportEachBody == nil { + node.ReportEachBody = arg.(func(types.SpecReport)) + } else { + appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) + trackedFunctionError = true + break + } + } else if nodeType.Is(types.NodeTypeReportBeforeSuite | types.NodeTypeReportAfterSuite) { + if node.ReportSuiteBody == nil { + node.ReportSuiteBody = arg.(func(types.Report)) + } else { + appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) + trackedFunctionError = true + break + } + } else if nodeType.Is(types.NodeTypeSynchronizedBeforeSuite) { + if node.SynchronizedBeforeSuiteProc1Body != nil && node.SynchronizedBeforeSuiteAllProcsBody != nil { + appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) + trackedFunctionError = true + break + } + if node.SynchronizedBeforeSuiteProc1Body == nil { + body, hasContext := extractSynchronizedBeforeSuiteProc1Body(arg) + if 
body == nil { + appendError(types.GinkgoErrors.InvalidBodyTypeForSynchronizedBeforeSuiteProc1(t, node.CodeLocation)) + trackedFunctionError = true + } + node.SynchronizedBeforeSuiteProc1Body, node.SynchronizedBeforeSuiteProc1BodyHasContext = body, hasContext + } else if node.SynchronizedBeforeSuiteAllProcsBody == nil { + body, hasContext := extractSynchronizedBeforeSuiteAllProcsBody(arg) + if body == nil { + appendError(types.GinkgoErrors.InvalidBodyTypeForSynchronizedBeforeSuiteAllProcs(t, node.CodeLocation)) + trackedFunctionError = true + } + node.SynchronizedBeforeSuiteAllProcsBody, node.SynchronizedBeforeSuiteAllProcsBodyHasContext = body, hasContext + } + } else if nodeType.Is(types.NodeTypeSynchronizedAfterSuite) { + if node.SynchronizedAfterSuiteAllProcsBody != nil && node.SynchronizedAfterSuiteProc1Body != nil { + appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) + trackedFunctionError = true + break + } + body, hasContext := extractBodyFunction(deprecationTracker, node.CodeLocation, arg) + if body == nil { + appendError(types.GinkgoErrors.InvalidBodyType(t, node.CodeLocation, nodeType)) + trackedFunctionError = true + break + } + if node.SynchronizedAfterSuiteAllProcsBody == nil { + node.SynchronizedAfterSuiteAllProcsBody, node.SynchronizedAfterSuiteAllProcsBodyHasContext = body, hasContext + } else if node.SynchronizedAfterSuiteProc1Body == nil { + node.SynchronizedAfterSuiteProc1Body, node.SynchronizedAfterSuiteProc1BodyHasContext = body, hasContext + } } else { - deprecationTracker.TrackDeprecation(types.Deprecations.Async(), node.CodeLocation) - deprecatedAsyncBody := arg.(func(Done)) - node.Body = func() { deprecatedAsyncBody(make(Done)) } + if node.Body != nil { + appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) + trackedFunctionError = true + break + } + node.Body, node.HasContext = extractBodyFunction(deprecationTracker, node.CodeLocation, arg) + if node.Body == nil { + appendError(types.GinkgoErrors.InvalidBodyType(t, node.CodeLocation, nodeType)) + trackedFunctionError = true + break + } } default: remainingArgs = append(remainingArgs, arg) @@ -272,13 +400,36 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy appendError(types.GinkgoErrors.InvalidDeclarationOfFocusedAndPending(node.CodeLocation, nodeType)) } - if node.Body == nil && node.ReportEachBody == nil && !node.MarkedPending && !trackedFunctionError { + if node.MarkedContinueOnFailure && !node.MarkedOrdered { + appendError(types.GinkgoErrors.InvalidContinueOnFailureDecoration(node.CodeLocation)) + } + + hasContext := node.HasContext || node.SynchronizedAfterSuiteProc1BodyHasContext || node.SynchronizedAfterSuiteAllProcsBodyHasContext || node.SynchronizedBeforeSuiteProc1BodyHasContext || node.SynchronizedBeforeSuiteAllProcsBodyHasContext + + if !hasContext && (node.NodeTimeout > 0 || node.SpecTimeout > 0 || node.GracePeriod > 0) && len(errors) == 0 { + appendError(types.GinkgoErrors.InvalidTimeoutOrGracePeriodForNonContextNode(node.CodeLocation, nodeType)) + } + + if !node.NodeType.Is(types.NodeTypeReportBeforeEach|types.NodeTypeReportAfterEach|types.NodeTypeSynchronizedBeforeSuite|types.NodeTypeSynchronizedAfterSuite|types.NodeTypeReportBeforeSuite|types.NodeTypeReportAfterSuite) && node.Body == nil && !node.MarkedPending && !trackedFunctionError { appendError(types.GinkgoErrors.MissingBodyFunction(node.CodeLocation, nodeType)) } + + if node.NodeType.Is(types.NodeTypeSynchronizedBeforeSuite) && 
!trackedFunctionError && (node.SynchronizedBeforeSuiteProc1Body == nil || node.SynchronizedBeforeSuiteAllProcsBody == nil) { + appendError(types.GinkgoErrors.MissingBodyFunction(node.CodeLocation, nodeType)) + } + + if node.NodeType.Is(types.NodeTypeSynchronizedAfterSuite) && !trackedFunctionError && (node.SynchronizedAfterSuiteProc1Body == nil || node.SynchronizedAfterSuiteAllProcsBody == nil) { + appendError(types.GinkgoErrors.MissingBodyFunction(node.CodeLocation, nodeType)) + } + for _, arg := range remainingArgs { appendError(types.GinkgoErrors.UnknownDecorator(node.CodeLocation, nodeType, arg)) } + if node.FlakeAttempts > 0 && node.MustPassRepeatedly > 0 { + appendError(types.GinkgoErrors.InvalidDeclarationOfFlakeAttemptsAndMustPassRepeatedly(node.CodeLocation, nodeType)) + } + if len(errors) > 0 { return Node{}, errors } @@ -286,76 +437,157 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy return node, errors } -func NewSynchronizedBeforeSuiteNode(proc1Body func() []byte, allProcsBody func([]byte), codeLocation types.CodeLocation) (Node, []error) { - return Node{ - ID: UniqueNodeID(), - NodeType: types.NodeTypeSynchronizedBeforeSuite, - SynchronizedBeforeSuiteProc1Body: proc1Body, - SynchronizedBeforeSuiteAllProcsBody: allProcsBody, - CodeLocation: codeLocation, - }, nil -} +var doneType = reflect.TypeOf(make(Done)) -func NewSynchronizedAfterSuiteNode(allProcsBody func(), proc1Body func(), codeLocation types.CodeLocation) (Node, []error) { - return Node{ - ID: UniqueNodeID(), - NodeType: types.NodeTypeSynchronizedAfterSuite, - SynchronizedAfterSuiteAllProcsBody: allProcsBody, - SynchronizedAfterSuiteProc1Body: proc1Body, - CodeLocation: codeLocation, - }, nil -} - -func NewReportAfterSuiteNode(text string, body func(types.Report), codeLocation types.CodeLocation) (Node, []error) { - return Node{ - ID: UniqueNodeID(), - Text: text, - NodeType: types.NodeTypeReportAfterSuite, - ReportAfterSuiteBody: body, - CodeLocation: codeLocation, - }, nil -} - -func NewCleanupNode(fail func(string, types.CodeLocation), args ...interface{}) (Node, []error) { - baseOffset := 2 - node := Node{ - ID: UniqueNodeID(), - NodeType: types.NodeTypeCleanupInvalid, - CodeLocation: types.NewCodeLocation(baseOffset), - NestingLevel: -1, +func extractBodyFunction(deprecationTracker *types.DeprecationTracker, cl types.CodeLocation, arg interface{}) (func(SpecContext), bool) { + t := reflect.TypeOf(arg) + if t.NumOut() > 0 || t.NumIn() > 1 { + return nil, false } - remainingArgs := []interface{}{} - for _, arg := range args { + if t.NumIn() == 1 { + if t.In(0) == doneType { + deprecationTracker.TrackDeprecation(types.Deprecations.Async(), cl) + deprecatedAsyncBody := arg.(func(Done)) + return func(SpecContext) { deprecatedAsyncBody(make(Done)) }, false + } else if t.In(0).Implements(specContextType) { + return arg.(func(SpecContext)), true + } else if t.In(0).Implements(contextType) { + body := arg.(func(context.Context)) + return func(c SpecContext) { body(c) }, true + } + + return nil, false + } + + body := arg.(func()) + return func(SpecContext) { body() }, false +} + +var byteType = reflect.TypeOf([]byte{}) + +func extractSynchronizedBeforeSuiteProc1Body(arg interface{}) (func(SpecContext) []byte, bool) { + t := reflect.TypeOf(arg) + v := reflect.ValueOf(arg) + + if t.NumOut() > 1 || t.NumIn() > 1 { + return nil, false + } else if t.NumOut() == 1 && t.Out(0) != byteType { + return nil, false + } else if t.NumIn() == 1 && !t.In(0).Implements(contextType) { + return nil, false 
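// Reviewer note: taken together, the checks above accept func(), func() []byte,
// func(context.Context), and func(context.Context) []byte as proc-1 bodies
// (a SpecContext parameter also qualifies, since it implements context.Context);
// any other signature is rejected.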
+ } + hasContext := t.NumIn() == 1 + + return func(c SpecContext) []byte { + var out []reflect.Value + if hasContext { + out = v.Call([]reflect.Value{reflect.ValueOf(c)}) + } else { + out = v.Call([]reflect.Value{}) + } + if len(out) == 1 { + return (out[0].Interface()).([]byte) + } else { + return []byte{} + } + }, hasContext +} + +func extractSynchronizedBeforeSuiteAllProcsBody(arg interface{}) (func(SpecContext, []byte), bool) { + t := reflect.TypeOf(arg) + v := reflect.ValueOf(arg) + hasContext, hasByte := false, false + + if t.NumOut() > 0 || t.NumIn() > 2 { + return nil, false + } else if t.NumIn() == 2 && t.In(0).Implements(contextType) && t.In(1) == byteType { + hasContext, hasByte = true, true + } else if t.NumIn() == 1 && t.In(0).Implements(contextType) { + hasContext = true + } else if t.NumIn() == 1 && t.In(0) == byteType { + hasByte = true + } else if t.NumIn() != 0 { + return nil, false + } + + return func(c SpecContext, b []byte) { + in := []reflect.Value{} + if hasContext { + in = append(in, reflect.ValueOf(c)) + } + if hasByte { + in = append(in, reflect.ValueOf(b)) + } + v.Call(in) + }, hasContext +} + +var errInterface = reflect.TypeOf((*error)(nil)).Elem() + +func NewCleanupNode(deprecationTracker *types.DeprecationTracker, fail func(string, types.CodeLocation), args ...interface{}) (Node, []error) { + decorations, remainingArgs := PartitionDecorations(args...) + baseOffset := 2 + cl := types.NewCodeLocation(baseOffset) + finalArgs := []interface{}{} + for _, arg := range decorations { switch t := reflect.TypeOf(arg); { case t == reflect.TypeOf(Offset(0)): - node.CodeLocation = types.NewCodeLocation(baseOffset + int(arg.(Offset))) + cl = types.NewCodeLocation(baseOffset + int(arg.(Offset))) case t == reflect.TypeOf(types.CodeLocation{}): - node.CodeLocation = arg.(types.CodeLocation) + cl = arg.(types.CodeLocation) default: - remainingArgs = append(remainingArgs, arg) + finalArgs = append(finalArgs, arg) } } + finalArgs = append(finalArgs, cl) if len(remainingArgs) == 0 { - return Node{}, []error{types.GinkgoErrors.DeferCleanupInvalidFunction(node.CodeLocation)} + return Node{}, []error{types.GinkgoErrors.DeferCleanupInvalidFunction(cl)} } + callback := reflect.ValueOf(remainingArgs[0]) - if !(callback.Kind() == reflect.Func && callback.Type().NumOut() <= 1) { - return Node{}, []error{types.GinkgoErrors.DeferCleanupInvalidFunction(node.CodeLocation)} + if !(callback.Kind() == reflect.Func) { + return Node{}, []error{types.GinkgoErrors.DeferCleanupInvalidFunction(cl)} } + callArgs := []reflect.Value{} for _, arg := range remainingArgs[1:] { callArgs = append(callArgs, reflect.ValueOf(arg)) } - cl := node.CodeLocation - node.Body = func() { - out := callback.Call(callArgs) - if len(out) == 1 && !out[0].IsNil() { - fail(fmt.Sprintf("DeferCleanup callback returned error: %v", out[0]), cl) + + hasContext := false + t := callback.Type() + if t.NumIn() > 0 { + if t.In(0).Implements(specContextType) { + hasContext = true + } else if t.In(0).Implements(contextType) && (len(callArgs) == 0 || !callArgs[0].Type().Implements(contextType)) { + hasContext = true } } - return node, nil + handleFailure := func(out []reflect.Value) { + if len(out) == 0 { + return + } + last := out[len(out)-1] + if last.Type().Implements(errInterface) && !last.IsNil() { + fail(fmt.Sprintf("DeferCleanup callback returned error: %v", last), cl) + } + } + + if hasContext { + finalArgs = append(finalArgs, func(c SpecContext) { + out := callback.Call(append([]reflect.Value{reflect.ValueOf(c)}, callArgs...)) + 
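// Reviewer note: for context-aware cleanup callbacks the SpecContext is
// prepended to the user-supplied arguments; handleFailure then fails the
// spec if the callback's last return value is a non-nil error.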
handleFailure(out) + }) + } else { + finalArgs = append(finalArgs, func() { + out := callback.Call(callArgs) + handleFailure(out) + }) + } + + return NewNode(deprecationTracker, types.NodeTypeCleanupInvalid, "", finalArgs...) } func (n Node) IsZero() bool { @@ -643,6 +875,26 @@ func (n Nodes) FirstNodeMarkedOrdered() Node { return Node{} } +func (n Nodes) GetMaxFlakeAttempts() int { + maxFlakeAttempts := 0 + for i := range n { + if n[i].FlakeAttempts > 0 { + maxFlakeAttempts = n[i].FlakeAttempts + } + } + return maxFlakeAttempts +} + +func (n Nodes) GetMaxMustPassRepeatedly() int { + maxMustPassRepeatedly := 0 + for i := range n { + if n[i].MustPassRepeatedly > 0 { + maxMustPassRepeatedly = n[i].MustPassRepeatedly + } + } + return maxMustPassRepeatedly +} + func unrollInterfaceSlice(args interface{}) []interface{} { v := reflect.ValueOf(args) if v.Kind() != reflect.Slice { diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go b/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go index 161be82..7ed43c7 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go @@ -7,6 +7,58 @@ import ( "github.com/onsi/ginkgo/v2/types" ) +type SortableSpecs struct { + Specs Specs + Indexes []int +} + +func NewSortableSpecs(specs Specs) *SortableSpecs { + indexes := make([]int, len(specs)) + for i := range specs { + indexes[i] = i + } + return &SortableSpecs{ + Specs: specs, + Indexes: indexes, + } +} +func (s *SortableSpecs) Len() int { return len(s.Indexes) } +func (s *SortableSpecs) Swap(i, j int) { s.Indexes[i], s.Indexes[j] = s.Indexes[j], s.Indexes[i] } +func (s *SortableSpecs) Less(i, j int) bool { + a, b := s.Specs[s.Indexes[i]], s.Specs[s.Indexes[j]] + + firstOrderedA := a.Nodes.FirstNodeMarkedOrdered() + firstOrderedB := b.Nodes.FirstNodeMarkedOrdered() + if firstOrderedA.ID == firstOrderedB.ID && !firstOrderedA.IsZero() { + // strictly preserve order in ordered containers. ID will track this as IDs are generated monotonically + return a.FirstNodeWithType(types.NodeTypeIt).ID < b.FirstNodeWithType(types.NodeTypeIt).ID + } + + aCLs := a.Nodes.WithType(types.NodeTypesForContainerAndIt).CodeLocations() + bCLs := b.Nodes.WithType(types.NodeTypesForContainerAndIt).CodeLocations() + for i := 0; i < len(aCLs) && i < len(bCLs); i++ { + aCL, bCL := aCLs[i], bCLs[i] + if aCL.FileName < bCL.FileName { + return true + } else if aCL.FileName > bCL.FileName { + return false + } + if aCL.LineNumber < bCL.LineNumber { + return true + } else if aCL.LineNumber > bCL.LineNumber { + return false + } + } + // either everything is equal or we have different lengths of CLs + if len(aCLs) < len(bCLs) { + return true + } else if len(aCLs) > len(bCLs) { + return false + } + // ok, now we are sure everything was equal. so we use the spec text to break ties + return a.Text() < b.Text() +} + type GroupedSpecIndices []SpecIndices type SpecIndices []int @@ -28,12 +80,17 @@ func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices, // Seed a new random source based on thee configured random seed. r := rand.New(rand.NewSource(suiteConfig.RandomSeed)) - // first break things into execution groups + // first, we sort the entire suite to ensure a deterministic order. the sort is performed by filename, then line number, and then spec text. this ensures every parallel process has the exact same spec order and is only necessary to cover the edge case where the user iterates over a map to generate specs. 
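Reviewer note (not part of the vendored diff): the comparison implemented by SortableSpecs.Less above is a three-key lexicographic sort: file name first, then line number, then the spec text as the final tie-breaker. A minimal stand-alone model of that ordering, using hypothetical loc and spec types in place of Ginkgo's:

package main

import (
	"fmt"
	"sort"
)

// loc and spec are hypothetical stand-ins for types.CodeLocation and a
// spec's container/It chain; they are not Ginkgo types.
type loc struct {
	file string
	line int
}

type spec struct {
	locs []loc // code locations, outermost container first, It last
	text string
}

// less mirrors the three-key comparison: file name, then line number,
// then (after comparing chain lengths) the spec text breaks any tie.
func less(a, b spec) bool {
	for i := 0; i < len(a.locs) && i < len(b.locs); i++ {
		if a.locs[i].file != b.locs[i].file {
			return a.locs[i].file < b.locs[i].file
		}
		if a.locs[i].line != b.locs[i].line {
			return a.locs[i].line < b.locs[i].line
		}
	}
	if len(a.locs) != len(b.locs) {
		return len(a.locs) < len(b.locs)
	}
	return a.text < b.text
}

func main() {
	specs := []spec{
		{locs: []loc{{"b_test.go", 10}}, text: "B"},
		{locs: []loc{{"a_test.go", 20}}, text: "Z"},
		{locs: []loc{{"a_test.go", 20}}, text: "A"},
	}
	sort.Slice(specs, func(i, j int) bool { return less(specs[i], specs[j]) })
	fmt.Println(specs) // the a_test.go specs come first; their tie breaks on text: A before Z
}

Because the sort keys are stable properties of the source tree, every parallel process computes the same order regardless of map-iteration quirks, which is exactly the edge case the comment above calls out.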
+ sortableSpecs := NewSortableSpecs(specs) + sort.Sort(sortableSpecs) + + // then we break things into execution groups // a group represents a single unit of execution and is a collection of SpecIndices // usually a group is just a single spec, however ordered containers must be preserved as a single group executionGroupIDs := []uint{} executionGroups := map[uint]SpecIndices{} - for idx, spec := range specs { + for _, idx := range sortableSpecs.Indexes { + spec := specs[idx] groupNode := spec.Nodes.FirstNodeMarkedOrdered() if groupNode.IsZero() { groupNode = spec.Nodes.FirstNodeWithType(types.NodeTypeIt) @@ -48,7 +105,6 @@ func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices, // we shuffle outermost containers. so we need to form shufflable groupings of GroupIDs shufflableGroupingIDs := []uint{} shufflableGroupingIDToGroupIDs := map[uint][]uint{} - shufflableGroupingsIDToSortKeys := map[uint]string{} // for each execution group we're going to have to pick a node to represent how the // execution group is grouped for shuffling: @@ -57,7 +113,7 @@ func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices, nodeTypesToShuffle = types.NodeTypeIt } - //so, fo reach execution group: + //so, for each execution group: for _, groupID := range executionGroupIDs { // pick out a representative spec representativeSpec := specs[executionGroups[groupID][0]] @@ -72,22 +128,9 @@ func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices, if len(shufflableGroupingIDToGroupIDs[shufflableGroupingNode.ID]) == 1 { // record the shuffleable group ID shufflableGroupingIDs = append(shufflableGroupingIDs, shufflableGroupingNode.ID) - // and record the sort key to use - shufflableGroupingsIDToSortKeys[shufflableGroupingNode.ID] = shufflableGroupingNode.CodeLocation.String() } } - // now we sort the shufflable groups by the sort key. We use the shufflable group nodes code location and break ties using its node id - sort.SliceStable(shufflableGroupingIDs, func(i, j int) bool { - keyA := shufflableGroupingsIDToSortKeys[shufflableGroupingIDs[i]] - keyB := shufflableGroupingsIDToSortKeys[shufflableGroupingIDs[j]] - if keyA == keyB { - return shufflableGroupingIDs[i] < shufflableGroupingIDs[j] - } else { - return keyA < keyB - } - }) - // now we permute the sorted shufflable grouping IDs and build the ordered Groups orderedGroups := GroupedSpecIndices{} permutation := r.Perm(len(shufflableGroupingIDs)) diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go index b59918a..4a1c094 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go @@ -143,7 +143,7 @@ func (interceptor *genericOutputInterceptor) ResumeIntercepting() { go startPipeFactory(interceptor.pipeChannel, interceptor.shutdown) } - // Now we make a pipe, we'll use this to redirect the input to the 1 and 2 file descriptors (this is how everything else in the world is tring to log to stdout and stderr) + // Now we make a pipe, we'll use this to redirect the input to the 1 and 2 file descriptors (this is how everything else in the world is string to log to stdout and stderr) // we get the pipe from our pipe factory. 
it runs in the background so we can request the next pipe while the spec being intercepted is running interceptor.pipe = <-interceptor.pipeChannel diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go index e875001..f5ae15b 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go @@ -28,7 +28,7 @@ func (impl *dupSyscallOutputInterceptorImpl) CreateStdoutStderrClones() (*os.Fil // And then wrap the clone file descriptors in files. // One benefit of this (that we don't use yet) is that we can actually write - // to these files to emit output to the console evne though we're intercepting output + // to these files to emit output to the console even though we're intercepting output stdoutClone := os.NewFile(uintptr(stdoutCloneFD), "stdout-clone") stderrClone := os.NewFile(uintptr(stderrCloneFD), "stderr-clone") diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go index 7d5cb0b..b3cd642 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go @@ -42,6 +42,8 @@ type Client interface { PostSuiteWillBegin(report types.Report) error PostDidRun(report types.SpecReport) error PostSuiteDidEnd(report types.Report) error + PostReportBeforeSuiteCompleted(state types.SpecState) error + BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error) BlockUntilNonprimaryProcsHaveFinished() error @@ -49,6 +51,7 @@ type Client interface { FetchNextCounter() (int, error) PostAbort() error ShouldAbort() bool + PostEmitProgressReport(report types.ProgressReport) error Write(p []byte) (int, error) } diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go index d076d5d..6547c7a 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go @@ -94,6 +94,23 @@ func (client *httpClient) PostSuiteDidEnd(report types.Report) error { return client.post("/suite-did-end", report) } +func (client *httpClient) PostEmitProgressReport(report types.ProgressReport) error { + return client.post("/progress-report", report) +} + +func (client *httpClient) PostReportBeforeSuiteCompleted(state types.SpecState) error { + return client.post("/report-before-suite-completed", state) +} + +func (client *httpClient) BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error) { + var state types.SpecState + err := client.poll("/report-before-suite-state", &state) + if err == ErrorGone { + return types.SpecStateFailed, nil + } + return state, err +} + func (client *httpClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error { beforeSuiteState := BeforeSuiteState{ State: state, diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go index ca1dcdc..d2c71ab 100644 --- 
a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go @@ -26,7 +26,7 @@ type httpServer struct { handler *ServerHandler } -//Create a new server, automatically selecting a port +// Create a new server, automatically selecting a port func newHttpServer(parallelTotal int, reporter reporters.Reporter) (*httpServer, error) { listener, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { @@ -38,7 +38,7 @@ func newHttpServer(parallelTotal int, reporter reporters.Reporter) (*httpServer, }, nil } -//Start the server. You don't need to `go s.Start()`, just `s.Start()` +// Start the server. You don't need to `go s.Start()`, just `s.Start()` func (server *httpServer) Start() { httpServer := &http.Server{} mux := http.NewServeMux() @@ -49,8 +49,11 @@ func (server *httpServer) Start() { mux.HandleFunc("/did-run", server.didRun) mux.HandleFunc("/suite-did-end", server.specSuiteDidEnd) mux.HandleFunc("/emit-output", server.emitOutput) + mux.HandleFunc("/progress-report", server.emitProgressReport) //synchronization endpoints + mux.HandleFunc("/report-before-suite-completed", server.handleReportBeforeSuiteCompleted) + mux.HandleFunc("/report-before-suite-state", server.handleReportBeforeSuiteState) mux.HandleFunc("/before-suite-completed", server.handleBeforeSuiteCompleted) mux.HandleFunc("/before-suite-state", server.handleBeforeSuiteState) mux.HandleFunc("/have-nonprimary-procs-finished", server.handleHaveNonprimaryProcsFinished) @@ -62,12 +65,12 @@ func (server *httpServer) Start() { go httpServer.Serve(server.listener) } -//Stop the server +// Stop the server func (server *httpServer) Close() { server.listener.Close() } -//The address the server can be reached it. Pass this into the `ForwardingReporter`. +// The address the server can be reached it. Pass this into the `ForwardingReporter`. 
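Reviewer note (not part of the vendored diff): the report-before-suite endpoints registered above mirror the existing before-suite synchronization: proc 1 posts its state to /report-before-suite-completed, the other processes poll /report-before-suite-state until a state is available, and a proc 1 that exits without posting surfaces as ErrorGone, which the client maps to SpecStateFailed. A rough stand-alone model of that poll loop, with hypothetical names throughout:

package main

import (
	"errors"
	"fmt"
	"time"
)

// errEarly and errGone stand in for the parallel_support sentinel errors
// ErrorEarly and ErrorGone used by the vendored client/server pair.
var (
	errEarly = errors.New("proc 1 has not posted yet")
	errGone  = errors.New("proc 1 is gone")
)

// blockUntilState models BlockUntilReportBeforeSuiteCompleted: retry while
// the server says it is too early, and treat a vanished proc 1 as a failure.
func blockUntilState(fetch func() (string, error)) (string, error) {
	for {
		state, err := fetch()
		switch {
		case errors.Is(err, errEarly):
			time.Sleep(10 * time.Millisecond) // back off and poll again
		case errors.Is(err, errGone):
			return "failed", nil // mirrors the ErrorGone -> SpecStateFailed mapping
		default:
			return state, err
		}
	}
}

func main() {
	calls := 0
	state, _ := blockUntilState(func() (string, error) {
		calls++
		if calls < 3 {
			return "", errEarly
		}
		return "passed", nil
	})
	fmt.Println(state, calls) // passed 3
}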
func (server *httpServer) Address() string { return "http://" + server.listener.Addr().String() } @@ -92,7 +95,7 @@ func (server *httpServer) RegisterAlive(node int, alive func() bool) { // Streaming Endpoints // -//The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters` +// The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters` func (server *httpServer) decode(writer http.ResponseWriter, request *http.Request, object interface{}) bool { defer request.Body.Close() if json.NewDecoder(request.Body).Decode(object) != nil { @@ -155,6 +158,31 @@ func (server *httpServer) emitOutput(writer http.ResponseWriter, request *http.R server.handleError(server.handler.EmitOutput(output, &n), writer) } +func (server *httpServer) emitProgressReport(writer http.ResponseWriter, request *http.Request) { + var report types.ProgressReport + if !server.decode(writer, request, &report) { + return + } + server.handleError(server.handler.EmitProgressReport(report, voidReceiver), writer) +} + +func (server *httpServer) handleReportBeforeSuiteCompleted(writer http.ResponseWriter, request *http.Request) { + var state types.SpecState + if !server.decode(writer, request, &state) { + return + } + + server.handleError(server.handler.ReportBeforeSuiteCompleted(state, voidReceiver), writer) +} + +func (server *httpServer) handleReportBeforeSuiteState(writer http.ResponseWriter, request *http.Request) { + var state types.SpecState + if server.handleError(server.handler.ReportBeforeSuiteState(voidSender, &state), writer) { + return + } + json.NewEncoder(writer).Encode(state) +} + func (server *httpServer) handleBeforeSuiteCompleted(writer http.ResponseWriter, request *http.Request) { var beforeSuiteState BeforeSuiteState if !server.decode(writer, request, &beforeSuiteState) { diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go index 4e83b09..59e8e6f 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go @@ -72,6 +72,23 @@ func (client *rpcClient) Write(p []byte) (int, error) { return n, err } +func (client *rpcClient) PostEmitProgressReport(report types.ProgressReport) error { + return client.client.Call("Server.EmitProgressReport", report, voidReceiver) +} + +func (client *rpcClient) PostReportBeforeSuiteCompleted(state types.SpecState) error { + return client.client.Call("Server.ReportBeforeSuiteCompleted", state, voidReceiver) +} + +func (client *rpcClient) BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error) { + var state types.SpecState + err := client.poll("Server.ReportBeforeSuiteState", &state) + if err == ErrorGone { + return types.SpecStateFailed, nil + } + return state, err +} + func (client *rpcClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error { beforeSuiteState := BeforeSuiteState{ State: state, diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go index ca471cf..a6d9879 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go @@ -18,16 +18,17 @@ var voidSender Void // It handles all the business logic to avoid duplication between 
the two servers type ServerHandler struct { - done chan interface{} - outputDestination io.Writer - reporter reporters.Reporter - alives []func() bool - lock *sync.Mutex - beforeSuiteState BeforeSuiteState - parallelTotal int - counter int - counterLock *sync.Mutex - shouldAbort bool + done chan interface{} + outputDestination io.Writer + reporter reporters.Reporter + alives []func() bool + lock *sync.Mutex + beforeSuiteState BeforeSuiteState + reportBeforeSuiteState types.SpecState + parallelTotal int + counter int + counterLock *sync.Mutex + shouldAbort bool numSuiteDidBegins int numSuiteDidEnds int @@ -37,11 +38,12 @@ type ServerHandler struct { func newServerHandler(parallelTotal int, reporter reporters.Reporter) *ServerHandler { return &ServerHandler{ - reporter: reporter, - lock: &sync.Mutex{}, - counterLock: &sync.Mutex{}, - alives: make([]func() bool, parallelTotal), - beforeSuiteState: BeforeSuiteState{Data: nil, State: types.SpecStateInvalid}, + reporter: reporter, + lock: &sync.Mutex{}, + counterLock: &sync.Mutex{}, + alives: make([]func() bool, parallelTotal), + beforeSuiteState: BeforeSuiteState{Data: nil, State: types.SpecStateInvalid}, + parallelTotal: parallelTotal, outputDestination: os.Stdout, done: make(chan interface{}), @@ -108,6 +110,13 @@ func (handler *ServerHandler) EmitOutput(output []byte, n *int) error { return err } +func (handler *ServerHandler) EmitProgressReport(report types.ProgressReport, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + handler.reporter.EmitProgressReport(report) + return nil +} + func (handler *ServerHandler) registerAlive(proc int, alive func() bool) { handler.lock.Lock() defer handler.lock.Unlock() @@ -133,6 +142,29 @@ func (handler *ServerHandler) haveNonprimaryProcsFinished() bool { return true } +func (handler *ServerHandler) ReportBeforeSuiteCompleted(reportBeforeSuiteState types.SpecState, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + handler.reportBeforeSuiteState = reportBeforeSuiteState + + return nil +} + +func (handler *ServerHandler) ReportBeforeSuiteState(_ Void, reportBeforeSuiteState *types.SpecState) error { + proc1IsAlive := handler.procIsAlive(1) + handler.lock.Lock() + defer handler.lock.Unlock() + if handler.reportBeforeSuiteState == types.SpecStateInvalid { + if proc1IsAlive { + return ErrorEarly + } else { + return ErrorGone + } + } + *reportBeforeSuiteState = handler.reportBeforeSuiteState + return nil +} + func (handler *ServerHandler) BeforeSuiteCompleted(beforeSuiteState BeforeSuiteState, _ *Void) error { handler.lock.Lock() defer handler.lock.Unlock() diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go new file mode 100644 index 0000000..11269cf --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go @@ -0,0 +1,287 @@ +package internal + +import ( + "bufio" + "bytes" + "context" + "fmt" + "io" + "os" + "os/signal" + "path/filepath" + "runtime" + "strconv" + "strings" + "time" + + "github.com/onsi/ginkgo/v2/types" +) + +var _SOURCE_CACHE = map[string][]string{} + +type ProgressSignalRegistrar func(func()) context.CancelFunc + +func RegisterForProgressSignal(handler func()) context.CancelFunc { + signalChannel := make(chan os.Signal, 1) + if len(PROGRESS_SIGNALS) > 0 { + signal.Notify(signalChannel, PROGRESS_SIGNALS...) 
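// Reviewer note: PROGRESS_SIGNALS is platform-specific: SIGUSR1 on Linux and
// Solaris, SIGINFO plus SIGUSR1 on Darwin and the BSDs, and empty on Windows
// (see the progress_report_*.go files added below), hence the length guard
// around this Notify call.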
+ } + ctx, cancel := context.WithCancel(context.Background()) + go func() { + for { + select { + case <-signalChannel: + handler() + case <-ctx.Done(): + signal.Stop(signalChannel) + return + } + } + }() + + return cancel +} + +type ProgressStepCursor struct { + Text string + CodeLocation types.CodeLocation + StartTime time.Time +} + +func NewProgressReport(isRunningInParallel bool, report types.SpecReport, currentNode Node, currentNodeStartTime time.Time, currentStep types.SpecEvent, gwOutput string, timelineLocation types.TimelineLocation, additionalReports []string, sourceRoots []string, includeAll bool) (types.ProgressReport, error) { + pr := types.ProgressReport{ + ParallelProcess: report.ParallelProcess, + RunningInParallel: isRunningInParallel, + ContainerHierarchyTexts: report.ContainerHierarchyTexts, + LeafNodeText: report.LeafNodeText, + LeafNodeLocation: report.LeafNodeLocation, + SpecStartTime: report.StartTime, + + CurrentNodeType: currentNode.NodeType, + CurrentNodeText: currentNode.Text, + CurrentNodeLocation: currentNode.CodeLocation, + CurrentNodeStartTime: currentNodeStartTime, + + CurrentStepText: currentStep.Message, + CurrentStepLocation: currentStep.CodeLocation, + CurrentStepStartTime: currentStep.TimelineLocation.Time, + + AdditionalReports: additionalReports, + + CapturedGinkgoWriterOutput: gwOutput, + TimelineLocation: timelineLocation, + } + + goroutines, err := extractRunningGoroutines() + if err != nil { + return pr, err + } + pr.Goroutines = goroutines + + // now we want to try to find goroutines of interest. these will be goroutines that have any function calls with code in packagesOfInterest: + packagesOfInterest := map[string]bool{} + packageFromFilename := func(filename string) string { + return filepath.Dir(filename) + } + addPackageFor := func(filename string) { + if filename != "" { + packagesOfInterest[packageFromFilename(filename)] = true + } + } + isPackageOfInterest := func(filename string) bool { + stackPackage := packageFromFilename(filename) + for packageOfInterest := range packagesOfInterest { + if strings.HasPrefix(stackPackage, packageOfInterest) { + return true + } + } + return false + } + for _, location := range report.ContainerHierarchyLocations { + addPackageFor(location.FileName) + } + addPackageFor(report.LeafNodeLocation.FileName) + addPackageFor(currentNode.CodeLocation.FileName) + addPackageFor(currentStep.CodeLocation.FileName) + + //First, we find the SpecGoroutine - this will be the goroutine that includes `runNode` + specGoRoutineIdx := -1 + runNodeFunctionCallIdx := -1 +OUTER: + for goroutineIdx, goroutine := range pr.Goroutines { + for functionCallIdx, functionCall := range goroutine.Stack { + if strings.Contains(functionCall.Function, "ginkgo/v2/internal.(*Suite).runNode.func") { + specGoRoutineIdx = goroutineIdx + runNodeFunctionCallIdx = functionCallIdx + break OUTER + } + } + } + + //Now, we find the first non-Ginkgo function call + if specGoRoutineIdx > -1 { + for runNodeFunctionCallIdx >= 0 { + fn := goroutines[specGoRoutineIdx].Stack[runNodeFunctionCallIdx].Function + file := goroutines[specGoRoutineIdx].Stack[runNodeFunctionCallIdx].Filename + // these are all things that could potentially happen from within ginkgo + if strings.Contains(fn, "ginkgo/v2/internal") || strings.Contains(fn, "reflect.Value") || strings.Contains(file, "ginkgo/table_dsl") || strings.Contains(file, "ginkgo/core_dsl") { + runNodeFunctionCallIdx-- + continue + } + if 
strings.Contains(goroutines[specGoRoutineIdx].Stack[runNodeFunctionCallIdx].Function, "ginkgo/table_dsl") { + + } + //found it! lets add its package of interest + addPackageFor(goroutines[specGoRoutineIdx].Stack[runNodeFunctionCallIdx].Filename) + break + } + } + + ginkgoEntryPointIdx := -1 +OUTER_GINKGO_ENTRY_POINT: + for goroutineIdx, goroutine := range pr.Goroutines { + for _, functionCall := range goroutine.Stack { + if strings.Contains(functionCall.Function, "ginkgo/v2.RunSpecs") { + ginkgoEntryPointIdx = goroutineIdx + break OUTER_GINKGO_ENTRY_POINT + } + } + } + + // Now we go through all goroutines and highlight any lines with packages in `packagesOfInterest` + // Any goroutines with highlighted lines end up in the HighlightGoRoutines + for goroutineIdx, goroutine := range pr.Goroutines { + if goroutineIdx == ginkgoEntryPointIdx { + continue + } + if goroutineIdx == specGoRoutineIdx { + pr.Goroutines[goroutineIdx].IsSpecGoroutine = true + } + for functionCallIdx, functionCall := range goroutine.Stack { + if isPackageOfInterest(functionCall.Filename) { + goroutine.Stack[functionCallIdx].Highlight = true + goroutine.Stack[functionCallIdx].Source, goroutine.Stack[functionCallIdx].SourceHighlight = fetchSource(functionCall.Filename, functionCall.Line, 2, sourceRoots) + } + } + } + + if !includeAll { + goroutines := []types.Goroutine{pr.SpecGoroutine()} + goroutines = append(goroutines, pr.HighlightedGoroutines()...) + pr.Goroutines = goroutines + } + + return pr, nil +} + +func extractRunningGoroutines() ([]types.Goroutine, error) { + var stack []byte + for size := 64 * 1024; ; size *= 2 { + stack = make([]byte, size) + if n := runtime.Stack(stack, true); n < size { + stack = stack[:n] + break + } + } + r := bufio.NewReader(bytes.NewReader(stack)) + out := []types.Goroutine{} + idx := -1 + for { + line, err := r.ReadString('\n') + if err == io.EOF { + break + } + + line = strings.TrimSuffix(line, "\n") + + //skip blank lines + if line == "" { + continue + } + + //parse headers for new goroutine frames + if strings.HasPrefix(line, "goroutine") { + out = append(out, types.Goroutine{}) + idx = len(out) - 1 + + line = strings.TrimPrefix(line, "goroutine ") + line = strings.TrimSuffix(line, ":") + fields := strings.SplitN(line, " ", 2) + if len(fields) != 2 { + return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid goroutine frame header: %s", line)) + } + out[idx].ID, err = strconv.ParseUint(fields[0], 10, 64) + if err != nil { + return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid goroutine ID: %s", fields[1])) + } + + out[idx].State = strings.TrimSuffix(strings.TrimPrefix(fields[1], "["), "]") + continue + } + + //if we are here we must be at a function call entry in the stack + functionCall := types.FunctionCall{ + Function: strings.TrimPrefix(line, "created by "), // no need to track 'created by' + } + + line, err = r.ReadString('\n') + line = strings.TrimSuffix(line, "\n") + if err == io.EOF { + return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid function call: %s -- missing file name and line number", functionCall.Function)) + } + line = strings.TrimLeft(line, " \t") + delimiterIdx := strings.LastIndex(line, ":") + if delimiterIdx == -1 { + return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid filename and line number: %s", line)) + } + functionCall.Filename = line[:delimiterIdx] + line = strings.Split(line[delimiterIdx+1:], " ")[0] + lineNumber, err := strconv.ParseInt(line, 10, 64) + 
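// Reviewer note: the assignment below runs before err is checked; on a parse
// failure lineNumber is zero and the error return just after fires, so the
// zero Line value never reaches callers.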
functionCall.Line = int(lineNumber) + if err != nil { + return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid function call line number: %s\n%s", line, err.Error())) + } + out[idx].Stack = append(out[idx].Stack, functionCall) + } + + return out, nil +} + +func fetchSource(filename string, lineNumber int, span int, configuredSourceRoots []string) ([]string, int) { + if filename == "" { + return []string{}, 0 + } + + var lines []string + var ok bool + if lines, ok = _SOURCE_CACHE[filename]; !ok { + sourceRoots := []string{""} + sourceRoots = append(sourceRoots, configuredSourceRoots...) + var data []byte + var err error + var found bool + for _, root := range sourceRoots { + data, err = os.ReadFile(filepath.Join(root, filename)) + if err == nil { + found = true + break + } + } + if !found { + return []string{}, 0 + } + lines = strings.Split(string(data), "\n") + _SOURCE_CACHE[filename] = lines + } + + startIndex := lineNumber - span - 1 + endIndex := startIndex + span + span + 1 + if startIndex < 0 { + startIndex = 0 + } + if endIndex > len(lines) { + endIndex = len(lines) + } + highlightIndex := lineNumber - 1 - startIndex + return lines[startIndex:endIndex], highlightIndex +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_bsd.go b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_bsd.go new file mode 100644 index 0000000..61e0ed3 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_bsd.go @@ -0,0 +1,11 @@ +//go:build freebsd || openbsd || netbsd || darwin || dragonfly +// +build freebsd openbsd netbsd darwin dragonfly + +package internal + +import ( + "os" + "syscall" +) + +var PROGRESS_SIGNALS = []os.Signal{syscall.SIGINFO, syscall.SIGUSR1} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_unix.go b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_unix.go new file mode 100644 index 0000000..ad30de4 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_unix.go @@ -0,0 +1,11 @@ +//go:build linux || solaris +// +build linux solaris + +package internal + +import ( + "os" + "syscall" +) + +var PROGRESS_SIGNALS = []os.Signal{syscall.SIGUSR1} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_win.go b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_win.go new file mode 100644 index 0000000..0eca251 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_win.go @@ -0,0 +1,8 @@ +//go:build windows +// +build windows + +package internal + +import "os" + +var PROGRESS_SIGNALS = []os.Signal{} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/progress_reporter_manager.go b/vendor/github.com/onsi/ginkgo/v2/internal/progress_reporter_manager.go new file mode 100644 index 0000000..2c6e260 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/progress_reporter_manager.go @@ -0,0 +1,79 @@ +package internal + +import ( + "context" + "sort" + "strings" + "sync" + + "github.com/onsi/ginkgo/v2/types" +) + +type ProgressReporterManager struct { + lock *sync.Mutex + progressReporters map[int]func() string + prCounter int +} + +func NewProgressReporterManager() *ProgressReporterManager { + return &ProgressReporterManager{ + progressReporters: map[int]func() string{}, + lock: &sync.Mutex{}, + } +} + +func (prm *ProgressReporterManager) AttachProgressReporter(reporter func() string) func() { + prm.lock.Lock() + defer prm.lock.Unlock() + prm.prCounter += 1 + prCounter := prm.prCounter + prm.progressReporters[prCounter] = 
reporter + + return func() { + prm.lock.Lock() + defer prm.lock.Unlock() + delete(prm.progressReporters, prCounter) + } +} + +func (prm *ProgressReporterManager) QueryProgressReporters(ctx context.Context, failer *Failer) []string { + prm.lock.Lock() + keys := []int{} + for key := range prm.progressReporters { + keys = append(keys, key) + } + sort.Ints(keys) + reporters := []func() string{} + for _, key := range keys { + reporters = append(reporters, prm.progressReporters[key]) + } + prm.lock.Unlock() + + if len(reporters) == 0 { + return nil + } + out := []string{} + for _, reporter := range reporters { + reportC := make(chan string, 1) + go func() { + defer func() { + e := recover() + if e != nil { + failer.Panic(types.NewCodeLocationWithStackTrace(1), e) + reportC <- "failed to query attached progress reporter" + } + }() + reportC <- reporter() + }() + var report string + select { + case report = <-reportC: + case <-ctx.Done(): + return out + } + if strings.TrimSpace(report) != "" { + out = append(out, report) + } + } + return out +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go b/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go index 74199f3..cc351a3 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go @@ -1,7 +1,6 @@ package internal import ( - "reflect" "time" "github.com/onsi/ginkgo/v2/types" @@ -13,20 +12,20 @@ func NewReportEntry(name string, cl types.CodeLocation, args ...interface{}) (Re out := ReportEntry{ Visibility: types.ReportEntryVisibilityAlways, Name: name, - Time: time.Now(), Location: cl, + Time: time.Now(), } var didSetValue = false for _, arg := range args { - switch reflect.TypeOf(arg) { - case reflect.TypeOf(types.ReportEntryVisibilityAlways): - out.Visibility = arg.(types.ReportEntryVisibility) - case reflect.TypeOf(types.CodeLocation{}): - out.Location = arg.(types.CodeLocation) - case reflect.TypeOf(Offset(0)): - out.Location = types.NewCodeLocation(2 + int(arg.(Offset))) - case reflect.TypeOf(out.Time): - out.Time = arg.(time.Time) + switch x := arg.(type) { + case types.ReportEntryVisibility: + out.Visibility = x + case types.CodeLocation: + out.Location = x + case Offset: + out.Location = types.NewCodeLocation(2 + int(x)) + case time.Time: + out.Time = x default: if didSetValue { return ReportEntry{}, types.GinkgoErrors.TooManyReportEntryValues(out.Location, arg) diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/spec.go b/vendor/github.com/onsi/ginkgo/v2/internal/spec.go index 92072ed..7c4ee5b 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/spec.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/spec.go @@ -2,6 +2,7 @@ package internal import ( "strings" + "time" "github.com/onsi/ginkgo/v2/types" ) @@ -40,6 +41,21 @@ func (s Spec) FlakeAttempts() int { return flakeAttempts } +func (s Spec) MustPassRepeatedly() int { + mustPassRepeatedly := 0 + for i := range s.Nodes { + if s.Nodes[i].MustPassRepeatedly > 0 { + mustPassRepeatedly = s.Nodes[i].MustPassRepeatedly + } + } + + return mustPassRepeatedly +} + +func (s Spec) SpecTimeout() time.Duration { + return s.FirstNodeWithType(types.NodeTypeIt).SpecTimeout +} + type Specs []Spec func (s Specs) HasAnySpecsMarkedPending() bool { diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go b/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go new file mode 100644 index 0000000..2515b84 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go @@ -0,0 
+1,47 @@ +package internal + +import ( + "context" + + "github.com/onsi/ginkgo/v2/types" +) + +type SpecContext interface { + context.Context + + SpecReport() types.SpecReport + AttachProgressReporter(func() string) func() +} + +type specContext struct { + context.Context + *ProgressReporterManager + + cancel context.CancelFunc + + suite *Suite +} + +/* +SpecContext includes a reference to `suite` and embeds itself in itself as a "GINKGO_SPEC_CONTEXT" value. This allows users to create child Contexts without having down-stream consumers (e.g. Gomega) lose access to the SpecContext and its methods. This allows us to build extensions on top of Ginkgo that simply take an all-encompassing context. + +Note that while SpecContext is used to enforce deadlines by Ginkgo it is not configured as a context.WithDeadline. Instead, Ginkgo owns responsibility for cancelling the context when the deadline elapses. + +This is because Ginkgo needs finer control over when the context is canceled. Specifically, Ginkgo needs to generate a ProgressReport before it cancels the context to ensure progress is captured where the spec is currently running. The only way to avoid a race here is to manually control the cancellation. +*/ +func NewSpecContext(suite *Suite) *specContext { + ctx, cancel := context.WithCancel(context.Background()) + sc := &specContext{ + cancel: cancel, + suite: suite, + ProgressReporterManager: NewProgressReporterManager(), + } + ctx = context.WithValue(ctx, "GINKGO_SPEC_CONTEXT", sc) //yes, yes, the go docs say don't use a string for a key... but we'd rather avoid a circular dependency between Gomega and Ginkgo + sc.Context = ctx //thank goodness for garbage collectors that can handle circular dependencies + + return sc +} + +func (sc *specContext) SpecReport() types.SpecReport { + return sc.suite.CurrentSpecReport() +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go index e27a29d..a1dbd4c 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go @@ -5,11 +5,11 @@ import ( "sync" "time" - "github.com/onsi/ginkgo/v2/formatter" "github.com/onsi/ginkgo/v2/internal/interrupt_handler" "github.com/onsi/ginkgo/v2/internal/parallel_support" "github.com/onsi/ginkgo/v2/reporters" "github.com/onsi/ginkgo/v2/types" + "golang.org/x/net/context" ) type Phase uint @@ -20,10 +20,14 @@ const ( PhaseRun ) +var PROGRESS_REPORTER_DEADLING = 5 * time.Second + type Suite struct { tree *TreeNode topLevelContainers Nodes + *ProgressReporterManager + phase Phase suiteNodes Nodes @@ -35,21 +39,41 @@ type Suite struct { outputInterceptor OutputInterceptor interruptHandler interrupt_handler.InterruptHandlerInterface config types.SuiteConfig + deadline time.Time - skipAll bool - report types.Report - currentSpecReport types.SpecReport - currentSpecReportUserAccessLock *sync.Mutex - currentNode Node + skipAll bool + report types.Report + currentSpecReport types.SpecReport + currentNode Node + currentNodeStartTime time.Time + + currentSpecContext *specContext + + currentByStep types.SpecEvent + timelineOrder int + + /* + We don't need to lock around all operations. Just those that *could* happen concurrently. + + Suite, generally, only runs one node at a time - and so the possibiity for races is small. In fact, the presence of a race usually indicates the user has launched a goroutine that has leaked past the node it was launched in. 
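Reviewer note (not part of the vendored diff): per the spec_context.go comment above, the SpecContext stores itself under the "GINKGO_SPEC_CONTEXT" key so that consumers handed any derived context can still reach it and its methods. A minimal sketch of that lookup, using a hypothetical stand-in interface rather than Ginkgo's own:

package main

import (
	"context"
	"fmt"
)

// specContextLike is a hypothetical stand-in for Ginkgo's SpecContext;
// only its shape matters for this sketch.
type specContextLike interface {
	context.Context
	AttachProgressReporter(func() string) func()
}

// fromContext recovers the SpecContext from a derived context via the same
// string key the vendored code uses when embedding it.
func fromContext(ctx context.Context) (specContextLike, bool) {
	sc, ok := ctx.Value("GINKGO_SPEC_CONTEXT").(specContextLike)
	return sc, ok
}

func main() {
	// a plain context carries no SpecContext, so the lookup reports false
	if _, ok := fromContext(context.Background()); !ok {
		fmt.Println("no SpecContext attached")
	}
}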
+ + However, there are some operations that can happen concurrently: + + - AddReportEntry and CurrentSpecReport can be accessed at any point by the user - including in goroutines that outlive the node intentionally (see, e.g. #1020). They both form a self-contained read-write pair and so a lock in them is sufficent. + - generateProgressReport can be invoked at any point in time by an interrupt or a progres poll. Moreover, it requires access to currentSpecReport, currentNode, currentNodeStartTime, and progressStepCursor. To make it threadsafe we need to lock around generateProgressReport when we read those variables _and_ everywhere those variables are *written*. In general we don't need to worry about all possible field writes to these variables as what `generateProgressReport` does with these variables is fairly selective (hence the name of the lock). Specifically, we dont' need to lock around state and failure message changes on `currentSpecReport` - just the setting of the variable itself. + */ + selectiveLock *sync.Mutex client parallel_support.Client } func NewSuite() *Suite { return &Suite{ - tree: &TreeNode{}, - phase: PhaseBuildTopLevel, - currentSpecReportUserAccessLock: &sync.Mutex{}, + tree: &TreeNode{}, + phase: PhaseBuildTopLevel, + ProgressReporterManager: NewProgressReporterManager(), + + selectiveLock: &sync.Mutex{}, } } @@ -66,7 +90,7 @@ func (suite *Suite) BuildTree() error { return nil } -func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string, failer *Failer, reporter reporters.Reporter, writer WriterInterface, outputInterceptor OutputInterceptor, interruptHandler interrupt_handler.InterruptHandlerInterface, client parallel_support.Client, suiteConfig types.SuiteConfig) (bool, bool) { +func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string, failer *Failer, reporter reporters.Reporter, writer WriterInterface, outputInterceptor OutputInterceptor, interruptHandler interrupt_handler.InterruptHandlerInterface, client parallel_support.Client, progressSignalRegistrar ProgressSignalRegistrar, suiteConfig types.SuiteConfig) (bool, bool) { if suite.phase != PhaseBuildTree { panic("cannot run before building the tree = call suite.BuildTree() first") } @@ -83,8 +107,16 @@ func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string suite.interruptHandler = interruptHandler suite.config = suiteConfig + if suite.config.Timeout > 0 { + suite.deadline = time.Now().Add(suite.config.Timeout) + } + + cancelProgressHandler := progressSignalRegistrar(suite.handleProgressSignal) + success := suite.runSpecs(description, suiteLabels, suitePath, hasProgrammaticFocus, specs) + cancelProgressHandler() + return success, hasProgrammaticFocus } @@ -103,7 +135,7 @@ func (suite *Suite) PushNode(node Node) error { return suite.pushCleanupNode(node) } - if node.NodeType.Is(types.NodeTypeBeforeSuite | types.NodeTypeAfterSuite | types.NodeTypeSynchronizedBeforeSuite | types.NodeTypeSynchronizedAfterSuite | types.NodeTypeReportAfterSuite) { + if node.NodeType.Is(types.NodeTypeBeforeSuite | types.NodeTypeAfterSuite | types.NodeTypeSynchronizedBeforeSuite | types.NodeTypeSynchronizedAfterSuite | types.NodeTypeBeforeSuite | types.NodeTypeReportBeforeSuite | types.NodeTypeReportAfterSuite) { return suite.pushSuiteNode(node) } @@ -125,6 +157,13 @@ func (suite *Suite) PushNode(node Node) error { } } + if node.MarkedContinueOnFailure { + firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered() + if !firstOrderedNode.IsZero() 
{ + return types.GinkgoErrors.InvalidContinueOnFailureDecoration(node.CodeLocation) + } + } + if node.NodeType == types.NodeTypeContainer { // During PhaseBuildTopLevel we only track the top level containers without entering them // We only enter the top level container nodes during PhaseBuildTree @@ -146,7 +185,7 @@ func (suite *Suite) PushNode(node Node) error { err = types.GinkgoErrors.CaughtPanicDuringABuildPhase(e, node.CodeLocation) } }() - node.Body() + node.Body(nil) return err }() suite.tree = parentTree @@ -196,7 +235,7 @@ func (suite *Suite) pushCleanupNode(node Node) error { node.NodeType = types.NodeTypeCleanupAfterSuite case types.NodeTypeBeforeAll, types.NodeTypeAfterAll: node.NodeType = types.NodeTypeCleanupAfterAll - case types.NodeTypeReportBeforeEach, types.NodeTypeReportAfterEach, types.NodeTypeReportAfterSuite: + case types.NodeTypeReportBeforeEach, types.NodeTypeReportAfterEach, types.NodeTypeReportBeforeSuite, types.NodeTypeReportAfterSuite: return types.GinkgoErrors.PushingCleanupInReportingNode(node.CodeLocation, suite.currentNode.NodeType) case types.NodeTypeCleanupInvalid, types.NodeTypeCleanupAfterEach, types.NodeTypeCleanupAfterAll, types.NodeTypeCleanupAfterSuite: return types.GinkgoErrors.PushingCleanupInCleanupNode(node.CodeLocation) @@ -211,12 +250,73 @@ func (suite *Suite) pushCleanupNode(node Node) error { return nil } +func (suite *Suite) generateTimelineLocation() types.TimelineLocation { + suite.selectiveLock.Lock() + defer suite.selectiveLock.Unlock() + + suite.timelineOrder += 1 + return types.TimelineLocation{ + Offset: len(suite.currentSpecReport.CapturedGinkgoWriterOutput) + suite.writer.Len(), + Order: suite.timelineOrder, + Time: time.Now(), + } +} + +func (suite *Suite) handleSpecEvent(event types.SpecEvent) types.SpecEvent { + event.TimelineLocation = suite.generateTimelineLocation() + suite.selectiveLock.Lock() + suite.currentSpecReport.SpecEvents = append(suite.currentSpecReport.SpecEvents, event) + suite.selectiveLock.Unlock() + suite.reporter.EmitSpecEvent(event) + return event +} + +func (suite *Suite) handleSpecEventEnd(eventType types.SpecEventType, startEvent types.SpecEvent) { + event := startEvent + event.SpecEventType = eventType + event.TimelineLocation = suite.generateTimelineLocation() + event.Duration = event.TimelineLocation.Time.Sub(startEvent.TimelineLocation.Time) + suite.selectiveLock.Lock() + suite.currentSpecReport.SpecEvents = append(suite.currentSpecReport.SpecEvents, event) + suite.selectiveLock.Unlock() + suite.reporter.EmitSpecEvent(event) +} + +func (suite *Suite) By(text string, callback ...func()) error { + cl := types.NewCodeLocation(2) + if suite.phase != PhaseRun { + return types.GinkgoErrors.ByNotDuringRunPhase(cl) + } + + event := suite.handleSpecEvent(types.SpecEvent{ + SpecEventType: types.SpecEventByStart, + CodeLocation: cl, + Message: text, + }) + suite.selectiveLock.Lock() + suite.currentByStep = event + suite.selectiveLock.Unlock() + + if len(callback) == 1 { + defer func() { + suite.selectiveLock.Lock() + suite.currentByStep = types.SpecEvent{} + suite.selectiveLock.Unlock() + suite.handleSpecEventEnd(types.SpecEventByEnd, event) + }() + callback[0]() + } else if len(callback) > 1 { + panic("just one callback per By, please") + } + return nil +} + /* - Spec Running methods - used during PhaseRun +Spec Running methods - used during PhaseRun */ func (suite *Suite) CurrentSpecReport() types.SpecReport { - suite.currentSpecReportUserAccessLock.Lock() - defer suite.currentSpecReportUserAccessLock.Unlock() 
+ suite.selectiveLock.Lock() + defer suite.selectiveLock.Unlock() report := suite.currentSpecReport if suite.writer != nil { report.CapturedGinkgoWriterOutput = string(suite.writer.Bytes()) @@ -227,15 +327,59 @@ func (suite *Suite) CurrentSpecReport() types.SpecReport { } func (suite *Suite) AddReportEntry(entry ReportEntry) error { - suite.currentSpecReportUserAccessLock.Lock() - defer suite.currentSpecReportUserAccessLock.Unlock() if suite.phase != PhaseRun { return types.GinkgoErrors.AddReportEntryNotDuringRunPhase(entry.Location) } + entry.TimelineLocation = suite.generateTimelineLocation() + entry.Time = entry.TimelineLocation.Time + suite.selectiveLock.Lock() suite.currentSpecReport.ReportEntries = append(suite.currentSpecReport.ReportEntries, entry) + suite.selectiveLock.Unlock() + suite.reporter.EmitReportEntry(entry) return nil } +func (suite *Suite) generateProgressReport(fullReport bool) types.ProgressReport { + timelineLocation := suite.generateTimelineLocation() + suite.selectiveLock.Lock() + defer suite.selectiveLock.Unlock() + + deadline, cancel := context.WithTimeout(context.Background(), PROGRESS_REPORTER_DEADLING) + defer cancel() + var additionalReports []string + if suite.currentSpecContext != nil { + additionalReports = append(additionalReports, suite.currentSpecContext.QueryProgressReporters(deadline, suite.failer)...) + } + additionalReports = append(additionalReports, suite.QueryProgressReporters(deadline, suite.failer)...) + gwOutput := suite.currentSpecReport.CapturedGinkgoWriterOutput + string(suite.writer.Bytes()) + pr, err := NewProgressReport(suite.isRunningInParallel(), suite.currentSpecReport, suite.currentNode, suite.currentNodeStartTime, suite.currentByStep, gwOutput, timelineLocation, additionalReports, suite.config.SourceRoots, fullReport) + + if err != nil { + fmt.Printf("{{red}}Failed to generate progress report:{{/}}\n%s\n", err.Error()) + } + return pr +} + +func (suite *Suite) handleProgressSignal() { + report := suite.generateProgressReport(false) + report.Message = "{{bold}}You've requested a progress report:{{/}}" + suite.emitProgressReport(report) +} + +func (suite *Suite) emitProgressReport(report types.ProgressReport) { + suite.selectiveLock.Lock() + suite.currentSpecReport.ProgressReports = append(suite.currentSpecReport.ProgressReports, report.WithoutCapturedGinkgoWriterOutput()) + suite.selectiveLock.Unlock() + + suite.reporter.EmitProgressReport(report) + if suite.isRunningInParallel() { + err := suite.client.PostEmitProgressReport(report) + if err != nil { + fmt.Println(err.Error()) + } + } +} + func (suite *Suite) isRunningInParallel() bool { return suite.config.ParallelTotal > 1 } @@ -280,7 +424,13 @@ func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath s } suite.report.SuiteSucceeded = true - suite.runBeforeSuite(numSpecsThatWillBeRun) + + suite.runReportSuiteNodesIfNeedBe(types.NodeTypeReportBeforeSuite) + + ranBeforeSuite := suite.report.SuiteSucceeded + if suite.report.SuiteSucceeded { + suite.runBeforeSuite(numSpecsThatWillBeRun) + } if suite.report.SuiteSucceeded { groupedSpecIndices, serialGroupedSpecIndices := OrderSpecs(specs, suite.config) @@ -319,19 +469,23 @@ func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath s } } - suite.runAfterSuiteCleanup(numSpecsThatWillBeRun) + if ranBeforeSuite { + suite.runAfterSuiteCleanup(numSpecsThatWillBeRun) + } interruptStatus := suite.interruptHandler.Status() - if interruptStatus.Interrupted { + if interruptStatus.Interrupted() { 
suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, interruptStatus.Cause.String()) suite.report.SuiteSucceeded = false } suite.report.EndTime = time.Now() suite.report.RunTime = suite.report.EndTime.Sub(suite.report.StartTime) - - if suite.config.ParallelProcess == 1 { - suite.runReportAfterSuite() + if !suite.deadline.IsZero() && suite.report.EndTime.After(suite.deadline) { + suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Suite Timeout Elapsed") + suite.report.SuiteSucceeded = false } + + suite.runReportSuiteNodesIfNeedBe(types.NodeTypeReportAfterSuite) suite.reporter.SuiteDidEnd(suite.report) if suite.isRunningInParallel() { suite.client.PostSuiteDidEnd(suite.report) @@ -341,16 +495,19 @@ func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath s } func (suite *Suite) runBeforeSuite(numSpecsThatWillBeRun int) { - interruptStatus := suite.interruptHandler.Status() beforeSuiteNode := suite.suiteNodes.FirstNodeWithType(types.NodeTypeBeforeSuite | types.NodeTypeSynchronizedBeforeSuite) - if !beforeSuiteNode.IsZero() && !interruptStatus.Interrupted && numSpecsThatWillBeRun > 0 { + if !beforeSuiteNode.IsZero() && numSpecsThatWillBeRun > 0 { + suite.selectiveLock.Lock() suite.currentSpecReport = types.SpecReport{ - LeafNodeType: beforeSuiteNode.NodeType, - LeafNodeLocation: beforeSuiteNode.CodeLocation, - ParallelProcess: suite.config.ParallelProcess, + LeafNodeType: beforeSuiteNode.NodeType, + LeafNodeLocation: beforeSuiteNode.CodeLocation, + ParallelProcess: suite.config.ParallelProcess, + RunningInParallel: suite.isRunningInParallel(), } + suite.selectiveLock.Unlock() + suite.reporter.WillRun(suite.currentSpecReport) - suite.runSuiteNode(beforeSuiteNode, interruptStatus.Channel) + suite.runSuiteNode(beforeSuiteNode) if suite.currentSpecReport.State.Is(types.SpecStateSkipped) { suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Suite skipped in BeforeSuite") suite.skipAll = true @@ -362,45 +519,39 @@ func (suite *Suite) runBeforeSuite(numSpecsThatWillBeRun int) { func (suite *Suite) runAfterSuiteCleanup(numSpecsThatWillBeRun int) { afterSuiteNode := suite.suiteNodes.FirstNodeWithType(types.NodeTypeAfterSuite | types.NodeTypeSynchronizedAfterSuite) if !afterSuiteNode.IsZero() && numSpecsThatWillBeRun > 0 { + suite.selectiveLock.Lock() suite.currentSpecReport = types.SpecReport{ - LeafNodeType: afterSuiteNode.NodeType, - LeafNodeLocation: afterSuiteNode.CodeLocation, - ParallelProcess: suite.config.ParallelProcess, + LeafNodeType: afterSuiteNode.NodeType, + LeafNodeLocation: afterSuiteNode.CodeLocation, + ParallelProcess: suite.config.ParallelProcess, + RunningInParallel: suite.isRunningInParallel(), } + suite.selectiveLock.Unlock() + suite.reporter.WillRun(suite.currentSpecReport) - suite.runSuiteNode(afterSuiteNode, suite.interruptHandler.Status().Channel) + suite.runSuiteNode(afterSuiteNode) suite.processCurrentSpecReport() } afterSuiteCleanup := suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterSuite).Reverse() if len(afterSuiteCleanup) > 0 { for _, cleanupNode := range afterSuiteCleanup { + suite.selectiveLock.Lock() suite.currentSpecReport = types.SpecReport{ - LeafNodeType: cleanupNode.NodeType, - LeafNodeLocation: cleanupNode.CodeLocation, - ParallelProcess: suite.config.ParallelProcess, + LeafNodeType: cleanupNode.NodeType, + LeafNodeLocation: cleanupNode.CodeLocation, + ParallelProcess: suite.config.ParallelProcess, + RunningInParallel: 
suite.isRunningInParallel(), } + suite.selectiveLock.Unlock() + suite.reporter.WillRun(suite.currentSpecReport) - suite.runSuiteNode(cleanupNode, suite.interruptHandler.Status().Channel) + suite.runSuiteNode(cleanupNode) suite.processCurrentSpecReport() } } } -func (suite *Suite) runReportAfterSuite() { - for _, node := range suite.suiteNodes.WithType(types.NodeTypeReportAfterSuite) { - suite.currentSpecReport = types.SpecReport{ - LeafNodeType: node.NodeType, - LeafNodeLocation: node.CodeLocation, - LeafNodeText: node.Text, - ParallelProcess: suite.config.ParallelProcess, - } - suite.reporter.WillRun(suite.currentSpecReport) - suite.runReportAfterSuiteNode(node, suite.report) - suite.processCurrentSpecReport() - } -} - func (suite *Suite) reportEach(spec Spec, nodeType types.NodeType) { nodes := spec.Nodes.WithType(nodeType) if nodeType == types.NodeTypeReportAfterEach { @@ -417,16 +568,11 @@ func (suite *Suite) reportEach(spec Spec, nodeType types.NodeType) { suite.writer.Truncate() suite.outputInterceptor.StartInterceptingOutput() report := suite.currentSpecReport - nodes[i].Body = func() { + nodes[i].Body = func(SpecContext) { nodes[i].ReportEachBody(report) } - suite.interruptHandler.SetInterruptPlaceholderMessage(formatter.Fiw(0, formatter.COLS, - "{{yellow}}Ginkgo received an interrupt signal but is currently running a %s node. To avoid an invalid report the %s node will not be interrupted however subsequent tests will be skipped.{{/}}\n\n{{bold}}The running %s node is at:\n%s.{{/}}", - nodeType, nodeType, nodeType, - nodes[i].CodeLocation, - )) - state, failure := suite.runNode(nodes[i], nil, spec.Nodes.BestTextFor(nodes[i])) - suite.interruptHandler.ClearInterruptPlaceholderMessage() + state, failure := suite.runNode(nodes[i], time.Time{}, spec.Nodes.BestTextFor(nodes[i])) + // If the spec is not in a failure state (i.e. it's Passed/Skipped/Pending) and the reporter has failed, override the state. 
// Also, if the reporter is ever aborted - always override the state to propagate the abort if (!suite.currentSpecReport.State.Is(types.SpecStateFailureStates) && state.Is(types.SpecStateFailureStates)) || state.Is(types.SpecStateAborted) { @@ -438,7 +584,7 @@ func (suite *Suite) reportEach(spec Spec, nodeType types.NodeType) { } } -func (suite *Suite) runSuiteNode(node Node, interruptChannel chan interface{}) { +func (suite *Suite) runSuiteNode(node Node) { if suite.config.DryRun { suite.currentSpecReport.State = types.SpecStatePassed return @@ -451,13 +597,13 @@ func (suite *Suite) runSuiteNode(node Node, interruptChannel chan interface{}) { var err error switch node.NodeType { case types.NodeTypeBeforeSuite, types.NodeTypeAfterSuite: - suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "") + suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "") case types.NodeTypeCleanupAfterSuite: if suite.config.ParallelTotal > 1 && suite.config.ParallelProcess == 1 { err = suite.client.BlockUntilNonprimaryProcsHaveFinished() } if err == nil { - suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "") + suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "") } case types.NodeTypeSynchronizedBeforeSuite: var data []byte @@ -467,8 +613,9 @@ func (suite *Suite) runSuiteNode(node Node, interruptChannel chan interface{}) { suite.outputInterceptor.StopInterceptingAndReturnOutput() suite.outputInterceptor.StartInterceptingOutputAndForwardTo(suite.client) } - node.Body = func() { data = node.SynchronizedBeforeSuiteProc1Body() } - suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "") + node.Body = func(c SpecContext) { data = node.SynchronizedBeforeSuiteProc1Body(c) } + node.HasContext = node.SynchronizedBeforeSuiteProc1BodyHasContext + suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "") if suite.config.ParallelTotal > 1 { suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput() suite.outputInterceptor.StartInterceptingOutput() @@ -485,19 +632,21 @@ func (suite *Suite) runSuiteNode(node Node, interruptChannel chan interface{}) { switch proc1State { case types.SpecStatePassed: runAllProcs = true - case types.SpecStateFailed, types.SpecStatePanicked: + case types.SpecStateFailed, types.SpecStatePanicked, types.SpecStateTimedout: err = types.GinkgoErrors.SynchronizedBeforeSuiteFailedOnProc1() case types.SpecStateInterrupted, types.SpecStateAborted, types.SpecStateSkipped: suite.currentSpecReport.State = proc1State } } if runAllProcs { - node.Body = func() { node.SynchronizedBeforeSuiteAllProcsBody(data) } - suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "") + node.Body = func(c SpecContext) { node.SynchronizedBeforeSuiteAllProcsBody(c, data) } + node.HasContext = node.SynchronizedBeforeSuiteAllProcsBodyHasContext + suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "") } case types.NodeTypeSynchronizedAfterSuite: node.Body = node.SynchronizedAfterSuiteAllProcsBody - suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "") + node.HasContext = node.SynchronizedAfterSuiteAllProcsBodyHasContext + suite.currentSpecReport.State,
suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "") if suite.config.ParallelProcess == 1 { if suite.config.ParallelTotal > 1 { err = suite.client.BlockUntilNonprimaryProcsHaveFinished() } @@ -509,7 +658,8 @@ func (suite *Suite) runSuiteNode(node Node, interruptChannel chan interface{}) { } node.Body = node.SynchronizedAfterSuiteProc1Body - state, failure := suite.runNode(node, interruptChannel, "") + node.HasContext = node.SynchronizedAfterSuiteProc1BodyHasContext + state, failure := suite.runNode(node, time.Time{}, "") if suite.currentSpecReport.State.Is(types.SpecStatePassed) { suite.currentSpecReport.State, suite.currentSpecReport.Failure = state, failure } @@ -519,63 +669,122 @@ func (suite *Suite) runSuiteNode(node Node, interruptChannel chan interface{}) { if err != nil && !suite.currentSpecReport.State.Is(types.SpecStateFailureStates) { suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error()) + suite.reporter.EmitFailure(suite.currentSpecReport.State, suite.currentSpecReport.Failure) } suite.currentSpecReport.EndTime = time.Now() suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime) suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes()) suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput() - - return } -func (suite *Suite) runReportAfterSuiteNode(node Node, report types.Report) { +func (suite *Suite) runReportSuiteNodesIfNeedBe(nodeType types.NodeType) { + nodes := suite.suiteNodes.WithType(nodeType) + // only run ReportAfterSuite on proc 1 + if nodeType.Is(types.NodeTypeReportAfterSuite) && suite.config.ParallelProcess != 1 { + return + } + // if we're running ReportBeforeSuite on proc > 1 - we should wait until proc 1 has completed + if nodeType.Is(types.NodeTypeReportBeforeSuite) && suite.config.ParallelProcess != 1 && len(nodes) > 0 { + state, err := suite.client.BlockUntilReportBeforeSuiteCompleted() + if err != nil || state.Is(types.SpecStateFailed) { + suite.report.SuiteSucceeded = false + } + return + } + + for _, node := range nodes { + suite.selectiveLock.Lock() + suite.currentSpecReport = types.SpecReport{ + LeafNodeType: node.NodeType, + LeafNodeLocation: node.CodeLocation, + LeafNodeText: node.Text, + ParallelProcess: suite.config.ParallelProcess, + RunningInParallel: suite.isRunningInParallel(), + } + suite.selectiveLock.Unlock() + + suite.reporter.WillRun(suite.currentSpecReport) + suite.runReportSuiteNode(node, suite.report) + suite.processCurrentSpecReport() + } + + // if we're running ReportBeforeSuite and we're running in parallel - we should tell the other procs that we're done + if nodeType.Is(types.NodeTypeReportBeforeSuite) && suite.isRunningInParallel() && len(nodes) > 0 { + if suite.report.SuiteSucceeded { + suite.client.PostReportBeforeSuiteCompleted(types.SpecStatePassed) + } else { + suite.client.PostReportBeforeSuiteCompleted(types.SpecStateFailed) + } + } +} + +func (suite *Suite) runReportSuiteNode(node Node, report types.Report) { suite.writer.Truncate() suite.outputInterceptor.StartInterceptingOutput() suite.currentSpecReport.StartTime = time.Now() - if suite.config.ParallelTotal > 1 { + // if we're running a ReportAfterSuite in parallel (on proc 1) we (a) wait until other procs have exited and + // (b) always fetch the latest report as prior ReportAfterSuites will contribute to it + if
node.NodeType.Is(types.NodeTypeReportAfterSuite) && suite.isRunningInParallel() { aggregatedReport, err := suite.client.BlockUntilAggregatedNonprimaryProcsReport() if err != nil { suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error()) + suite.reporter.EmitFailure(suite.currentSpecReport.State, suite.currentSpecReport.Failure) return } report = report.Add(aggregatedReport) } - node.Body = func() { node.ReportAfterSuiteBody(report) } - suite.interruptHandler.SetInterruptPlaceholderMessage(formatter.Fiw(0, formatter.COLS, - "{{yellow}}Ginkgo received an interrupt signal but is currently running a ReportAfterSuite node. To avoid an invalid report the ReportAfterSuite node will not be interrupted.{{/}}\n\n{{bold}}The running ReportAfterSuite node is at:\n%s.{{/}}", - node.CodeLocation, - )) - suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, nil, "") - suite.interruptHandler.ClearInterruptPlaceholderMessage() + node.Body = func(SpecContext) { node.ReportSuiteBody(report) } + suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "") suite.currentSpecReport.EndTime = time.Now() suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime) suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes()) suite.currentSpecReport.CapturedStdOutErr = suite.outputInterceptor.StopInterceptingAndReturnOutput() - - return } -func (suite *Suite) runNode(node Node, interruptChannel chan interface{}, text string) (types.SpecState, types.Failure) { +func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (types.SpecState, types.Failure) { if node.NodeType.Is(types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll | types.NodeTypeCleanupAfterSuite) { suite.cleanupNodes = suite.cleanupNodes.WithoutNode(node) } + interruptStatus := suite.interruptHandler.Status() + if interruptStatus.Level == interrupt_handler.InterruptLevelBailOut { + return types.SpecStateSkipped, types.Failure{} + } + if interruptStatus.Level == interrupt_handler.InterruptLevelReportOnly && !node.NodeType.Is(types.NodeTypesAllowedDuringReportInterrupt) { + return types.SpecStateSkipped, types.Failure{} + } + if interruptStatus.Level == interrupt_handler.InterruptLevelCleanupAndReport && !node.NodeType.Is(types.NodeTypesAllowedDuringReportInterrupt|types.NodeTypesAllowedDuringCleanupInterrupt) { + return types.SpecStateSkipped, types.Failure{} + } + + suite.selectiveLock.Lock() suite.currentNode = node + suite.currentNodeStartTime = time.Now() + suite.currentByStep = types.SpecEvent{} + suite.selectiveLock.Unlock() defer func() { + suite.selectiveLock.Lock() suite.currentNode = Node{} + suite.currentNodeStartTime = time.Time{} + suite.selectiveLock.Unlock() }() - if suite.config.EmitSpecProgress && !node.MarkedSuppressProgressReporting { - if text == "" { - text = "TOP-LEVEL" - } - s := fmt.Sprintf("[%s] %s\n %s\n", node.NodeType.String(), text, node.CodeLocation.String()) - suite.writer.Write([]byte(s)) + if text == "" { + text = "TOP-LEVEL" } + event := suite.handleSpecEvent(types.SpecEvent{ + SpecEventType: types.SpecEventNodeStart, + NodeType: node.NodeType, + Message: text, + CodeLocation: node.CodeLocation, + }) + defer func() { + suite.handleSpecEventEnd(types.SpecEventNodeEnd, event) + }() var failure types.Failure failure.FailureNodeType, failure.FailureNodeLocation = node.NodeType, 
node.CodeLocation @@ -586,6 +795,54 @@ func (suite *Suite) runNode(node Node, interruptChannel chan interface{}, text s } else { failure.FailureNodeContext, failure.FailureNodeContainerIndex = types.FailureNodeInContainer, node.NestingLevel-1 } + var outcome types.SpecState + + gracePeriod := suite.config.GracePeriod + if node.GracePeriod >= 0 { + gracePeriod = node.GracePeriod + } + + now := time.Now() + deadline := suite.deadline + timeoutInPlay := "suite" + if deadline.IsZero() || (!specDeadline.IsZero() && specDeadline.Before(deadline)) { + deadline = specDeadline + timeoutInPlay = "spec" + } + if node.NodeTimeout > 0 && (deadline.IsZero() || deadline.Sub(now) > node.NodeTimeout) { + deadline = now.Add(node.NodeTimeout) + timeoutInPlay = "node" + } + if (!deadline.IsZero() && deadline.Before(now)) || interruptStatus.Interrupted() { + //we're out of time already. let's wait for a NodeTimeout if we have it, or GracePeriod if we don't + if node.NodeTimeout > 0 { + deadline = now.Add(node.NodeTimeout) + timeoutInPlay = "node" + } else { + deadline = now.Add(gracePeriod) + timeoutInPlay = "grace period" + } + } + + if !node.HasContext { + // this maps onto the pre-context behavior: + // - an interrupted node exits immediately. with this, context-less nodes that are in a spec with a SpecTimeout and/or are interrupted by other means will simply exit immediately after the timeout/interrupt + // - clean up nodes have up to GracePeriod (formerly hard-coded at 30s) to complete before they are interrupted + gracePeriod = 0 + } + + sc := NewSpecContext(suite) + defer sc.cancel() + + suite.selectiveLock.Lock() + suite.currentSpecContext = sc + suite.selectiveLock.Unlock() + + var deadlineChannel <-chan time.Time + if !deadline.IsZero() { + deadlineChannel = time.After(deadline.Sub(now)) + } + var gracePeriodChannel <-chan time.Time outcomeC := make(chan types.SpecState) failureC := make(chan types.Failure) @@ -597,33 +854,147 @@ func (suite *Suite) runNode(node Node, interruptChannel chan interface{}, text s suite.failer.Panic(types.NewCodeLocationWithStackTrace(2), e) } - outcome, failureFromRun := suite.failer.Drain() - outcomeC <- outcome + outcomeFromRun, failureFromRun := suite.failer.Drain() + failureFromRun.TimelineLocation = suite.generateTimelineLocation() + outcomeC <- outcomeFromRun failureC <- failureFromRun }() - node.Body() + node.Body(sc) finished = true }() - select { - case outcome := <-outcomeC: - failureFromRun := <-failureC - if outcome == types.SpecStatePassed { - return outcome, types.Failure{} + // progress polling timer and channel + var emitProgressNow <-chan time.Time + var progressPoller *time.Timer + var pollProgressAfter, pollProgressInterval = suite.config.PollProgressAfter, suite.config.PollProgressInterval + if node.PollProgressAfter >= 0 { + pollProgressAfter = node.PollProgressAfter + } + if node.PollProgressInterval >= 0 { + pollProgressInterval = node.PollProgressInterval + } + if pollProgressAfter > 0 { + progressPoller = time.NewTimer(pollProgressAfter) + emitProgressNow = progressPoller.C + defer progressPoller.Stop() + } + + // now we wait for an outcome, an interrupt, a timeout, or a progress poll + for { + select { + case outcomeFromRun := <-outcomeC: + failureFromRun := <-failureC + if outcome.Is(types.SpecStateInterrupted | types.SpecStateTimedout) { + // we've already been interrupted/timed out. 
we just managed to actually exit + // before the grace period elapsed + // if we have a failure message we attach it as an additional failure + if outcomeFromRun != types.SpecStatePassed { + additionalFailure := types.AdditionalFailure{ + State: outcomeFromRun, + Failure: failure, //we make a copy - this will include all the configuration set up above... + } + //...and then we update the failure with the details from failureFromRun + additionalFailure.Failure.Location, additionalFailure.Failure.ForwardedPanic, additionalFailure.Failure.TimelineLocation = failureFromRun.Location, failureFromRun.ForwardedPanic, failureFromRun.TimelineLocation + additionalFailure.Failure.ProgressReport = types.ProgressReport{} + if outcome == types.SpecStateTimedout { + additionalFailure.Failure.Message = fmt.Sprintf("A %s timeout occurred and then the following failure was recorded in the timedout node before it exited:\n%s", timeoutInPlay, failureFromRun.Message) + } else { + additionalFailure.Failure.Message = fmt.Sprintf("An interrupt occurred and then the following failure was recorded in the interrupted node before it exited:\n%s", failureFromRun.Message) + } + suite.reporter.EmitFailure(additionalFailure.State, additionalFailure.Failure) + failure.AdditionalFailure = &additionalFailure + } + return outcome, failure + } + if outcomeFromRun.Is(types.SpecStatePassed) { + return outcomeFromRun, types.Failure{} + } else { + failure.Message, failure.Location, failure.ForwardedPanic, failure.TimelineLocation = failureFromRun.Message, failureFromRun.Location, failureFromRun.ForwardedPanic, failureFromRun.TimelineLocation + suite.reporter.EmitFailure(outcomeFromRun, failure) + return outcomeFromRun, failure + } + case <-gracePeriodChannel: + if node.HasContext && outcome.Is(types.SpecStateTimedout) { + report := suite.generateProgressReport(false) + report.Message = "{{bold}}{{orange}}A running node failed to exit in time{{/}}\nGinkgo is moving on but a node has timed out and failed to exit before its grace period elapsed. The node has now leaked and is running in the background.\nHere's a current progress report:" + suite.emitProgressReport(report) + } + return outcome, failure + case <-deadlineChannel: + // we're out of time - the outcome is a timeout and we capture the failure and progress report + outcome = types.SpecStateTimedout + failure.Message, failure.Location, failure.TimelineLocation = fmt.Sprintf("A %s timeout occurred", timeoutInPlay), node.CodeLocation, suite.generateTimelineLocation() + failure.ProgressReport = suite.generateProgressReport(false).WithoutCapturedGinkgoWriterOutput() + failure.ProgressReport.Message = fmt.Sprintf("{{bold}}This is the Progress Report generated when the %s timeout occurred:{{/}}", timeoutInPlay) + deadlineChannel = nil + suite.reporter.EmitFailure(outcome, failure) + + // tell the spec to stop. 
it's important we generate the progress report first to make sure we capture where + // the spec is actually stuck + sc.cancel() + //and now we wait for the grace period + gracePeriodChannel = time.After(gracePeriod) + case <-interruptStatus.Channel: + interruptStatus = suite.interruptHandler.Status() + deadlineChannel = nil // don't worry about deadlines, time's up now + + failureTimelineLocation := suite.generateTimelineLocation() + progressReport := suite.generateProgressReport(true) + + if outcome == types.SpecStateInvalid { + outcome = types.SpecStateInterrupted + failure.Message, failure.Location, failure.TimelineLocation = interruptStatus.Message(), node.CodeLocation, failureTimelineLocation + if interruptStatus.ShouldIncludeProgressReport() { + failure.ProgressReport = progressReport.WithoutCapturedGinkgoWriterOutput() + failure.ProgressReport.Message = "{{bold}}This is the Progress Report generated when the interrupt was received:{{/}}" + } + suite.reporter.EmitFailure(outcome, failure) + } + + progressReport = progressReport.WithoutOtherGoroutines() + sc.cancel() + + if interruptStatus.Level == interrupt_handler.InterruptLevelBailOut { + if interruptStatus.ShouldIncludeProgressReport() { + progressReport.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\n{{bold}}{{red}}Final interrupt received{{/}}; Ginkgo will not run any cleanup or reporting nodes and will terminate as soon as possible.\nHere's a current progress report:", interruptStatus.Message()) + suite.emitProgressReport(progressReport) + } + return outcome, failure + } + if interruptStatus.ShouldIncludeProgressReport() { + if interruptStatus.Level == interrupt_handler.InterruptLevelCleanupAndReport { + progressReport.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nFirst interrupt received; Ginkgo will run any cleanup and reporting nodes but will skip all remaining specs. {{bold}}Interrupt again to skip cleanup{{/}}.\nHere's a current progress report:", interruptStatus.Message()) + } else if interruptStatus.Level == interrupt_handler.InterruptLevelReportOnly { + progressReport.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nSecond interrupt received; Ginkgo will run any reporting nodes but will skip all remaining specs and cleanup nodes. {{bold}}Interrupt again to bail immediately{{/}}.\nHere's a current progress report:", interruptStatus.Message()) + } + suite.emitProgressReport(progressReport) + } + + if gracePeriodChannel == nil { + // we haven't given grace yet... so let's + gracePeriodChannel = time.After(gracePeriod) + } else { + // we've already given grace. time's up. now. 
+ return outcome, failure + } + case <-emitProgressNow: + report := suite.generateProgressReport(false) + report.Message = "{{bold}}Automatically polling progress:{{/}}" + suite.emitProgressReport(report) + if pollProgressInterval > 0 { + progressPoller.Reset(pollProgressInterval) + } } - failure.Message, failure.Location, failure.ForwardedPanic = failureFromRun.Message, failureFromRun.Location, failureFromRun.ForwardedPanic - return outcome, failure - case <-interruptChannel: - failure.Message, failure.Location = suite.interruptHandler.InterruptMessageWithStackTraces(), node.CodeLocation - return types.SpecStateInterrupted, failure } } +// TODO: search for usages and consider if reporter.EmitFailure() is necessary func (suite *Suite) failureForLeafNodeWithMessage(node Node, message string) types.Failure { return types.Failure{ Message: message, Location: node.CodeLocation, + TimelineLocation: suite.generateTimelineLocation(), FailureNodeContext: types.FailureNodeIsLeafNode, FailureNodeType: node.NodeType, FailureNodeLocation: node.CodeLocation, diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go b/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go index 2f42b26..92acc0a 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go @@ -5,35 +5,62 @@ import ( "io" "os" + "github.com/onsi/ginkgo/v2/formatter" "github.com/onsi/ginkgo/v2/internal" "github.com/onsi/ginkgo/v2/types" ) type failFunc func(message string, callerSkip ...int) type skipFunc func(message string, callerSkip ...int) -type cleanupFunc func(args ...interface{}) +type cleanupFunc func(args ...any) type reportFunc func() types.SpecReport +type addReportEntryFunc func(names string, args ...any) +type ginkgoWriterInterface interface { + io.Writer -func New(writer io.Writer, fail failFunc, skip skipFunc, cleanup cleanupFunc, report reportFunc, offset int) *ginkgoTestingTProxy { + Print(a ...interface{}) + Printf(format string, a ...interface{}) + Println(a ...interface{}) +} +type ginkgoRecoverFunc func() +type attachProgressReporterFunc func(func() string) func() + +func New(writer ginkgoWriterInterface, fail failFunc, skip skipFunc, cleanup cleanupFunc, report reportFunc, addReportEntry addReportEntryFunc, ginkgoRecover ginkgoRecoverFunc, attachProgressReporter attachProgressReporterFunc, randomSeed int64, parallelProcess int, parallelTotal int, noColor bool, offset int) *ginkgoTestingTProxy { return &ginkgoTestingTProxy{ - fail: fail, - offset: offset, - writer: writer, - skip: skip, - cleanup: cleanup, - report: report, + fail: fail, + offset: offset, + writer: writer, + skip: skip, + cleanup: cleanup, + report: report, + addReportEntry: addReportEntry, + ginkgoRecover: ginkgoRecover, + attachProgressReporter: attachProgressReporter, + randomSeed: randomSeed, + parallelProcess: parallelProcess, + parallelTotal: parallelTotal, + f: formatter.NewWithNoColorBool(noColor), } } type ginkgoTestingTProxy struct { - fail failFunc - skip skipFunc - cleanup cleanupFunc - report reportFunc - offset int - writer io.Writer + fail failFunc + skip skipFunc + cleanup cleanupFunc + report reportFunc + offset int + writer ginkgoWriterInterface + addReportEntry addReportEntryFunc + ginkgoRecover ginkgoRecoverFunc + attachProgressReporter attachProgressReporterFunc + randomSeed int64 + parallelProcess int + parallelTotal int + f formatter.Formatter } +// basic testing.T support + 
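
The widened proxy below is what sits behind GinkgoT(); a hedged sketch of the new surface, assuming GinkgoT() exposes this FullGinkgoTInterface in this version (entry name, log text, and reporter string are illustrative):

package demo_test

import (
	. "github.com/onsi/ginkgo/v2"
)

var _ = It("exercises the widened GinkgoT", func() {
	t := GinkgoT()

	// run metadata is now available directly on the proxy
	t.Logf("seed=%d, proc %d of %d", t.RandomSeed(), t.ParallelProcess(), t.ParallelTotal())

	// progress reporters attached here are polled by generateProgressReport above;
	// the returned detach func unregisters the reporter
	detach := t.AttachProgressReporter(func() string { return "still working" })
	t.DeferCleanup(detach)

	// report entries are routed through addReportEntryFunc with an explicit visibility
	t.AddReportEntryVisibilityAlways("artifact", "kept in the final report")
})
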
func (t *ginkgoTestingTProxy) Cleanup(f func()) { t.cleanup(f, internal.Offset(1)) } @@ -81,7 +108,7 @@ func (t *ginkgoTestingTProxy) Fatalf(format string, args ...interface{}) { } func (t *ginkgoTestingTProxy) Helper() { - // No-op + types.MarkAsHelper(1) } func (t *ginkgoTestingTProxy) Log(args ...interface{}) { @@ -126,3 +153,54 @@ func (t *ginkgoTestingTProxy) TempDir() string { return tmpDir } + +// FullGinkgoTInterface +func (t *ginkgoTestingTProxy) AddReportEntryVisibilityAlways(name string, args ...any) { + finalArgs := []any{internal.Offset(1), types.ReportEntryVisibilityAlways} + t.addReportEntry(name, append(finalArgs, args...)...) +} +func (t *ginkgoTestingTProxy) AddReportEntryVisibilityFailureOrVerbose(name string, args ...any) { + finalArgs := []any{internal.Offset(1), types.ReportEntryVisibilityFailureOrVerbose} + t.addReportEntry(name, append(finalArgs, args...)...) +} +func (t *ginkgoTestingTProxy) AddReportEntryVisibilityNever(name string, args ...any) { + finalArgs := []any{internal.Offset(1), types.ReportEntryVisibilityNever} + t.addReportEntry(name, append(finalArgs, args...)...) +} +func (t *ginkgoTestingTProxy) Print(a ...any) { + t.writer.Print(a...) +} +func (t *ginkgoTestingTProxy) Printf(format string, a ...any) { + t.writer.Printf(format, a...) +} +func (t *ginkgoTestingTProxy) Println(a ...any) { + t.writer.Println(a...) +} +func (t *ginkgoTestingTProxy) F(format string, args ...any) string { + return t.f.F(format, args...) +} +func (t *ginkgoTestingTProxy) Fi(indentation uint, format string, args ...any) string { + return t.f.Fi(indentation, format, args...) +} +func (t *ginkgoTestingTProxy) Fiw(indentation uint, maxWidth uint, format string, args ...any) string { + return t.f.Fiw(indentation, maxWidth, format, args...) +} +func (t *ginkgoTestingTProxy) GinkgoRecover() { + t.ginkgoRecover() +} +func (t *ginkgoTestingTProxy) DeferCleanup(args ...any) { + finalArgs := []any{internal.Offset(1)} + t.cleanup(append(finalArgs, args...)...) 
+} +func (t *ginkgoTestingTProxy) RandomSeed() int64 { + return t.randomSeed +} +func (t *ginkgoTestingTProxy) ParallelProcess() int { + return t.parallelProcess +} +func (t *ginkgoTestingTProxy) ParallelTotal() int { + return t.parallelTotal +} +func (t *ginkgoTestingTProxy) AttachProgressReporter(f func() string) func() { + return t.attachProgressReporter(f) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/writer.go b/vendor/github.com/onsi/ginkgo/v2/internal/writer.go index 70f0a41..28a45b0 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/writer.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/writer.go @@ -5,6 +5,9 @@ import ( "fmt" "io" "sync" + + "github.com/go-logr/logr" + "github.com/go-logr/logr/funcr" ) type WriterMode uint @@ -19,24 +22,30 @@ type WriterInterface interface { Truncate() Bytes() []byte + Len() int } -//Writer implements WriterInterface and GinkgoWriterInterface +// Writer implements WriterInterface and GinkgoWriterInterface type Writer struct { buffer *bytes.Buffer outWriter io.Writer lock *sync.Mutex mode WriterMode + streamIndent []byte + indentNext bool + teeWriters []io.Writer } func NewWriter(outWriter io.Writer) *Writer { return &Writer{ - buffer: &bytes.Buffer{}, - lock: &sync.Mutex{}, - outWriter: outWriter, - mode: WriterModeStreamAndBuffer, + buffer: &bytes.Buffer{}, + lock: &sync.Mutex{}, + outWriter: outWriter, + mode: WriterModeStreamAndBuffer, + streamIndent: []byte(" "), + indentNext: true, } } @@ -46,6 +55,14 @@ func (w *Writer) SetMode(mode WriterMode) { w.mode = mode } +func (w *Writer) Len() int { + w.lock.Lock() + defer w.lock.Unlock() + return w.buffer.Len() +} + +var newline = []byte("\n") + func (w *Writer) Write(b []byte) (n int, err error) { w.lock.Lock() defer w.lock.Unlock() @@ -55,7 +72,21 @@ func (w *Writer) Write(b []byte) (n int, err error) { } if w.mode == WriterModeStreamAndBuffer { - w.outWriter.Write(b) + line, remaining, found := []byte{}, b, false + for len(remaining) > 0 { + line, remaining, found = bytes.Cut(remaining, newline) + if len(line) > 0 { + if w.indentNext { + w.outWriter.Write(w.streamIndent) + w.indentNext = false + } + w.outWriter.Write(line) + } + if found { + w.outWriter.Write(newline) + w.indentNext = true + } + } } return w.buffer.Write(b) } @@ -75,7 +106,7 @@ func (w *Writer) Bytes() []byte { return copied } -//GinkgoWriterInterface +// GinkgoWriterInterface func (w *Writer) TeeTo(writer io.Writer) { w.lock.Lock() defer w.lock.Unlock() @@ -101,3 +132,9 @@ func (w *Writer) Printf(format string, a ...interface{}) { func (w *Writer) Println(a ...interface{}) { fmt.Fprintln(w, a...) 
} + +func GinkgoLogrFunc(writer *Writer) logr.Logger { + return funcr.New(func(prefix, args string) { + writer.Printf("%s", args) + }, funcr.Options{}) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go index 0edd44b..56b7be7 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go @@ -12,6 +12,8 @@ import ( "io" "runtime" "strings" + "sync" + "time" "github.com/onsi/ginkgo/v2/formatter" "github.com/onsi/ginkgo/v2/types" @@ -22,13 +24,16 @@ type DefaultReporter struct { writer io.Writer // managing the emission stream - lastChar string + lastCharWasNewline bool lastEmissionWasDelimiter bool // rendering specDenoter string retryDenoter string formatter formatter.Formatter + + runningInParallel bool + lock *sync.Mutex } func NewDefaultReporterUnderTest(conf types.ReporterConfig, writer io.Writer) *DefaultReporter { @@ -43,12 +48,13 @@ func NewDefaultReporter(conf types.ReporterConfig, writer io.Writer) *DefaultRep conf: conf, writer: writer, - lastChar: "\n", + lastCharWasNewline: true, lastEmissionWasDelimiter: false, specDenoter: "•", retryDenoter: "↺", formatter: formatter.NewWithNoColorBool(conf.NoColor), + lock: &sync.Mutex{}, } if runtime.GOOS == "windows" { reporter.specDenoter = "+" @@ -96,163 +102,10 @@ func (r *DefaultReporter) SuiteWillBegin(report types.Report) { } } -func (r *DefaultReporter) WillRun(report types.SpecReport) { - if r.conf.Verbosity().LT(types.VerbosityLevelVerbose) || report.State.Is(types.SpecStatePending|types.SpecStateSkipped) { - return - } - - r.emitDelimiter() - indentation := uint(0) - if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { - r.emitBlock(r.f("{{bold}}[%s] %s{{/}}", report.LeafNodeType.String(), report.LeafNodeText)) - } else { - if len(report.ContainerHierarchyTexts) > 0 { - r.emitBlock(r.cycleJoin(report.ContainerHierarchyTexts, " ")) - indentation = 1 - } - line := r.fi(indentation, "{{bold}}%s{{/}}", report.LeafNodeText) - labels := report.Labels() - if len(labels) > 0 { - line += r.f(" {{coral}}[%s]{{/}}", strings.Join(labels, ", ")) - } - r.emitBlock(line) - } - r.emitBlock(r.fi(indentation, "{{gray}}%s{{/}}", report.LeafNodeLocation)) -} - -func (r *DefaultReporter) DidRun(report types.SpecReport) { - v := r.conf.Verbosity() - var header, highlightColor string - includeRuntime, emitGinkgoWriterOutput, stream, denoter := true, true, false, r.specDenoter - succinctLocationBlock := v.Is(types.VerbosityLevelSuccinct) - - hasGW := report.CapturedGinkgoWriterOutput != "" - hasStd := report.CapturedStdOutErr != "" - hasEmittableReports := report.ReportEntries.HasVisibility(types.ReportEntryVisibilityAlways) || (report.ReportEntries.HasVisibility(types.ReportEntryVisibilityFailureOrVerbose) && (!report.Failure.IsZero() || v.GTE(types.VerbosityLevelVerbose))) - - if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { - denoter = fmt.Sprintf("[%s]", report.LeafNodeType) - } - - switch report.State { - case types.SpecStatePassed: - highlightColor, succinctLocationBlock = "{{green}}", v.LT(types.VerbosityLevelVerbose) - emitGinkgoWriterOutput = (r.conf.AlwaysEmitGinkgoWriter || v.GTE(types.VerbosityLevelVerbose)) && hasGW - if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { - if v.GTE(types.VerbosityLevelVerbose) || hasStd || hasEmittableReports { - header = fmt.Sprintf("%s PASSED", denoter) - } else { - return - } - } else { - header, 
stream = denoter, true - if report.NumAttempts > 1 { - header, stream = fmt.Sprintf("%s [FLAKEY TEST - TOOK %d ATTEMPTS TO PASS]", r.retryDenoter, report.NumAttempts), false - } - if report.RunTime > r.conf.SlowSpecThreshold { - header, stream = fmt.Sprintf("%s [SLOW TEST]", header), false - } - } - if hasStd || emitGinkgoWriterOutput || hasEmittableReports { - stream = false - } - case types.SpecStatePending: - highlightColor = "{{yellow}}" - includeRuntime, emitGinkgoWriterOutput = false, false - if v.Is(types.VerbosityLevelSuccinct) { - header, stream = "P", true - } else { - header, succinctLocationBlock = "P [PENDING]", v.LT(types.VerbosityLevelVeryVerbose) - } - case types.SpecStateSkipped: - highlightColor = "{{cyan}}" - if report.Failure.Message != "" || v.Is(types.VerbosityLevelVeryVerbose) { - header = "S [SKIPPED]" - } else { - header, stream = "S", true - } - case types.SpecStateFailed: - highlightColor, header = "{{red}}", fmt.Sprintf("%s [FAILED]", denoter) - case types.SpecStatePanicked: - highlightColor, header = "{{magenta}}", fmt.Sprintf("%s! [PANICKED]", denoter) - case types.SpecStateInterrupted: - highlightColor, header = "{{orange}}", fmt.Sprintf("%s! [INTERRUPTED]", denoter) - case types.SpecStateAborted: - highlightColor, header = "{{coral}}", fmt.Sprintf("%s! [ABORTED]", denoter) - } - - // Emit stream and return - if stream { - r.emit(r.f(highlightColor + header + "{{/}}")) - return - } - - // Emit header - r.emitDelimiter() - if includeRuntime { - header = r.f("%s [%.3f seconds]", header, report.RunTime.Seconds()) - } - r.emitBlock(r.f(highlightColor + header + "{{/}}")) - - // Emit Code Location Block - r.emitBlock(r.codeLocationBlock(report, highlightColor, succinctLocationBlock, false)) - - //Emit Stdout/Stderr Output - if hasStd { - r.emitBlock("\n") - r.emitBlock(r.fi(1, "{{gray}}Begin Captured StdOut/StdErr Output >>{{/}}")) - r.emitBlock(r.fi(2, "%s", report.CapturedStdOutErr)) - r.emitBlock(r.fi(1, "{{gray}}<< End Captured StdOut/StdErr Output{{/}}")) - } - - //Emit Captured GinkgoWriter Output - if emitGinkgoWriterOutput && hasGW { - r.emitBlock("\n") - r.emitBlock(r.fi(1, "{{gray}}Begin Captured GinkgoWriter Output >>{{/}}")) - r.emitBlock(r.fi(2, "%s", report.CapturedGinkgoWriterOutput)) - r.emitBlock(r.fi(1, "{{gray}}<< End Captured GinkgoWriter Output{{/}}")) - } - - if hasEmittableReports { - r.emitBlock("\n") - r.emitBlock(r.fi(1, "{{gray}}Begin Report Entries >>{{/}}")) - reportEntries := report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) - if !report.Failure.IsZero() || v.GTE(types.VerbosityLevelVerbose) { - reportEntries = report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways, types.ReportEntryVisibilityFailureOrVerbose) - } - for _, entry := range reportEntries { - r.emitBlock(r.fi(2, "{{bold}}"+entry.Name+"{{gray}} - %s @ %s{{/}}", entry.Location, entry.Time.Format(types.GINKGO_TIME_FORMAT))) - if representation := entry.StringRepresentation(); representation != "" { - r.emitBlock(r.fi(3, representation)) - } - } - r.emitBlock(r.fi(1, "{{gray}}<< End Report Entries{{/}}")) - } - - // Emit Failure Message - if !report.Failure.IsZero() { - r.emitBlock("\n") - r.emitBlock(r.fi(1, highlightColor+"%s{{/}}", report.Failure.Message)) - r.emitBlock(r.fi(1, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}}\n", report.Failure.FailureNodeType, report.Failure.Location)) - if report.Failure.ForwardedPanic != "" { - r.emitBlock("\n") - r.emitBlock(r.fi(1, highlightColor+"%s{{/}}", 
report.Failure.ForwardedPanic)) - } - - if r.conf.FullTrace || report.Failure.ForwardedPanic != "" { - r.emitBlock("\n") - r.emitBlock(r.fi(1, highlightColor+"Full Stack Trace{{/}}")) - r.emitBlock(r.fi(2, "%s", report.Failure.Location.FullStackTrace)) - } - } - - r.emitDelimiter() -} - func (r *DefaultReporter) SuiteDidEnd(report types.Report) { failures := report.SpecReports.WithState(types.SpecStateFailureStates) if len(failures) > 0 { - r.emitBlock("\n\n") + r.emitBlock("\n") if len(failures) > 1 { r.emitBlock(r.f("{{red}}{{bold}}Summarizing %d Failures:{{/}}", len(failures))) } else { @@ -265,10 +118,12 @@ func (r *DefaultReporter) SuiteDidEnd(report types.Report) { highlightColor, heading = "{{magenta}}", "[PANICKED!]" case types.SpecStateAborted: highlightColor, heading = "{{coral}}", "[ABORTED]" + case types.SpecStateTimedout: + highlightColor, heading = "{{orange}}", "[TIMEDOUT]" case types.SpecStateInterrupted: highlightColor, heading = "{{orange}}", "[INTERRUPTED]" } - locationBlock := r.codeLocationBlock(specReport, highlightColor, true, true) + locationBlock := r.codeLocationBlock(specReport, highlightColor, false, true) r.emitBlock(r.fi(1, highlightColor+"%s{{/}} %s", heading, locationBlock)) } } @@ -309,38 +164,495 @@ func (r *DefaultReporter) SuiteDidEnd(report types.Report) { if specs.CountOfFlakedSpecs() > 0 { r.emit(r.f("{{light-yellow}}{{bold}}%d Flaked{{/}} | ", specs.CountOfFlakedSpecs())) } + if specs.CountOfRepeatedSpecs() > 0 { + r.emit(r.f("{{light-yellow}}{{bold}}%d Repeated{{/}} | ", specs.CountOfRepeatedSpecs())) + } r.emit(r.f("{{yellow}}{{bold}}%d Pending{{/}} | ", specs.CountWithState(types.SpecStatePending))) r.emit(r.f("{{cyan}}{{bold}}%d Skipped{{/}}\n", specs.CountWithState(types.SpecStateSkipped))) } } +func (r *DefaultReporter) WillRun(report types.SpecReport) { + v := r.conf.Verbosity() + if v.LT(types.VerbosityLevelVerbose) || report.State.Is(types.SpecStatePending|types.SpecStateSkipped) || report.RunningInParallel { + return + } + + r.emitDelimiter(0) + r.emitBlock(r.f(r.codeLocationBlock(report, "{{/}}", v.Is(types.VerbosityLevelVeryVerbose), false))) +} + +func (r *DefaultReporter) DidRun(report types.SpecReport) { + v := r.conf.Verbosity() + inParallel := report.RunningInParallel + + header := r.specDenoter + if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { + header = fmt.Sprintf("[%s]", report.LeafNodeType) + } + highlightColor := r.highlightColorForState(report.State) + + // have we already been streaming the timeline? + timelineHasBeenStreaming := v.GTE(types.VerbosityLevelVerbose) && !inParallel + + // should we show the timeline? 
+ var timeline types.Timeline + showTimeline := !timelineHasBeenStreaming && (v.GTE(types.VerbosityLevelVerbose) || report.Failed()) + if showTimeline { + timeline = report.Timeline().WithoutHiddenReportEntries() + keepVeryVerboseSpecEvents := v.Is(types.VerbosityLevelVeryVerbose) || + (v.Is(types.VerbosityLevelVerbose) && r.conf.ShowNodeEvents) || + (report.Failed() && r.conf.ShowNodeEvents) + if !keepVeryVerboseSpecEvents { + timeline = timeline.WithoutVeryVerboseSpecEvents() + } + if len(timeline) == 0 && report.CapturedGinkgoWriterOutput == "" { + // the timeline is completely empty - don't show it + showTimeline = false + } + if v.LT(types.VerbosityLevelVeryVerbose) && report.CapturedGinkgoWriterOutput == "" && len(timeline) > 0 { + //if we aren't -vv and the timeline only has a single failure, don't show it as it will appear at the end of the report + failure, isFailure := timeline[0].(types.Failure) + if isFailure && (len(timeline) == 1 || (len(timeline) == 2 && failure.AdditionalFailure != nil)) { + showTimeline = false + } + } + } + + // should we have a separate section for always-visible reports? + showSeparateVisibilityAlwaysReportsSection := !timelineHasBeenStreaming && !showTimeline && report.ReportEntries.HasVisibility(types.ReportEntryVisibilityAlways) + + // should we have a separate section for captured stdout/stderr + showSeparateStdSection := inParallel && (report.CapturedStdOutErr != "") + + // given all that - do we have any actual content to show? or are we a single denoter in a stream? + reportHasContent := v.Is(types.VerbosityLevelVeryVerbose) || showTimeline || showSeparateVisibilityAlwaysReportsSection || showSeparateStdSection || report.Failed() || (v.Is(types.VerbosityLevelVerbose) && !report.State.Is(types.SpecStateSkipped)) + + // should we show a runtime? + includeRuntime := !report.State.Is(types.SpecStateSkipped|types.SpecStatePending) || (report.State.Is(types.SpecStateSkipped) && report.Failure.Message != "") + + // should we show the codelocation block? 
+ showCodeLocation := !timelineHasBeenStreaming || !report.State.Is(types.SpecStatePassed) + + switch report.State { + case types.SpecStatePassed: + if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) && !reportHasContent { + return + } + if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { + header = fmt.Sprintf("%s PASSED", header) + } + if report.NumAttempts > 1 && report.MaxFlakeAttempts > 1 { + header, reportHasContent = fmt.Sprintf("%s [FLAKEY TEST - TOOK %d ATTEMPTS TO PASS]", r.retryDenoter, report.NumAttempts), true + } + case types.SpecStatePending: + header = "P" + if v.GT(types.VerbosityLevelSuccinct) { + header, reportHasContent = "P [PENDING]", true + } + case types.SpecStateSkipped: + header = "S" + if v.Is(types.VerbosityLevelVeryVerbose) || (v.Is(types.VerbosityLevelVerbose) && report.Failure.Message != "") { + header, reportHasContent = "S [SKIPPED]", true + } + default: + header = fmt.Sprintf("%s [%s]", header, r.humanReadableState(report.State)) + if report.MaxMustPassRepeatedly > 1 { + header = fmt.Sprintf("%s DURING REPETITION #%d", header, report.NumAttempts) + } + } + + // If we have no content to show, just emit the header and return + if !reportHasContent { + r.emit(r.f(highlightColor + header + "{{/}}")) + return + } + + if includeRuntime { + header = r.f("%s [%.3f seconds]", header, report.RunTime.Seconds()) + } + + // Emit header + if !timelineHasBeenStreaming { + r.emitDelimiter(0) + } + r.emitBlock(r.f(highlightColor + header + "{{/}}")) + if showCodeLocation { + r.emitBlock(r.codeLocationBlock(report, highlightColor, v.Is(types.VerbosityLevelVeryVerbose), false)) + } + + //Emit Stdout/Stderr Output + if showSeparateStdSection { + r.emitBlock("\n") + r.emitBlock(r.fi(1, "{{gray}}Captured StdOut/StdErr Output >>{{/}}")) + r.emitBlock(r.fi(1, "%s", report.CapturedStdOutErr)) + r.emitBlock(r.fi(1, "{{gray}}<< Captured StdOut/StdErr Output{{/}}")) + } + + if showSeparateVisibilityAlwaysReportsSection { + r.emitBlock("\n") + r.emitBlock(r.fi(1, "{{gray}}Report Entries >>{{/}}")) + for _, entry := range report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) { + r.emitReportEntry(1, entry) + } + r.emitBlock(r.fi(1, "{{gray}}<< Report Entries{{/}}")) + } + + if showTimeline { + r.emitBlock("\n") + r.emitBlock(r.fi(1, "{{gray}}Timeline >>{{/}}")) + r.emitTimeline(1, report, timeline) + r.emitBlock(r.fi(1, "{{gray}}<< Timeline{{/}}")) + } + + // Emit Failure Message + if !report.Failure.IsZero() && !v.Is(types.VerbosityLevelVeryVerbose) { + r.emitBlock("\n") + r.emitFailure(1, report.State, report.Failure, true) + if len(report.AdditionalFailures) > 0 { + r.emitBlock(r.fi(1, "\nThere were {{bold}}{{red}}additional failures{{/}} detected.
To view them in detail run {{bold}}ginkgo -vv{{/}}")) + } + } + + r.emitDelimiter(0) +} + +func (r *DefaultReporter) highlightColorForState(state types.SpecState) string { + switch state { + case types.SpecStatePassed: + return "{{green}}" + case types.SpecStatePending: + return "{{yellow}}" + case types.SpecStateSkipped: + return "{{cyan}}" + case types.SpecStateFailed: + return "{{red}}" + case types.SpecStateTimedout: + return "{{orange}}" + case types.SpecStatePanicked: + return "{{magenta}}" + case types.SpecStateInterrupted: + return "{{orange}}" + case types.SpecStateAborted: + return "{{coral}}" + default: + return "{{gray}}" + } +} + +func (r *DefaultReporter) humanReadableState(state types.SpecState) string { + return strings.ToUpper(state.String()) +} + +func (r *DefaultReporter) emitTimeline(indent uint, report types.SpecReport, timeline types.Timeline) { + isVeryVerbose := r.conf.Verbosity().Is(types.VerbosityLevelVeryVerbose) + gw := report.CapturedGinkgoWriterOutput + cursor := 0 + for _, entry := range timeline { + tl := entry.GetTimelineLocation() + if tl.Offset < len(gw) { + r.emit(r.fi(indent, "%s", gw[cursor:tl.Offset])) + cursor = tl.Offset + } else if cursor < len(gw) { + r.emit(r.fi(indent, "%s", gw[cursor:])) + cursor = len(gw) + } + switch x := entry.(type) { + case types.Failure: + if isVeryVerbose { + r.emitFailure(indent, report.State, x, false) + } else { + r.emitShortFailure(indent, report.State, x) + } + case types.AdditionalFailure: + if isVeryVerbose { + r.emitFailure(indent, x.State, x.Failure, true) + } else { + r.emitShortFailure(indent, x.State, x.Failure) + } + case types.ReportEntry: + r.emitReportEntry(indent, x) + case types.ProgressReport: + r.emitProgressReport(indent, false, x) + case types.SpecEvent: + if isVeryVerbose || !x.IsOnlyVisibleAtVeryVerbose() || r.conf.ShowNodeEvents { + r.emitSpecEvent(indent, x, isVeryVerbose) + } + } + } + if cursor < len(gw) { + r.emit(r.fi(indent, "%s", gw[cursor:])) + } +} + +func (r *DefaultReporter) EmitFailure(state types.SpecState, failure types.Failure) { + if r.conf.Verbosity().Is(types.VerbosityLevelVerbose) { + r.emitShortFailure(1, state, failure) + } else if r.conf.Verbosity().Is(types.VerbosityLevelVeryVerbose) { + r.emitFailure(1, state, failure, true) + } +} + +func (r *DefaultReporter) emitShortFailure(indent uint, state types.SpecState, failure types.Failure) { + r.emitBlock(r.fi(indent, r.highlightColorForState(state)+"[%s]{{/}} in [%s] - %s {{gray}}@ %s{{/}}", + r.humanReadableState(state), + failure.FailureNodeType, + failure.Location, + failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT), + )) +} + +func (r *DefaultReporter) emitFailure(indent uint, state types.SpecState, failure types.Failure, includeAdditionalFailure bool) { + highlightColor := r.highlightColorForState(state) + r.emitBlock(r.fi(indent, highlightColor+"[%s] %s{{/}}", r.humanReadableState(state), failure.Message)) + r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + if failure.ForwardedPanic != "" { + r.emitBlock("\n") + r.emitBlock(r.fi(indent, highlightColor+"%s{{/}}", failure.ForwardedPanic)) + } + + if r.conf.FullTrace || failure.ForwardedPanic != "" { + r.emitBlock("\n") + r.emitBlock(r.fi(indent, highlightColor+"Full Stack Trace{{/}}")) + r.emitBlock(r.fi(indent+1, "%s", failure.Location.FullStackTrace)) + } + + if 
!failure.ProgressReport.IsZero() { + r.emitBlock("\n") + r.emitProgressReport(indent, false, failure.ProgressReport) + } + + if failure.AdditionalFailure != nil && includeAdditionalFailure { + r.emitBlock("\n") + r.emitFailure(indent, failure.AdditionalFailure.State, failure.AdditionalFailure.Failure, true) + } +} + +func (r *DefaultReporter) EmitProgressReport(report types.ProgressReport) { + r.emitDelimiter(1) + + if report.RunningInParallel { + r.emit(r.fi(1, "{{coral}}Progress Report for Ginkgo Process #{{bold}}%d{{/}}\n", report.ParallelProcess)) + } + shouldEmitGW := report.RunningInParallel || r.conf.Verbosity().LT(types.VerbosityLevelVerbose) + r.emitProgressReport(1, shouldEmitGW, report) + r.emitDelimiter(1) +} + +func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput bool, report types.ProgressReport) { + if report.Message != "" { + r.emitBlock(r.fi(indent, report.Message+"\n")) + indent += 1 + } + if report.LeafNodeText != "" { + subjectIndent := indent + if len(report.ContainerHierarchyTexts) > 0 { + r.emit(r.fi(indent, r.cycleJoin(report.ContainerHierarchyTexts, " "))) + r.emit(" ") + subjectIndent = 0 + } + r.emit(r.fi(subjectIndent, "{{bold}}{{orange}}%s{{/}} (Spec Runtime: %s)\n", report.LeafNodeText, report.Time().Sub(report.SpecStartTime).Round(time.Millisecond))) + r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.LeafNodeLocation)) + indent += 1 + } + if report.CurrentNodeType != types.NodeTypeInvalid { + r.emit(r.fi(indent, "In {{bold}}{{orange}}[%s]{{/}}", report.CurrentNodeType)) + if report.CurrentNodeText != "" && !report.CurrentNodeType.Is(types.NodeTypeIt) { + r.emit(r.f(" {{bold}}{{orange}}%s{{/}}", report.CurrentNodeText)) + } + + r.emit(r.f(" (Node Runtime: %s)\n", report.Time().Sub(report.CurrentNodeStartTime).Round(time.Millisecond))) + r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.CurrentNodeLocation)) + indent += 1 + } + if report.CurrentStepText != "" { + r.emit(r.fi(indent, "At {{bold}}{{orange}}[By Step] %s{{/}} (Step Runtime: %s)\n", report.CurrentStepText, report.Time().Sub(report.CurrentStepStartTime).Round(time.Millisecond))) + r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.CurrentStepLocation)) + indent += 1 + } + + if indent > 0 { + indent -= 1 + } + + if emitGinkgoWriterOutput && report.CapturedGinkgoWriterOutput != "" { + r.emit("\n") + r.emitBlock(r.fi(indent, "{{gray}}Begin Captured GinkgoWriter Output >>{{/}}")) + limit, lines := 10, strings.Split(report.CapturedGinkgoWriterOutput, "\n") + if len(lines) <= limit { + r.emitBlock(r.fi(indent+1, "%s", report.CapturedGinkgoWriterOutput)) + } else { + r.emitBlock(r.fi(indent+1, "{{gray}}...{{/}}")) + for _, line := range lines[len(lines)-limit-1:] { + r.emitBlock(r.fi(indent+1, "%s", line)) + } + } + r.emitBlock(r.fi(indent, "{{gray}}<< End Captured GinkgoWriter Output{{/}}")) + } + + if !report.SpecGoroutine().IsZero() { + r.emit("\n") + r.emit(r.fi(indent, "{{bold}}{{underline}}Spec Goroutine{{/}}\n")) + r.emitGoroutines(indent, report.SpecGoroutine()) + } + + if len(report.AdditionalReports) > 0 { + r.emit("\n") + r.emitBlock(r.fi(indent, "{{gray}}Begin Additional Progress Reports >>{{/}}")) + for i, additionalReport := range report.AdditionalReports { + r.emit(r.fi(indent+1, additionalReport)) + if i < len(report.AdditionalReports)-1 { + r.emitBlock(r.fi(indent+1, "{{gray}}%s{{/}}", strings.Repeat("-", 10))) + } + } + r.emitBlock(r.fi(indent, "{{gray}}<< End Additional Progress Reports{{/}}")) + } + + highlightedGoroutines := report.HighlightedGoroutines() + 
if len(highlightedGoroutines) > 0 { + r.emit("\n") + r.emit(r.fi(indent, "{{bold}}{{underline}}Goroutines of Interest{{/}}\n")) + r.emitGoroutines(indent, highlightedGoroutines...) + } + + otherGoroutines := report.OtherGoroutines() + if len(otherGoroutines) > 0 { + r.emit("\n") + r.emit(r.fi(indent, "{{gray}}{{bold}}{{underline}}Other Goroutines{{/}}\n")) + r.emitGoroutines(indent, otherGoroutines...) + } +} + +func (r *DefaultReporter) EmitReportEntry(entry types.ReportEntry) { + if r.conf.Verbosity().LT(types.VerbosityLevelVerbose) || entry.Visibility == types.ReportEntryVisibilityNever { + return + } + r.emitReportEntry(1, entry) +} + +func (r *DefaultReporter) emitReportEntry(indent uint, entry types.ReportEntry) { + r.emitBlock(r.fi(indent, "{{bold}}"+entry.Name+"{{gray}} "+fmt.Sprintf("- %s @ %s{{/}}", entry.Location, entry.Time.Format(types.GINKGO_TIME_FORMAT)))) + if representation := entry.StringRepresentation(); representation != "" { + r.emitBlock(r.fi(indent+1, representation)) + } +} + +func (r *DefaultReporter) EmitSpecEvent(event types.SpecEvent) { + v := r.conf.Verbosity() + if v.Is(types.VerbosityLevelVeryVerbose) || (v.Is(types.VerbosityLevelVerbose) && (r.conf.ShowNodeEvents || !event.IsOnlyVisibleAtVeryVerbose())) { + r.emitSpecEvent(1, event, r.conf.Verbosity().Is(types.VerbosityLevelVeryVerbose)) + } +} + +func (r *DefaultReporter) emitSpecEvent(indent uint, event types.SpecEvent, includeLocation bool) { + location := "" + if includeLocation { + location = fmt.Sprintf("- %s ", event.CodeLocation.String()) + } + switch event.SpecEventType { + case types.SpecEventInvalid: + return + case types.SpecEventByStart: + r.emitBlock(r.fi(indent, "{{bold}}STEP:{{/}} %s {{gray}}%s@ %s{{/}}", event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + case types.SpecEventByEnd: + r.emitBlock(r.fi(indent, "{{bold}}END STEP:{{/}} %s {{gray}}%s@ %s (%s){{/}}", event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT), event.Duration.Round(time.Millisecond))) + case types.SpecEventNodeStart: + r.emitBlock(r.fi(indent, "> Enter {{bold}}[%s]{{/}} %s {{gray}}%s@ %s{{/}}", event.NodeType.String(), event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + case types.SpecEventNodeEnd: + r.emitBlock(r.fi(indent, "< Exit {{bold}}[%s]{{/}} %s {{gray}}%s@ %s (%s){{/}}", event.NodeType.String(), event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT), event.Duration.Round(time.Millisecond))) + case types.SpecEventSpecRepeat: + r.emitBlock(r.fi(indent, "\n{{bold}}Attempt #%d {{green}}Passed{{/}}{{bold}}. Repeating %s{{/}} {{gray}}@ %s{{/}}\n\n", event.Attempt, r.retryDenoter, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + case types.SpecEventSpecRetry: + r.emitBlock(r.fi(indent, "\n{{bold}}Attempt #%d {{red}}Failed{{/}}{{bold}}. 
Retrying %s{{/}} {{gray}}@ %s{{/}}\n\n", event.Attempt, r.retryDenoter, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + } +} + +func (r *DefaultReporter) emitGoroutines(indent uint, goroutines ...types.Goroutine) { + for idx, g := range goroutines { + color := "{{gray}}" + if g.HasHighlights() { + color = "{{orange}}" + } + r.emit(r.fi(indent, color+"goroutine %d [%s]{{/}}\n", g.ID, g.State)) + for _, fc := range g.Stack { + if fc.Highlight { + r.emit(r.fi(indent, color+"{{bold}}> %s{{/}}\n", fc.Function)) + r.emit(r.fi(indent+2, color+"{{bold}}%s:%d{{/}}\n", fc.Filename, fc.Line)) + r.emitSource(indent+3, fc) + } else { + r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", fc.Function)) + r.emit(r.fi(indent+2, "{{gray}}%s:%d{{/}}\n", fc.Filename, fc.Line)) + } + } + + if idx+1 < len(goroutines) { + r.emit("\n") + } + } +} + +func (r *DefaultReporter) emitSource(indent uint, fc types.FunctionCall) { + lines := fc.Source + if len(lines) == 0 { + return + } + + lTrim := 100000 + for _, line := range lines { + lTrimLine := len(line) - len(strings.TrimLeft(line, " \t")) + if lTrimLine < lTrim && len(line) > 0 { + lTrim = lTrimLine + } + } + if lTrim == 100000 { + lTrim = 0 + } + + for idx, line := range lines { + if len(line) > lTrim { + line = line[lTrim:] + } + if idx == fc.SourceHighlight { + r.emit(r.fi(indent, "{{bold}}{{orange}}> %s{{/}}\n", line)) + } else { + r.emit(r.fi(indent, "| %s\n", line)) + } + } +} + /* Emitting to the writer */ func (r *DefaultReporter) emit(s string) { - if len(s) > 0 { - r.lastChar = s[len(s)-1:] - r.lastEmissionWasDelimiter = false - r.writer.Write([]byte(s)) - } + r._emit(s, false, false) } func (r *DefaultReporter) emitBlock(s string) { - if len(s) > 0 { - if r.lastChar != "\n" { - r.emit("\n") - } - r.emit(s) - if r.lastChar != "\n" { - r.emit("\n") - } - } + r._emit(s, true, false) } -func (r *DefaultReporter) emitDelimiter() { - if r.lastEmissionWasDelimiter { +func (r *DefaultReporter) emitDelimiter(indent uint) { + r._emit(r.fi(indent, "{{gray}}%s{{/}}", strings.Repeat("-", 30)), true, true) +} + +// a bit ugly - but we're trying to minimize locking on this hot codepath +func (r *DefaultReporter) _emit(s string, block bool, isDelimiter bool) { + if len(s) == 0 { return } - r.emitBlock(r.f("{{gray}}%s{{/}}", strings.Repeat("-", 30))) - r.lastEmissionWasDelimiter = true + r.lock.Lock() + defer r.lock.Unlock() + if isDelimiter && r.lastEmissionWasDelimiter { + return + } + if block && !r.lastCharWasNewline { + r.writer.Write([]byte("\n")) + } + r.lastCharWasNewline = (s[len(s)-1:] == "\n") + r.writer.Write([]byte(s)) + if block && !r.lastCharWasNewline { + r.writer.Write([]byte("\n")) + r.lastCharWasNewline = true + } + r.lastEmissionWasDelimiter = isDelimiter } /* Rendering text */ @@ -356,13 +668,14 @@ func (r *DefaultReporter) cycleJoin(elements []string, joiner string) string { return r.formatter.CycleJoin(elements, joiner, []string{"{{/}}", "{{gray}}"}) } -func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightColor string, succinct bool, usePreciseFailureLocation bool) string { +func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightColor string, veryVerbose bool, usePreciseFailureLocation bool) string { texts, locations, labels := []string{}, []types.CodeLocation{}, [][]string{} texts, locations, labels = append(texts, report.ContainerHierarchyTexts...), append(locations, report.ContainerHierarchyLocations...), append(labels, report.ContainerHierarchyLabels...) 
+ if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { texts = append(texts, r.f("[%s] %s", report.LeafNodeType, report.LeafNodeText)) } else { - texts = append(texts, report.LeafNodeText) + texts = append(texts, r.f(report.LeafNodeText)) } labels = append(labels, report.LeafNodeLabels) locations = append(locations, report.LeafNodeLocation) @@ -372,24 +685,58 @@ func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightCo failureLocation = report.Failure.Location } + highlightIndex := -1 switch report.Failure.FailureNodeContext { case types.FailureNodeAtTopLevel: - texts = append([]string{r.f(highlightColor+"{{bold}}TOP-LEVEL [%s]{{/}}", report.Failure.FailureNodeType)}, texts...) + texts = append([]string{fmt.Sprintf("TOP-LEVEL [%s]", report.Failure.FailureNodeType)}, texts...) locations = append([]types.CodeLocation{failureLocation}, locations...) labels = append([][]string{{}}, labels...) + highlightIndex = 0 case types.FailureNodeInContainer: i := report.Failure.FailureNodeContainerIndex - texts[i] = r.f(highlightColor+"{{bold}}%s [%s]{{/}}", texts[i], report.Failure.FailureNodeType) + texts[i] = fmt.Sprintf("%s [%s]", texts[i], report.Failure.FailureNodeType) locations[i] = failureLocation + highlightIndex = i case types.FailureNodeIsLeafNode: i := len(texts) - 1 - texts[i] = r.f(highlightColor+"{{bold}}[%s] %s{{/}}", report.LeafNodeType, report.LeafNodeText) + texts[i] = fmt.Sprintf("[%s] %s", report.LeafNodeType, report.LeafNodeText) locations[i] = failureLocation + highlightIndex = i + default: + //there is no failure, so we highlight the leaf node + highlightIndex = len(texts) - 1 } out := "" - if succinct { - out += r.f("%s", r.cycleJoin(texts, " ")) + if veryVerbose { + for i := range texts { + if i == highlightIndex { + out += r.fi(uint(i), highlightColor+"{{bold}}%s{{/}}", texts[i]) + } else { + out += r.fi(uint(i), "%s", texts[i]) + } + if len(labels[i]) > 0 { + out += r.f(" {{coral}}[%s]{{/}}", strings.Join(labels[i], ", ")) + } + out += "\n" + out += r.fi(uint(i), "{{gray}}%s{{/}}\n", locations[i]) + } + } else { + for i := range texts { + style := "{{/}}" + if i%2 == 1 { + style = "{{gray}}" + } + if i == highlightIndex { + style = highlightColor + "{{bold}}" + } + out += r.f(style+"%s", texts[i]) + if i < len(texts)-1 { + out += " " + } else { + out += r.f("{{/}}") + } + } flattenedLabels := report.Labels() if len(flattenedLabels) > 0 { out += r.f(" {{coral}}[%s]{{/}}", strings.Join(flattenedLabels, ", ")) @@ -398,17 +745,15 @@ func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightCo if usePreciseFailureLocation { out += r.f("{{gray}}%s{{/}}", failureLocation) } else { - out += r.f("{{gray}}%s{{/}}", locations[len(locations)-1]) - } - } else { - for i := range texts { - out += r.fi(uint(i), "%s", texts[i]) - if len(labels[i]) > 0 { - out += r.f(" {{coral}}[%s]{{/}}", strings.Join(labels[i], ", ")) + leafLocation := locations[len(locations)-1] + if (report.Failure.FailureNodeLocation != types.CodeLocation{}) && (report.Failure.FailureNodeLocation != leafLocation) { + out += r.fi(1, highlightColor+"[%s]{{/}} {{gray}}%s{{/}}\n", report.Failure.FailureNodeType, report.Failure.FailureNodeLocation) + out += r.fi(1, "{{gray}}[%s] %s{{/}}", report.LeafNodeType, leafLocation) + } else { + out += r.f("{{gray}}%s{{/}}", leafLocation) } - out += "\n" - out += r.fi(uint(i), "{{gray}}%s{{/}}\n", locations[i]) } + } return out } diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go 
b/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go index 89d3007..613072e 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go @@ -35,7 +35,7 @@ func ReportViaDeprecatedReporter(reporter DeprecatedReporter, report types.Repor FailOnPending: report.SuiteConfig.FailOnPending, FailFast: report.SuiteConfig.FailFast, FlakeAttempts: report.SuiteConfig.FlakeAttempts, - EmitSpecProgress: report.SuiteConfig.EmitSpecProgress, + EmitSpecProgress: false, DryRun: report.SuiteConfig.DryRun, ParallelNode: report.SuiteConfig.ParallelProcess, ParallelTotal: report.SuiteConfig.ParallelTotal, diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go index febcc65..ca98609 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go @@ -15,12 +15,32 @@ import ( "fmt" "os" "strings" - "time" "github.com/onsi/ginkgo/v2/config" "github.com/onsi/ginkgo/v2/types" ) +type JunitReportConfig struct { + // Spec States for which no timeline should be emitted for system-err + // set this to types.SpecStatePassed|types.SpecStateSkipped|types.SpecStatePending to only match failing specs + OmitTimelinesForSpecState types.SpecState + + // Enable OmitFailureMessageAttr to prevent failure messages appearing in the "message" attribute of the Failure and Error tags + OmitFailureMessageAttr bool + + //Enable OmitCapturedStdOutErr to prevent captured stdout/stderr appearing in system-out + OmitCapturedStdOutErr bool + + // Enable OmitSpecLabels to prevent labels from appearing in the spec name + OmitSpecLabels bool + + // Enable OmitLeafNodeType to prevent the spec leaf node type from appearing in the spec name + OmitLeafNodeType bool + + // Enable OmitSuiteSetupNodes to prevent the creation of testcase entries for setup nodes + OmitSuiteSetupNodes bool +} + type JUnitTestSuites struct { XMLName xml.Name `xml:"testsuites"` // Tests maps onto the total number of specs in all test suites (this includes any suite nodes such as BeforeSuite) @@ -128,6 +148,10 @@ type JUnitFailure struct { } func GenerateJUnitReport(report types.Report, dst string) error { + return GenerateJUnitReportWithConfig(report, dst, JunitReportConfig{}) +} + +func GenerateJUnitReportWithConfig(report types.Report, dst string, config JunitReportConfig) error { suite := JUnitTestSuite{ Name: report.SuiteDescription, Package: report.SuitePath, @@ -149,7 +173,6 @@ func GenerateJUnitReport(report types.Report, dst string) error { {"FailOnPending", fmt.Sprintf("%t", report.SuiteConfig.FailOnPending)}, {"FailFast", fmt.Sprintf("%t", report.SuiteConfig.FailFast)}, {"FlakeAttempts", fmt.Sprintf("%d", report.SuiteConfig.FlakeAttempts)}, - {"EmitSpecProgress", fmt.Sprintf("%t", report.SuiteConfig.EmitSpecProgress)}, {"DryRun", fmt.Sprintf("%t", report.SuiteConfig.DryRun)}, {"ParallelTotal", fmt.Sprintf("%d", report.SuiteConfig.ParallelTotal)}, {"OutputInterceptorMode", report.SuiteConfig.OutputInterceptorMode}, @@ -157,22 +180,33 @@ func GenerateJUnitReport(report types.Report, dst string) error { }, } for _, spec := range report.SpecReports { + if config.OmitSuiteSetupNodes && spec.LeafNodeType != types.NodeTypeIt { + continue + } name := fmt.Sprintf("[%s]", spec.LeafNodeType) + if config.OmitLeafNodeType { + name = "" + } if spec.FullText() != "" { name = name + " " + spec.FullText() } labels := spec.Labels() 
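A hedged sketch of how the JunitReportConfig introduced above might be driven from a suite, using the ReportAfterSuite node and GenerateJUnitReportWithConfig from this diff; the suite name and output path are illustrative:

package mysuite_test

import (
	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"

	"github.com/onsi/ginkgo/v2/reporters"
	"github.com/onsi/ginkgo/v2/types"
)

// Keep timelines only for specs that did not pass, and drop captured stdout/stderr.
var _ = ReportAfterSuite("custom junit report", func(report Report) {
	err := reporters.GenerateJUnitReportWithConfig(report, "junit.custom.xml", reporters.JunitReportConfig{
		OmitTimelinesForSpecState: types.SpecStatePassed | types.SpecStateSkipped | types.SpecStatePending,
		OmitCapturedStdOutErr:     true,
	})
	Expect(err).NotTo(HaveOccurred())
})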
- if len(labels) > 0 { + if len(labels) > 0 && !config.OmitSpecLabels { name = name + " [" + strings.Join(labels, ", ") + "]" } + name = strings.TrimSpace(name) test := JUnitTestCase{ Name: name, Classname: report.SuiteDescription, Status: spec.State.String(), Time: spec.RunTime.Seconds(), - SystemOut: systemOutForUnstructureReporters(spec), - SystemErr: spec.CapturedGinkgoWriterOutput, + } + if !spec.State.Is(config.OmitTimelinesForSpecState) { + test.SystemErr = systemErrForUnstructuredReporters(spec) + } + if !config.OmitCapturedStdOutErr { + test.SystemOut = systemOutForUnstructuredReporters(spec) } suite.Tests += 1 @@ -191,28 +225,50 @@ func GenerateJUnitReport(report types.Report, dst string) error { test.Failure = &JUnitFailure{ Message: spec.Failure.Message, Type: "failed", - Description: fmt.Sprintf("%s\n%s", spec.Failure.Location.String(), spec.Failure.Location.FullStackTrace), + Description: failureDescriptionForUnstructuredReporters(spec), + } + if config.OmitFailureMessageAttr { + test.Failure.Message = "" + } + suite.Failures += 1 + case types.SpecStateTimedout: + test.Failure = &JUnitFailure{ + Message: spec.Failure.Message, + Type: "timedout", + Description: failureDescriptionForUnstructuredReporters(spec), + } + if config.OmitFailureMessageAttr { + test.Failure.Message = "" } suite.Failures += 1 case types.SpecStateInterrupted: test.Error = &JUnitError{ - Message: "interrupted", + Message: spec.Failure.Message, Type: "interrupted", - Description: spec.Failure.Message, + Description: failureDescriptionForUnstructuredReporters(spec), + } + if config.OmitFailureMessageAttr { + test.Error.Message = "" } suite.Errors += 1 case types.SpecStateAborted: test.Failure = &JUnitFailure{ Message: spec.Failure.Message, Type: "aborted", - Description: fmt.Sprintf("%s\n%s", spec.Failure.Location.String(), spec.Failure.Location.FullStackTrace), + Description: failureDescriptionForUnstructuredReporters(spec), + } + if config.OmitFailureMessageAttr { + test.Failure.Message = "" } suite.Errors += 1 case types.SpecStatePanicked: test.Error = &JUnitError{ Message: spec.Failure.ForwardedPanic, Type: "panicked", - Description: fmt.Sprintf("%s\n%s", spec.Failure.Location.String(), spec.Failure.Location.FullStackTrace), + Description: failureDescriptionForUnstructuredReporters(spec), + } + if config.OmitFailureMessageAttr { + test.Error.Message = "" } suite.Errors += 1 } @@ -278,21 +334,23 @@ func MergeAndCleanupJUnitReports(sources []string, dst string) ([]string, error) return messages, f.Close() } -func systemOutForUnstructureReporters(spec types.SpecReport) string { - systemOut := spec.CapturedStdOutErr - if len(spec.ReportEntries) > 0 { - systemOut += "\nReport Entries:\n" - for i, entry := range spec.ReportEntries { - systemOut += fmt.Sprintf("%s\n%s\n%s\n", entry.Name, entry.Location, entry.Time.Format(time.RFC3339Nano)) - if representation := entry.StringRepresentation(); representation != "" { - systemOut += representation + "\n" - } - if i+1 < len(spec.ReportEntries) { - systemOut += "--\n" - } - } +func failureDescriptionForUnstructuredReporters(spec types.SpecReport) string { + out := &strings.Builder{} + NewDefaultReporter(types.ReporterConfig{NoColor: true, VeryVerbose: true}, out).emitFailure(0, spec.State, spec.Failure, true) + if len(spec.AdditionalFailures) > 0 { + out.WriteString("\nThere were additional failures detected after the initial failure. 
These are visible in the timeline\n") } - return systemOut + return out.String() +} + +func systemErrForUnstructuredReporters(spec types.SpecReport) string { + out := &strings.Builder{} + NewDefaultReporter(types.ReporterConfig{NoColor: true, VeryVerbose: true}, out).emitTimeline(0, spec, spec.Timeline()) + return out.String() +} + +func systemOutForUnstructuredReporters(spec types.SpecReport) string { + return spec.CapturedStdOutErr } // Deprecated JUnitReporter (so folks can still compile their suites) diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go index 29f84e7..5e726c4 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go @@ -9,11 +9,21 @@ type Reporter interface { WillRun(report types.SpecReport) DidRun(report types.SpecReport) SuiteDidEnd(report types.Report) + + //Timeline emission + EmitFailure(state types.SpecState, failure types.Failure) + EmitProgressReport(progressReport types.ProgressReport) + EmitReportEntry(entry types.ReportEntry) + EmitSpecEvent(event types.SpecEvent) } type NoopReporter struct{} -func (n NoopReporter) SuiteWillBegin(report types.Report) {} -func (n NoopReporter) WillRun(report types.SpecReport) {} -func (n NoopReporter) DidRun(report types.SpecReport) {} -func (n NoopReporter) SuiteDidEnd(report types.Report) {} +func (n NoopReporter) SuiteWillBegin(report types.Report) {} +func (n NoopReporter) WillRun(report types.SpecReport) {} +func (n NoopReporter) DidRun(report types.SpecReport) {} +func (n NoopReporter) SuiteDidEnd(report types.Report) {} +func (n NoopReporter) EmitFailure(state types.SpecState, failure types.Failure) {} +func (n NoopReporter) EmitProgressReport(progressReport types.ProgressReport) {} +func (n NoopReporter) EmitReportEntry(entry types.ReportEntry) {} +func (n NoopReporter) EmitSpecEvent(event types.SpecEvent) {} diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go index f9b1117..c186349 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go @@ -60,20 +60,24 @@ func GenerateTeamcityReport(report types.Report, dst string) error { } fmt.Fprintf(f, "##teamcity[testIgnored name='%s' message='%s']\n", name, tcEscape(message)) case types.SpecStateFailed: - details := fmt.Sprintf("%s\n%s", spec.Failure.Location.String(), spec.Failure.Location.FullStackTrace) + details := failureDescriptionForUnstructuredReporters(spec) fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='failed - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details)) case types.SpecStatePanicked: - details := fmt.Sprintf("%s\n%s", spec.Failure.Location.String(), spec.Failure.Location.FullStackTrace) + details := failureDescriptionForUnstructuredReporters(spec) fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='panicked - %s' details='%s']\n", name, tcEscape(spec.Failure.ForwardedPanic), tcEscape(details)) + case types.SpecStateTimedout: + details := failureDescriptionForUnstructuredReporters(spec) + fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='timedout - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details)) case types.SpecStateInterrupted: - fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='interrupted' details='%s']\n", name, tcEscape(spec.Failure.Message)) + details := 
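Because the Reporter interface above now includes the four timeline-emission methods, a third-party reporter written against the old interface no longer compiles. One sketch of a forward-compatible pattern, assuming only the APIs shown in this diff: embed NoopReporter and override just the hooks you need.

package myreporter

import (
	"fmt"

	"github.com/onsi/ginkgo/v2/reporters"
	"github.com/onsi/ginkgo/v2/types"
)

// quietReporter inherits no-op implementations of SuiteWillBegin, EmitFailure,
// EmitSpecEvent, and the rest from the embedded NoopReporter.
type quietReporter struct {
	reporters.NoopReporter
}

func (quietReporter) DidRun(report types.SpecReport) {
	fmt.Printf("%s -> %s\n", report.FullText(), report.State)
}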
failureDescriptionForUnstructuredReporters(spec) + fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='interrupted - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details)) case types.SpecStateAborted: - details := fmt.Sprintf("%s\n%s", spec.Failure.Location.String(), spec.Failure.Location.FullStackTrace) + details := failureDescriptionForUnstructuredReporters(spec) fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='aborted - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details)) } - fmt.Fprintf(f, "##teamcity[testStdOut name='%s' out='%s']\n", name, tcEscape(systemOutForUnstructureReporters(spec))) - fmt.Fprintf(f, "##teamcity[testStdErr name='%s' out='%s']\n", name, tcEscape(spec.CapturedGinkgoWriterOutput)) + fmt.Fprintf(f, "##teamcity[testStdOut name='%s' out='%s']\n", name, tcEscape(systemOutForUnstructuredReporters(spec))) + fmt.Fprintf(f, "##teamcity[testStdErr name='%s' out='%s']\n", name, tcEscape(systemErrForUnstructuredReporters(spec))) fmt.Fprintf(f, "##teamcity[testFinished name='%s' duration='%d']\n", name, int(spec.RunTime.Seconds()*1000.0)) } fmt.Fprintf(f, "##teamcity[testSuiteFinished name='%s']\n", tcEscape(report.SuiteDescription)) diff --git a/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go b/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go index 8750236..f33786a 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go @@ -35,7 +35,7 @@ func CurrentSpecReport() SpecReport { } /* - ReportEntryVisibility governs the visibility of ReportEntries in Ginkgo's console reporter + ReportEntryVisibility governs the visibility of ReportEntries in Ginkgo's console reporter - ReportEntryVisibilityAlways: the default behavior - the ReportEntry is always emitted. - ReportEntryVisibilityFailureOrVerbose: the ReportEntry is only emitted if the spec fails or if the tests are run with -v (similar to GinkgoWriters behavior). @@ -50,9 +50,9 @@ const ReportEntryVisibilityAlways, ReportEntryVisibilityFailureOrVerbose, Report /* AddReportEntry generates and adds a new ReportEntry to the current spec's SpecReport. It can take any of the following arguments: - - A single arbitrary object to attach as the Value of the ReportEntry. This object will be included in any generated reports and will be emitted to the console when the report is emitted. - - A ReportEntryVisibility enum to control the visibility of the ReportEntry - - An Offset or CodeLocation decoration to control the reported location of the ReportEntry + - A single arbitrary object to attach as the Value of the ReportEntry. This object will be included in any generated reports and will be emitted to the console when the report is emitted. + - A ReportEntryVisibility enum to control the visibility of the ReportEntry + - An Offset or CodeLocation decoration to control the reported location of the ReportEntry If the Value object implements `fmt.Stringer`, it's `String()` representation is used when emitting to the console. @@ -100,6 +100,25 @@ func ReportAfterEach(body func(SpecReport), args ...interface{}) bool { return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportAfterEach, "", combinedArgs...)) } +/* +ReportBeforeSuite nodes are run at the beginning of the suite. ReportBeforeSuite nodes take a function that receives a suite Report. 
+ +They are called at the beginning of the suite, before any specs have run and any BeforeSuite or SynchronizedBeforeSuite nodes, and are passed in the initial report for the suite. +ReportBeforeSuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node) + +# When running in parallel, Ginkgo ensures that only one of the parallel nodes runs the ReportBeforeSuite + +You cannot nest any other Ginkgo nodes within a ReportBeforeSuite node's closure. +You can learn more about ReportBeforeSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically + +You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports +*/ +func ReportBeforeSuite(body func(Report), args ...interface{}) bool { + combinedArgs := []interface{}{body} + combinedArgs = append(combinedArgs, args...) + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportBeforeSuite, "", combinedArgs...)) +} + /* ReportAfterSuite nodes are run at the end of the suite. ReportAfterSuite nodes take a function that receives a suite Report. @@ -113,10 +132,13 @@ In addition to using ReportAfterSuite to programmatically generate suite reports You cannot nest any other Ginkgo nodes within a ReportAfterSuite node's closure. You can learn more about ReportAfterSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically + You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports */ -func ReportAfterSuite(text string, body func(Report)) bool { - return pushNode(internal.NewReportAfterSuiteNode(text, body, types.NewCodeLocation(1))) +func ReportAfterSuite(text string, body func(Report), args ...interface{}) bool { + combinedArgs := []interface{}{body} + combinedArgs = append(combinedArgs, args...) + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportAfterSuite, text, combinedArgs...)) } func registerReportAfterSuiteNodeForAutogeneratedReports(reporterConfig types.ReporterConfig) { @@ -151,7 +173,8 @@ func registerReportAfterSuiteNodeForAutogeneratedReports(reporterConfig types.Re if reporterConfig.TeamcityReport != "" { flags = append(flags, "--teamcity-report") } - pushNode(internal.NewReportAfterSuiteNode( + pushNode(internal.NewNode( + deprecationTracker, types.NodeTypeReportAfterSuite, fmt.Sprintf("Autogenerated ReportAfterSuite for %s", strings.Join(flags, " ")), body, types.NewCustomCodeLocation("autogenerated by Ginkgo"), diff --git a/vendor/github.com/onsi/ginkgo/v2/table_dsl.go b/vendor/github.com/onsi/ginkgo/v2/table_dsl.go index 0db0e71..ac9b7ab 100644 --- a/vendor/github.com/onsi/ginkgo/v2/table_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/table_dsl.go @@ -1,6 +1,7 @@ package ginkgo import ( + "context" "fmt" "reflect" "strings" @@ -12,7 +13,7 @@ import ( /* The EntryDescription decorator allows you to pass a format string to DescribeTable() and Entry(). This format string is used to generate entry names via: - fmt.Sprintf(formatString, parameters...) + fmt.Sprintf(formatString, parameters...) where parameters are the parameters passed into the entry. @@ -31,19 +32,20 @@ DescribeTable describes a table-driven spec. 
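A hedged sketch pairing the two suite-level reporting nodes above (output path illustrative; dot-imports of ginkgo and gomega plus the reporters package assumed):

var _ = ReportBeforeSuite(func(report Report) {
	// SpecReports is empty here; only suite-level data such as PreRunStats is populated.
	fmt.Printf("will run %d of %d specs\n", report.PreRunStats.SpecsThatWillRun, report.PreRunStats.TotalSpecs)
})

var _ = ReportAfterSuite("machine-readable report", func(report Report) {
	// The new variadic signature also accepts trailing decorators.
	Expect(reporters.GenerateJSONReport(report, "report.json")).To(Succeed())
})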
For example: - DescribeTable("a simple table", - func(x int, y int, expected bool) { - Ω(x > y).Should(Equal(expected)) - }, - Entry("x > y", 1, 0, true), - Entry("x == y", 0, 0, false), - Entry("x < y", 0, 1, false), - ) + DescribeTable("a simple table", + func(x int, y int, expected bool) { + Ω(x > y).Should(Equal(expected)) + }, + Entry("x > y", 1, 0, true), + Entry("x == y", 0, 0, false), + Entry("x < y", 0, 1, false), + ) You can learn more about DescribeTable here: https://onsi.github.io/ginkgo/#table-specs And can explore some Table patterns here: https://onsi.github.io/ginkgo/#table-specs-patterns */ func DescribeTable(description string, args ...interface{}) bool { + GinkgoHelper() generateTable(description, args...) return true } @@ -52,6 +54,7 @@ func DescribeTable(description string, args ...interface{}) bool { You can focus a table with `FDescribeTable`. This is equivalent to `FDescribe`. */ func FDescribeTable(description string, args ...interface{}) bool { + GinkgoHelper() args = append(args, internal.Focus) generateTable(description, args...) return true @@ -61,6 +64,7 @@ func FDescribeTable(description string, args ...interface{}) bool { You can mark a table as pending with `PDescribeTable`. This is equivalent to `PDescribe`. */ func PDescribeTable(description string, args ...interface{}) bool { + GinkgoHelper() args = append(args, internal.Pending) generateTable(description, args...) return true @@ -89,29 +93,34 @@ Subsequent arguments accept any Ginkgo decorators. These are filtered out and t Each Entry ends up generating an individual Ginkgo It. The body of the it is the Table Body function with the Entry parameters passed in. +If you want to generate interruptible specs simply write a Table function that accepts a SpecContext as its first argument. You can then decorate individual Entrys with the NodeTimeout and SpecTimeout decorators. + You can learn more about Entry here: https://onsi.github.io/ginkgo/#table-specs */ func Entry(description interface{}, args ...interface{}) TableEntry { + GinkgoHelper() decorations, parameters := internal.PartitionDecorations(args...) - return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(1)} + return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(0)} } /* You can focus a particular entry with FEntry. This is equivalent to FIt. */ func FEntry(description interface{}, args ...interface{}) TableEntry { + GinkgoHelper() decorations, parameters := internal.PartitionDecorations(args...) decorations = append(decorations, internal.Focus) - return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(1)} + return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(0)} } /* You can mark a particular entry as pending with PEntry. This is equivalent to PIt. */ func PEntry(description interface{}, args ...interface{}) TableEntry { + GinkgoHelper() decorations, parameters := internal.PartitionDecorations(args...) 
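The Entry documentation above describes interruptible table specs; a minimal sketch, where fetchValue is a hypothetical context-aware helper and the timeouts are illustrative:

var _ = DescribeTable("lookups are interruptible",
	func(ctx SpecContext, key, expected string) {
		v, err := fetchValue(ctx, key) // hypothetical helper honoring ctx cancellation
		Expect(err).NotTo(HaveOccurred())
		Expect(v).To(Equal(expected))
	},
	Entry("existing key", "color", "blue", NodeTimeout(time.Second)),
	Entry("another key", "shape", "round", NodeTimeout(2*time.Second)),
)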
decorations = append(decorations, internal.Pending) - return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(1)} + return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(0)} } /* @@ -119,12 +128,17 @@ You can mark a particular entry as pending with XEntry. This is equivalent to X */ var XEntry = PEntry +var contextType = reflect.TypeOf(new(context.Context)).Elem() +var specContextType = reflect.TypeOf(new(SpecContext)).Elem() + func generateTable(description string, args ...interface{}) { - cl := types.NewCodeLocation(2) + GinkgoHelper() + cl := types.NewCodeLocation(0) containerNodeArgs := []interface{}{cl} entries := []TableEntry{} var itBody interface{} + var itBodyType reflect.Type var tableLevelEntryDescription interface{} tableLevelEntryDescription = func(args ...interface{}) string { @@ -135,6 +149,10 @@ func generateTable(description string, args ...interface{}) { return "Entry: " + strings.Join(out, ", ") } + if len(args) == 1 { + exitIfErr(types.GinkgoErrors.MissingParametersForTableFunction(cl)) + } + for i, arg := range args { switch t := reflect.TypeOf(arg); { case t == nil: @@ -152,6 +170,7 @@ func generateTable(description string, args ...interface{}) { exitIfErr(types.GinkgoErrors.MultipleEntryBodyFunctionsForTable(cl)) } itBody = arg + itBodyType = reflect.TypeOf(itBody) default: containerNodeArgs = append(containerNodeArgs, arg) } @@ -164,7 +183,7 @@ func generateTable(description string, args ...interface{}) { var description string switch t := reflect.TypeOf(entry.description); { case t == nil: - err = validateParameters(tableLevelEntryDescription, entry.parameters, "Entry Description function", entry.codeLocation) + err = validateParameters(tableLevelEntryDescription, entry.parameters, "Entry Description function", entry.codeLocation, false) if err == nil { description = invokeFunction(tableLevelEntryDescription, entry.parameters)[0].String() } @@ -173,7 +192,7 @@ func generateTable(description string, args ...interface{}) { case t == reflect.TypeOf(""): description = entry.description.(string) case t.Kind() == reflect.Func && t.NumOut() == 1 && t.Out(0) == reflect.TypeOf(""): - err = validateParameters(entry.description, entry.parameters, "Entry Description function", entry.codeLocation) + err = validateParameters(entry.description, entry.parameters, "Entry Description function", entry.codeLocation, false) if err == nil { description = invokeFunction(entry.description, entry.parameters)[0].String() } @@ -181,17 +200,37 @@ func generateTable(description string, args ...interface{}) { err = types.GinkgoErrors.InvalidEntryDescription(entry.codeLocation) } - if err == nil { - err = validateParameters(itBody, entry.parameters, "Table Body function", entry.codeLocation) - } itNodeArgs := []interface{}{entry.codeLocation} itNodeArgs = append(itNodeArgs, entry.decorations...) - itNodeArgs = append(itNodeArgs, func() { - if err != nil { - panic(err) + + hasContext := false + if itBodyType.NumIn() > 0
{ + if itBodyType.In(0).Implements(specContextType) { + hasContext = true + } else if itBodyType.In(0).Implements(contextType) && (len(entry.parameters) == 0 || !reflect.TypeOf(entry.parameters[0]).Implements(contextType)) { + hasContext = true } - invokeFunction(itBody, entry.parameters) - }) + } + + if err == nil { + err = validateParameters(itBody, entry.parameters, "Table Body function", entry.codeLocation, hasContext) + } + + if hasContext { + itNodeArgs = append(itNodeArgs, func(c SpecContext) { + if err != nil { + panic(err) + } + invokeFunction(itBody, append([]interface{}{c}, entry.parameters...)) + }) + } else { + itNodeArgs = append(itNodeArgs, func() { + if err != nil { + panic(err) + } + invokeFunction(itBody, entry.parameters) + }) + } pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, description, itNodeArgs...)) } @@ -223,9 +262,14 @@ func invokeFunction(function interface{}, parameters []interface{}) []reflect.Va return reflect.ValueOf(function).Call(inValues) } -func validateParameters(function interface{}, parameters []interface{}, kind string, cl types.CodeLocation) error { +func validateParameters(function interface{}, parameters []interface{}, kind string, cl types.CodeLocation, hasContext bool) error { funcType := reflect.TypeOf(function) limit := funcType.NumIn() + offset := 0 + if hasContext { + limit = limit - 1 + offset = 1 + } if funcType.IsVariadic() { limit = limit - 1 } @@ -238,13 +282,13 @@ func validateParameters(function interface{}, parameters []interface{}, kind str var i = 0 for ; i < limit; i++ { actual := reflect.TypeOf(parameters[i]) - expected := funcType.In(i) + expected := funcType.In(i + offset) if !(actual == nil) && !actual.AssignableTo(expected) { return types.GinkgoErrors.IncorrectParameterTypeToTableFunction(i+1, expected, actual, kind, cl) } } if funcType.IsVariadic() { - expected := funcType.In(limit).Elem() + expected := funcType.In(limit + offset).Elem() for ; i < len(parameters); i++ { actual := reflect.TypeOf(parameters[i]) if !(actual == nil) && !actual.AssignableTo(expected) { diff --git a/vendor/github.com/onsi/ginkgo/v2/types/code_location.go b/vendor/github.com/onsi/ginkgo/v2/types/code_location.go index 1291091..9cd5768 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/code_location.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/code_location.go @@ -7,6 +7,7 @@ import ( "runtime" "runtime/debug" "strings" + "sync" ) type CodeLocation struct { @@ -38,6 +39,73 @@ func (codeLocation CodeLocation) ContentsOfLine() string { return lines[codeLocation.LineNumber-1] } +type codeLocationLocator struct { + pcs map[uintptr]bool + helpers map[string]bool + lock *sync.Mutex +} + +func (c *codeLocationLocator) addHelper(pc uintptr) { + c.lock.Lock() + defer c.lock.Unlock() + + if c.pcs[pc] { + return + } + c.lock.Unlock() + f := runtime.FuncForPC(pc) + c.lock.Lock() + if f == nil { + return + } + c.helpers[f.Name()] = true + c.pcs[pc] = true +} + +func (c *codeLocationLocator) hasHelper(name string) bool { + c.lock.Lock() + defer c.lock.Unlock() + return c.helpers[name] +} + +func (c *codeLocationLocator) getCodeLocation(skip int) CodeLocation { + pc := make([]uintptr, 40) + n := runtime.Callers(skip+2, pc) + if n == 0 { + return CodeLocation{} + } + pc = pc[:n] + frames := runtime.CallersFrames(pc) + for { + frame, more := frames.Next() + if !c.hasHelper(frame.Function) { + return CodeLocation{FileName: frame.File, LineNumber: frame.Line} + } + if !more { + break + } + } + return CodeLocation{} +} + +var clLocator = 
&codeLocationLocator{ + pcs: map[uintptr]bool{}, + helpers: map[string]bool{}, + lock: &sync.Mutex{}, +} + +// MarkAsHelper is used by GinkgoHelper to mark the caller (appropriately offset by skip) as a helper. You can use this directly if you need to provide an optional `skip` to mark functions further up the call stack as helpers. +func MarkAsHelper(optionalSkip ...int) { + skip := 1 + if len(optionalSkip) > 0 { + skip += optionalSkip[0] + } + pc, _, _, ok := runtime.Caller(skip) + if ok { + clLocator.addHelper(pc) + } +} + func NewCustomCodeLocation(message string) CodeLocation { return CodeLocation{ CustomMessage: message, @@ -45,14 +113,13 @@ func NewCustomCodeLocation(message string) CodeLocation { } func NewCodeLocation(skip int) CodeLocation { - _, file, line, _ := runtime.Caller(skip + 1) - return CodeLocation{FileName: file, LineNumber: line} + return clLocator.getCodeLocation(skip + 1) } func NewCodeLocationWithStackTrace(skip int) CodeLocation { - _, file, line, _ := runtime.Caller(skip + 1) - stackTrace := PruneStack(string(debug.Stack()), skip+1) - return CodeLocation{FileName: file, LineNumber: line, FullStackTrace: stackTrace} + cl := clLocator.getCodeLocation(skip + 1) + cl.FullStackTrace = PruneStack(string(debug.Stack()), skip+1) + return cl } // PruneStack removes references to functions that are internal to Ginkgo diff --git a/vendor/github.com/onsi/ginkgo/v2/types/config.go b/vendor/github.com/onsi/ginkgo/v2/types/config.go index 07ef4c3..1014c7b 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/config.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/config.go @@ -8,6 +8,7 @@ package types import ( "flag" "os" + "path/filepath" "runtime" "strconv" "strings" @@ -26,10 +27,14 @@ type SuiteConfig struct { FailOnPending bool FailFast bool FlakeAttempts int - EmitSpecProgress bool DryRun bool + PollProgressAfter time.Duration + PollProgressInterval time.Duration Timeout time.Duration + EmitSpecProgress bool // this is deprecated but its removal is causing compile issues for some users that were setting it manually OutputInterceptorMode string + SourceRoots []string + GracePeriod time.Duration ParallelProcess int ParallelTotal int @@ -42,6 +47,7 @@ func NewDefaultSuiteConfig() SuiteConfig { Timeout: time.Hour, ParallelProcess: 1, ParallelTotal: 1, + GracePeriod: 30 * time.Second, } } @@ -76,13 +82,12 @@ func (vl VerbosityLevel) LT(comp VerbosityLevel) bool { // Configuration for Ginkgo's reporter type ReporterConfig struct { - NoColor bool - SlowSpecThreshold time.Duration - Succinct bool - Verbose bool - VeryVerbose bool - FullTrace bool - AlwaysEmitGinkgoWriter bool + NoColor bool + Succinct bool + Verbose bool + VeryVerbose bool + FullTrace bool + ShowNodeEvents bool JSONReport string JUnitReport string @@ -105,9 +110,7 @@ func (rc ReporterConfig) WillGenerateReport() bool { } func NewDefaultReporterConfig() ReporterConfig { - return ReporterConfig{ - SlowSpecThreshold: 5 * time.Second, - } + return ReporterConfig{} } // Configuration for the Ginkgo CLI @@ -230,6 +233,9 @@ type deprecatedConfig struct { SlowSpecThresholdWithFLoatUnits float64 Stream bool Notify bool + EmitSpecProgress bool + SlowSpecThreshold time.Duration + AlwaysEmitGinkgoWriter bool } // Flags @@ -270,10 +276,16 @@ var SuiteConfigFlags = GinkgoFlags{ {KeyPath: "S.DryRun", Name: "dry-run", SectionKey: "debug", DeprecatedName: "dryRun", DeprecatedDocLink: "changed-command-line-flags", Usage: "If set, ginkgo will walk the test hierarchy without actually running anything. 
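MarkAsHelper above is the machinery behind the public GinkgoHelper(); a minimal sketch of the intended use, so that failure locations point at the calling spec rather than the assertion helper (the helper and response check are hypothetical):

import (
	"net/http"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

func ExpectHealthy(resp *http.Response) {
	GinkgoHelper() // mark this function so reported code locations skip past it
	Expect(resp.StatusCode).To(Equal(http.StatusOK))
}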
Best paired with -v."}, - {KeyPath: "S.EmitSpecProgress", Name: "progress", SectionKey: "debug", - Usage: "If set, ginkgo will emit progress information as each spec runs to the GinkgoWriter."}, + {KeyPath: "S.PollProgressAfter", Name: "poll-progress-after", SectionKey: "debug", UsageDefaultValue: "0", + Usage: "Emit node progress reports periodically if node hasn't completed after this duration."}, + {KeyPath: "S.PollProgressInterval", Name: "poll-progress-interval", SectionKey: "debug", UsageDefaultValue: "10s", + Usage: "The rate at which to emit node progress reports after poll-progress-after has elapsed."}, + {KeyPath: "S.SourceRoots", Name: "source-root", SectionKey: "debug", + Usage: "The location to look for source code when generating progress reports. You can pass multiple --source-root flags."}, {KeyPath: "S.Timeout", Name: "timeout", SectionKey: "debug", UsageDefaultValue: "1h", Usage: "Test suite fails if it does not complete within the specified timeout."}, + {KeyPath: "S.GracePeriod", Name: "grace-period", SectionKey: "debug", UsageDefaultValue: "30s", + Usage: "When interrupted, Ginkgo will wait for GracePeriod for the current running node to exit before moving on to the next one."}, {KeyPath: "S.OutputInterceptorMode", Name: "output-interceptor-mode", SectionKey: "debug", UsageArgument: "dup, swap, or none", Usage: "If set, ginkgo will use the specified output interception strategy when running in parallel. Defaults to dup on unix and swap on windows."}, @@ -290,6 +302,8 @@ var SuiteConfigFlags = GinkgoFlags{ {KeyPath: "D.RegexScansFilePath", DeprecatedName: "regexScansFilePath", DeprecatedDocLink: "removed--regexscansfilepath", DeprecatedVersion: "2.0.0"}, {KeyPath: "D.DebugParallel", DeprecatedName: "debug", DeprecatedDocLink: "removed--debug", DeprecatedVersion: "2.0.0"}, + {KeyPath: "D.EmitSpecProgress", DeprecatedName: "progress", SectionKey: "debug", + DeprecatedVersion: "2.5.0", Usage: ". The functionality provided by --progress was confusing and is no longer needed. Use --show-node-events instead to see node entry and exit events included in the timeline of failed and verbose specs. Or you can run with -vv to always see all node events. 
Lastly, --poll-progress-after and the PollProgressAfter decorator now provide a better mechanism for debugging specs that tend to get stuck."}, } // ParallelConfigFlags provides flags for the Ginkgo test process (not the CLI) @@ -306,8 +320,6 @@ var ParallelConfigFlags = GinkgoFlags{ var ReporterConfigFlags = GinkgoFlags{ {KeyPath: "R.NoColor", Name: "no-color", SectionKey: "output", DeprecatedName: "noColor", DeprecatedDocLink: "changed-command-line-flags", Usage: "If set, suppress color output in default reporter."}, - {KeyPath: "R.SlowSpecThreshold", Name: "slow-spec-threshold", SectionKey: "output", UsageArgument: "duration", UsageDefaultValue: "5s", - Usage: "Specs that take longer to run than this threshold are flagged as slow by the default reporter."}, {KeyPath: "R.Verbose", Name: "v", SectionKey: "output", Usage: "If set, emits more output including GinkgoWriter contents."}, {KeyPath: "R.VeryVerbose", Name: "vv", SectionKey: "output", @@ -316,8 +328,8 @@ var ReporterConfigFlags = GinkgoFlags{ Usage: "If set, default reporter prints out a very succinct report"}, {KeyPath: "R.FullTrace", Name: "trace", SectionKey: "output", Usage: "If set, default reporter prints out the full stack trace when a failure occurs"}, - {KeyPath: "R.AlwaysEmitGinkgoWriter", Name: "always-emit-ginkgo-writer", SectionKey: "output", DeprecatedName: "reportPassed", DeprecatedDocLink: "renamed--reportpassed", - Usage: "If set, default reporter prints out captured output of passed tests."}, + {KeyPath: "R.ShowNodeEvents", Name: "show-node-events", SectionKey: "output", + Usage: "If set, default reporter prints node > Enter and < Exit events when specs fail"}, {KeyPath: "R.JSONReport", Name: "json-report", UsageArgument: "filename.json", SectionKey: "output", Usage: "If set, Ginkgo will generate a JSON-formatted test report at the specified location."}, @@ -330,6 +342,8 @@ var ReporterConfigFlags = GinkgoFlags{ Usage: "use --slow-spec-threshold instead and pass in a duration string (e.g. '5s', not '5.0')"}, {KeyPath: "D.NoisyPendings", DeprecatedName: "noisyPendings", DeprecatedDocLink: "removed--noisypendings-and--noisyskippings", DeprecatedVersion: "2.0.0"}, {KeyPath: "D.NoisySkippings", DeprecatedName: "noisySkippings", DeprecatedDocLink: "removed--noisypendings-and--noisyskippings", DeprecatedVersion: "2.0.0"}, + {KeyPath: "D.SlowSpecThreshold", DeprecatedName: "slow-spec-threshold", SectionKey: "output", Usage: "--slow-spec-threshold has been deprecated and will be removed in a future version of Ginkgo. This feature has proved to be more noisy than useful. 
You can use --poll-progress-after, instead, to get more actionable feedback about potentially slow specs and understand where they might be getting stuck.", DeprecatedVersion: "2.5.0"}, + {KeyPath: "D.AlwaysEmitGinkgoWriter", DeprecatedName: "always-emit-ginkgo-writer", SectionKey: "output", Usage: " - use -v instead, or one of Ginkgo's machine-readable report formats to get GinkgoWriter output for passing specs."}, } // BuildTestSuiteFlagSet attaches to the CommandLine flagset and provides flags for the Ginkgo test process @@ -381,6 +395,10 @@ func VetConfig(flagSet GinkgoFlagSet, suiteConfig SuiteConfig, reporterConfig Re errors = append(errors, GinkgoErrors.DryRunInParallelConfiguration()) } + if suiteConfig.GracePeriod <= 0 { + errors = append(errors, GinkgoErrors.GracePeriodCannotBeZero()) + } + if len(suiteConfig.FocusFiles) > 0 { _, err := ParseFileFilters(suiteConfig.FocusFiles) if err != nil { @@ -583,13 +601,29 @@ func VetAndInitializeCLIAndGoConfig(cliConfig CLIConfig, goFlagsConfig GoFlagsCo } // GenerateGoTestCompileArgs is used by the Ginkgo CLI to generate command line arguments to pass to the go test -c command when compiling the test -func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, destination string, packageToBuild string) ([]string, error) { +func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, destination string, packageToBuild string, pathToInvocationPath string) ([]string, error) { // if the user has set the CoverProfile run-time flag make sure to set the build-time cover flag to make sure // the built test binary can generate a coverprofile if goFlagsConfig.CoverProfile != "" { goFlagsConfig.Cover = true } + if goFlagsConfig.CoverPkg != "" { + coverPkgs := strings.Split(goFlagsConfig.CoverPkg, ",") + adjustedCoverPkgs := make([]string, len(coverPkgs)) + for i, coverPkg := range coverPkgs { + coverPkg = strings.Trim(coverPkg, " ") + if strings.HasPrefix(coverPkg, "./") { + // this is a relative coverPkg - we need to reroot it + adjustedCoverPkgs[i] = "./" + filepath.Join(pathToInvocationPath, strings.TrimPrefix(coverPkg, "./")) + } else { + // this is a package name - don't touch it + adjustedCoverPkgs[i] = coverPkg + } + } + goFlagsConfig.CoverPkg = strings.Join(adjustedCoverPkgs, ",") + } + args := []string{"test", "-c", "-o", destination, packageToBuild} goArgs, err := GenerateFlagArgs( GoBuildFlags, diff --git a/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go b/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go index 2948dfa..f267bde 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go @@ -83,6 +83,13 @@ func (d deprecations) Nodot() Deprecation { } } +func (d deprecations) SuppressProgressReporting() Deprecation { + return Deprecation{ + Message: "Improvements to how reporters emit timeline information means that SuppressProgressReporting is no longer necessary and has been deprecated.", + Version: "2.5.0", + } +} + type DeprecationTracker struct { deprecations map[Deprecation][]CodeLocation lock *sync.Mutex diff --git a/vendor/github.com/onsi/ginkgo/v2/types/errors.go b/vendor/github.com/onsi/ginkgo/v2/types/errors.go index 40331d2..1e0dbfd 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/errors.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/errors.go @@ -108,8 +108,8 @@ Please ensure all assertions are inside leaf nodes such as {{bold}}BeforeEach{{/ func (g ginkgoErrors) SuiteNodeInNestedContext(nodeType NodeType, cl 
CodeLocation) error { docLink := "suite-setup-and-cleanup-beforesuite-and-aftersuite" - if nodeType.Is(NodeTypeReportAfterSuite) { - docLink = "reporting-nodes---reportaftersuite" + if nodeType.Is(NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite) { + docLink = "reporting-nodes---reportbeforesuite-and-reportaftersuite" } return GinkgoError{ @@ -125,8 +125,8 @@ func (g ginkgoErrors) SuiteNodeInNestedContext(nodeType NodeType, cl CodeLocatio func (g ginkgoErrors) SuiteNodeDuringRunPhase(nodeType NodeType, cl CodeLocation) error { docLink := "suite-setup-and-cleanup-beforesuite-and-aftersuite" - if nodeType.Is(NodeTypeReportAfterSuite) { - docLink = "reporting-nodes---reportaftersuite" + if nodeType.Is(NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite) { + docLink = "reporting-nodes---reportbeforesuite-and-reportaftersuite" } return GinkgoError{ @@ -180,6 +180,15 @@ func (g ginkgoErrors) InvalidDeclarationOfFocusedAndPending(cl CodeLocation, nod } } +func (g ginkgoErrors) InvalidDeclarationOfFlakeAttemptsAndMustPassRepeatedly(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Invalid Combination of Decorators: FlakeAttempts and MustPassRepeatedly", + Message: formatter.F(`[%s] node was decorated with both FlakeAttempts and MustPassRepeatedly. At most one is allowed.`, nodeType), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + func (g ginkgoErrors) UnknownDecorator(cl CodeLocation, nodeType NodeType, decorator interface{}) error { return GinkgoError{ Heading: "Unknown Decorator", @@ -189,20 +198,55 @@ func (g ginkgoErrors) UnknownDecorator(cl CodeLocation, nodeType NodeType, decor } } +func (g ginkgoErrors) InvalidBodyTypeForContainer(t reflect.Type, cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Invalid Function", + Message: formatter.F(`[%s] node must be passed {{bold}}func(){{/}} - i.e. functions that take nothing and return nothing. You passed {{bold}}%s{{/}} instead.`, nodeType, t), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + func (g ginkgoErrors) InvalidBodyType(t reflect.Type, cl CodeLocation, nodeType NodeType) error { + mustGet := "{{bold}}func(){{/}}, {{bold}}func(ctx SpecContext){{/}}, or {{bold}}func(ctx context.Context){{/}}" + if nodeType.Is(NodeTypeContainer) { + mustGet = "{{bold}}func(){{/}}" + } return GinkgoError{ Heading: "Invalid Function", - Message: formatter.F(`[%s] node must be passed {{bold}}func(){{/}} - i.e. functions that take nothing and return nothing. + Message: formatter.F(`[%s] node must be passed `+mustGet+`. You passed {{bold}}%s{{/}} instead.`, nodeType, t), CodeLocation: cl, DocLink: "node-decorators-overview", } } +func (g ginkgoErrors) InvalidBodyTypeForSynchronizedBeforeSuiteProc1(t reflect.Type, cl CodeLocation) error { + mustGet := "{{bold}}func() []byte{{/}}, {{bold}}func(ctx SpecContext) []byte{{/}}, or {{bold}}func(ctx context.Context) []byte{{/}}, {{bold}}func(){{/}}, {{bold}}func(ctx SpecContext){{/}}, or {{bold}}func(ctx context.Context){{/}}" + return GinkgoError{ + Heading: "Invalid Function", + Message: formatter.F(`[SynchronizedBeforeSuite] node must be passed `+mustGet+` for its first function. 
+You passed {{bold}}%s{{/}} instead.`, t), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) InvalidBodyTypeForSynchronizedBeforeSuiteAllProcs(t reflect.Type, cl CodeLocation) error { + mustGet := "{{bold}}func(){{/}}, {{bold}}func(ctx SpecContext){{/}}, or {{bold}}func(ctx context.Context){{/}}, {{bold}}func([]byte){{/}}, {{bold}}func(ctx SpecContext, []byte){{/}}, or {{bold}}func(ctx context.Context, []byte){{/}}" + return GinkgoError{ + Heading: "Invalid Function", + Message: formatter.F(`[SynchronizedBeforeSuite] node must be passed `+mustGet+` for its second function. +You passed {{bold}}%s{{/}} instead.`, t), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + func (g ginkgoErrors) MultipleBodyFunctions(cl CodeLocation, nodeType NodeType) error { return GinkgoError{ Heading: "Multiple Functions", - Message: formatter.F(`[%s] node must be passed a single {{bold}}func(){{/}} - but more than one was passed in.`, nodeType), + Message: formatter.F(`[%s] node must be passed a single function - but more than one was passed in.`, nodeType), CodeLocation: cl, DocLink: "node-decorators-overview", } @@ -211,12 +255,30 @@ func (g ginkgoErrors) MultipleBodyFunctions(cl CodeLocation, nodeType NodeType) func (g ginkgoErrors) MissingBodyFunction(cl CodeLocation, nodeType NodeType) error { return GinkgoError{ Heading: "Missing Functions", - Message: formatter.F(`[%s] node must be passed a single {{bold}}func(){{/}} - but none was passed in.`, nodeType), + Message: formatter.F(`[%s] node must be passed a single function - but none was passed in.`, nodeType), CodeLocation: cl, DocLink: "node-decorators-overview", } } +func (g ginkgoErrors) InvalidTimeoutOrGracePeriodForNonContextNode(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Invalid NodeTimeout, SpecTimeout, or GracePeriod", + Message: formatter.F(`[%s] was passed NodeTimeout, SpecTimeout, or GracePeriod but does not have a callback that accepts a {{bold}}SpecContext{{/}} or {{bold}}context.Context{{/}}. You must accept a context to enable timeouts and grace periods`, nodeType), + CodeLocation: cl, + DocLink: "spec-timeouts-and-interruptible-nodes", + } +} + +func (g ginkgoErrors) InvalidTimeoutOrGracePeriodForNonContextCleanupNode(cl CodeLocation) error { + return GinkgoError{ + Heading: "Invalid NodeTimeout, SpecTimeout, or GracePeriod", + Message: formatter.F(`[DeferCleanup] was passed NodeTimeout or GracePeriod but does not have a callback that accepts a {{bold}}SpecContext{{/}} or {{bold}}context.Context{{/}}. 
You must accept a context to enable timeouts and grace periods`), + CodeLocation: cl, + DocLink: "spec-timeouts-and-interruptible-nodes", + } +} + /* Ordered Container errors */ func (g ginkgoErrors) InvalidSerialNodeInNonSerialOrderedContainer(cl CodeLocation, nodeType NodeType) error { return GinkgoError{ @@ -236,6 +298,15 @@ func (g ginkgoErrors) SetupNodeNotInOrderedContainer(cl CodeLocation, nodeType N } } +func (g ginkgoErrors) InvalidContinueOnFailureDecoration(cl CodeLocation) error { + return GinkgoError{ + Heading: "ContinueOnFailure not decorating an outermost Ordered Container", + Message: "ContinueOnFailure can only decorate an Ordered container, and this Ordered container must be the outermost Ordered container.", + CodeLocation: cl, + DocLink: "ordered-containers", + } +} + /* DeferCleanup errors */ func (g ginkgoErrors) DeferCleanupInvalidFunction(cl CodeLocation) error { return GinkgoError{ @@ -258,7 +329,7 @@ func (g ginkgoErrors) PushingCleanupNodeDuringTreeConstruction(cl CodeLocation) func (g ginkgoErrors) PushingCleanupInReportingNode(cl CodeLocation, nodeType NodeType) error { return GinkgoError{ Heading: fmt.Sprintf("DeferCleanup cannot be called in %s", nodeType), - Message: "Please inline your cleanup code - Ginkgo won't run cleanup code after a ReportAfterEach or ReportAfterSuite.", + Message: "Please inline your cleanup code - Ginkgo won't run cleanup code after a Reporting node.", CodeLocation: cl, DocLink: "cleaning-up-our-cleanup-code-defercleanup", } @@ -380,6 +451,15 @@ func (g ginkgoErrors) InvalidEntryDescription(cl CodeLocation) error { } } +func (g ginkgoErrors) MissingParametersForTableFunction(cl CodeLocation) error { + return GinkgoError{ + Heading: fmt.Sprintf("No parameters have been passed to the Table Function"), + Message: fmt.Sprintf("The Table Function expected at least 1 parameter"), + CodeLocation: cl, + DocLink: "table-specs", + } +} + func (g ginkgoErrors) IncorrectParameterTypeForTable(i int, name string, cl CodeLocation) error { return GinkgoError{ Heading: "DescribeTable passed incorrect parameter type", @@ -498,6 +578,13 @@ func (g ginkgoErrors) DryRunInParallelConfiguration() error { } } +func (g ginkgoErrors) GracePeriodCannotBeZero() error { + return GinkgoError{ + Heading: "Ginkgo requires a positive --grace-period.", + Message: "Please set --grace-period to a positive duration. The default is 30s.", + } +} + func (g ginkgoErrors) ConflictingVerbosityConfiguration() error { return GinkgoError{ Heading: "Conflicting reporter verbosity settings.", @@ -532,3 +619,12 @@ func (g ginkgoErrors) BothRepeatAndUntilItFails() error { Message: "--until-it-fails directs Ginkgo to rerun specs indefinitely until they fail. --repeat directs Ginkgo to rerun specs a set number of times. You can't set both... 
which would you like?", } } + +/* Stack-Trace parsing errors */ + +func (g ginkgoErrors) FailedToParseStackTrace(message string) error { + return GinkgoError{ + Heading: "Failed to Parse Stack Trace", + Message: message, + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go index 0403f9e..b0d3b65 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go @@ -272,12 +272,23 @@ func tokenize(input string) func() (*treeNode, error) { } } +func MustParseLabelFilter(input string) LabelFilter { + filter, err := ParseLabelFilter(input) + if err != nil { + panic(err) + } + return filter +} + func ParseLabelFilter(input string) (LabelFilter, error) { if DEBUG_LABEL_FILTER_PARSING { fmt.Println("\n==============") fmt.Println("Input: ", input) fmt.Print("Tokens: ") } + if input == "" { + return func(_ []string) bool { return true }, nil + } nextToken := tokenize(input) root := &treeNode{token: lfTokenRoot} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go b/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go index c64866c..7b1524b 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go @@ -6,8 +6,8 @@ import ( "time" ) -//ReportEntryValue wraps a report entry's value ensuring it can be encoded and decoded safely into reports -//and across the network connection when running in parallel +// ReportEntryValue wraps a report entry's value ensuring it can be encoded and decoded safely into reports +// and across the network connection when running in parallel type ReportEntryValue struct { raw interface{} //unexported to prevent gob from freaking out about unregistered structs AsJSON string @@ -50,7 +50,6 @@ func (rev ReportEntryValue) MarshalJSON() ([]byte, error) { }{ Representation: rev.String(), } - asJSON, err := json.Marshal(rev.raw) if err != nil { return nil, err @@ -86,10 +85,12 @@ func (rev *ReportEntryValue) GobDecode(data []byte) error { type ReportEntry struct { // Visibility captures the visibility policy for this ReportEntry Visibility ReportEntryVisibility - // Time captures the time the AddReportEntry was called - Time time.Time // Location captures the location of the AddReportEntry call Location CodeLocation + + Time time.Time //need this for backwards compatibility + TimelineLocation TimelineLocation + // Name captures the name of this report Name string // Value captures the (optional) object passed into AddReportEntry - this can be @@ -98,7 +99,7 @@ type ReportEntry struct { Value ReportEntryValue } -// ColorableStringer is an interface that ReportEntry values can satisfy. If they do then ColorableStirng() is used to generate their representation. +// ColorableStringer is an interface that ReportEntry values can satisfy. If they do then ColorableString() is used to generate their representation. 
type ColorableStringer interface {
 	ColorableString() string
 }
@@ -121,6 +122,10 @@ func (entry ReportEntry) GetRawValue() interface{} {
 	return entry.Value.GetRawValue()
 }
 
+func (entry ReportEntry) GetTimelineLocation() TimelineLocation {
+	return entry.TimelineLocation
+}
+
 type ReportEntries []ReportEntry
 
 func (re ReportEntries) HasVisibility(visibilities ...ReportEntryVisibility) bool {
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/types.go b/vendor/github.com/onsi/ginkgo/v2/types/types.go
index f30d23c..d048a8a 100644
--- a/vendor/github.com/onsi/ginkgo/v2/types/types.go
+++ b/vendor/github.com/onsi/ginkgo/v2/types/types.go
@@ -2,6 +2,8 @@ package types
 
 import (
 	"encoding/json"
+	"fmt"
+	"sort"
 	"strings"
 	"time"
 )
@@ -56,19 +58,20 @@ type Report struct {
 	SuiteConfig SuiteConfig
 
 	//SpecReports is a list of all SpecReports generated by this test run
+	//It is empty when the SuiteReport is provided to ReportBeforeSuite
 	SpecReports SpecReports
 }
 
-//PreRunStats contains a set of stats captured before the test run begins. This is primarily used
-//by Ginkgo's reporter to tell the user how many specs are in the current suite (PreRunStats.TotalSpecs)
-//and how many it intends to run (PreRunStats.SpecsThatWillRun) after applying any relevant focus or skip filters.
+// PreRunStats contains a set of stats captured before the test run begins. This is primarily used
+// by Ginkgo's reporter to tell the user how many specs are in the current suite (PreRunStats.TotalSpecs)
+// and how many it intends to run (PreRunStats.SpecsThatWillRun) after applying any relevant focus or skip filters.
 type PreRunStats struct {
 	TotalSpecs       int
 	SpecsThatWillRun int
 }
 
-//Add is ued by Ginkgo's parallel aggregation mechanisms to combine test run reports form individual parallel processes
-//to form a complete final report.
+// Add is used by Ginkgo's parallel aggregation mechanisms to combine test run reports from individual parallel processes
+// to form a complete final report.
 func (report Report) Add(other Report) Report {
 	report.SuiteSucceeded = report.SuiteSucceeded && other.SuiteSucceeded
 
@@ -147,14 +150,24 @@ type SpecReport struct {
 	// ParallelProcess captures the parallel process that this spec ran on
 	ParallelProcess int
 
+	// RunningInParallel captures whether this spec is part of a suite that ran in parallel
+	RunningInParallel bool
+
 	//Failure is populated if a spec has failed, panicked, been interrupted, or skipped by the user (e.g. calling Skip())
 	//It includes detailed information about the Failure
 	Failure Failure
 
-	// NumAttempts captures the number of times this Spec was run. Flakey specs can be retried with
-	// ginkgo --flake-attempts=N
+	// NumAttempts captures the number of times this Spec was run.
+	// Flaky specs can be retried with ginkgo --flake-attempts=N or the use of the FlakeAttempts decorator.
+	// Specs can be required to pass repeatedly with the use of the MustPassRepeatedly decorator
	NumAttempts int

+	// MaxFlakeAttempts captures the maximum number of times the spec can be retried with ginkgo --flake-attempts=N or the use of the FlakeAttempts decorator.
+	MaxFlakeAttempts int
+
+	// MaxMustPassRepeatedly captures the number of times the spec must pass when the MustPassRepeatedly decorator is used
+	MaxMustPassRepeatedly int
+
 	// CapturedGinkgoWriterOutput contains text printed to the GinkgoWriter
 	CapturedGinkgoWriterOutput string
@@ -165,6 +178,15 @@ type SpecReport struct {
 	// ReportEntries contains any reports added via `AddReportEntry`
 	ReportEntries ReportEntries
+
+	// ProgressReports contains any progress reports generated during this spec. These can either be manually triggered, or automatically generated by Ginkgo via the PollProgressAfter() decorator
+	ProgressReports []ProgressReport
+
+	// AdditionalFailures contains any failures that occurred after the initial spec failure. These typically occur in cleanup nodes after the initial failure and are only emitted when running in verbose mode.
+	AdditionalFailures []AdditionalFailure
+
+	// SpecEvents capture additional events that occur during the spec run
+	SpecEvents SpecEvents
 }
 
 func (report SpecReport) MarshalJSON() ([]byte, error) {
@@ -184,9 +206,14 @@ func (report SpecReport) MarshalJSON() ([]byte, error) {
 		ParallelProcess             int
 		Failure                     *Failure `json:",omitempty"`
 		NumAttempts                 int
-		CapturedGinkgoWriterOutput  string        `json:",omitempty"`
-		CapturedStdOutErr           string        `json:",omitempty"`
-		ReportEntries               ReportEntries `json:",omitempty"`
+		MaxFlakeAttempts            int
+		MaxMustPassRepeatedly       int
+		CapturedGinkgoWriterOutput  string              `json:",omitempty"`
+		CapturedStdOutErr           string              `json:",omitempty"`
+		ReportEntries               ReportEntries       `json:",omitempty"`
+		ProgressReports             []ProgressReport    `json:",omitempty"`
+		AdditionalFailures          []AdditionalFailure `json:",omitempty"`
+		SpecEvents                  SpecEvents          `json:",omitempty"`
 	}{
 		ContainerHierarchyTexts:     report.ContainerHierarchyTexts,
 		ContainerHierarchyLocations: report.ContainerHierarchyLocations,
@@ -203,6 +230,8 @@ func (report SpecReport) MarshalJSON() ([]byte, error) {
 		Failure:                     nil,
 		ReportEntries:               nil,
 		NumAttempts:                 report.NumAttempts,
+		MaxFlakeAttempts:            report.MaxFlakeAttempts,
+		MaxMustPassRepeatedly:       report.MaxMustPassRepeatedly,
 		CapturedGinkgoWriterOutput:  report.CapturedGinkgoWriterOutput,
 		CapturedStdOutErr:           report.CapturedStdOutErr,
 	}
@@ -213,6 +242,15 @@ func (report SpecReport) MarshalJSON() ([]byte, error) {
 	if len(report.ReportEntries) > 0 {
 		out.ReportEntries = report.ReportEntries
 	}
+	if len(report.ProgressReports) > 0 {
+		out.ProgressReports = report.ProgressReports
+	}
+	if len(report.AdditionalFailures) > 0 {
+		out.AdditionalFailures = report.AdditionalFailures
+	}
+	if len(report.SpecEvents) > 0 {
+		out.SpecEvents = report.SpecEvents
+	}
 
 	return json.Marshal(out)
 }
@@ -230,13 +268,13 @@ func (report SpecReport) CombinedOutput() string {
 	return report.CapturedStdOutErr + "\n" + report.CapturedGinkgoWriterOutput
 }
 
-//Failed returns true if report.State is one of the SpecStateFailureStates
+// Failed returns true if report.State is one of the SpecStateFailureStates
 // (SpecStateFailed, SpecStatePanicked, SpecStateinterrupted, SpecStateAborted)
 func (report SpecReport) Failed() bool {
 	return report.State.Is(SpecStateFailureStates)
 }
 
-//FullText returns a concatenation of all the report.ContainerHierarchyTexts and report.LeafNodeText
+// FullText returns a concatenation of all the report.ContainerHierarchyTexts and report.LeafNodeText
 func (report SpecReport) FullText() string {
 	texts := []string{}
 	texts = append(texts, report.ContainerHierarchyTexts...)
@@ -246,7 +284,7 @@ func (report SpecReport) FullText() string { return strings.Join(texts, " ") } -//Labels returns a deduped set of all the spec's Labels. +// Labels returns a deduped set of all the spec's Labels. func (report SpecReport) Labels() []string { out := []string{} seen := map[string]bool{} @@ -268,7 +306,7 @@ func (report SpecReport) Labels() []string { return out } -//MatchesLabelFilter returns true if the spec satisfies the passed in label filter query +// MatchesLabelFilter returns true if the spec satisfies the passed in label filter query func (report SpecReport) MatchesLabelFilter(query string) (bool, error) { filter, err := ParseLabelFilter(query) if err != nil { @@ -277,29 +315,54 @@ func (report SpecReport) MatchesLabelFilter(query string) (bool, error) { return filter(report.Labels()), nil } -//FileName() returns the name of the file containing the spec +// FileName() returns the name of the file containing the spec func (report SpecReport) FileName() string { return report.LeafNodeLocation.FileName } -//LineNumber() returns the line number of the leaf node +// LineNumber() returns the line number of the leaf node func (report SpecReport) LineNumber() int { return report.LeafNodeLocation.LineNumber } -//FailureMessage() returns the failure message (or empty string if the test hasn't failed) +// FailureMessage() returns the failure message (or empty string if the test hasn't failed) func (report SpecReport) FailureMessage() string { return report.Failure.Message } -//FailureLocation() returns the location of the failure (or an empty CodeLocation if the test hasn't failed) +// FailureLocation() returns the location of the failure (or an empty CodeLocation if the test hasn't failed) func (report SpecReport) FailureLocation() CodeLocation { return report.Failure.Location } +// Timeline() returns a timeline view of the report +func (report SpecReport) Timeline() Timeline { + timeline := Timeline{} + if !report.Failure.IsZero() { + timeline = append(timeline, report.Failure) + if report.Failure.AdditionalFailure != nil { + timeline = append(timeline, *(report.Failure.AdditionalFailure)) + } + } + for _, additionalFailure := range report.AdditionalFailures { + timeline = append(timeline, additionalFailure) + } + for _, reportEntry := range report.ReportEntries { + timeline = append(timeline, reportEntry) + } + for _, progressReport := range report.ProgressReports { + timeline = append(timeline, progressReport) + } + for _, specEvent := range report.SpecEvents { + timeline = append(timeline, specEvent) + } + sort.Sort(timeline) + return timeline +} + type SpecReports []SpecReport -//WithLeafNodeType returns the subset of SpecReports with LeafNodeType matching one of the requested NodeTypes +// WithLeafNodeType returns the subset of SpecReports with LeafNodeType matching one of the requested NodeTypes func (reports SpecReports) WithLeafNodeType(nodeTypes NodeType) SpecReports { count := 0 for i := range reports { @@ -319,7 +382,7 @@ func (reports SpecReports) WithLeafNodeType(nodeTypes NodeType) SpecReports { return out } -//WithState returns the subset of SpecReports with State matching one of the requested SpecStates +// WithState returns the subset of SpecReports with State matching one of the requested SpecStates func (reports SpecReports) WithState(states SpecState) SpecReports { count := 0 for i := range reports { @@ -338,7 +401,7 @@ func (reports SpecReports) WithState(states SpecState) SpecReports { return out } -//CountWithState returns the number of SpecReports 
with State matching one of the requested SpecStates
+// CountWithState returns the number of SpecReports with State matching one of the requested SpecStates
 func (reports SpecReports) CountWithState(states SpecState) int {
 	n := 0
 	for i := range reports {
@@ -349,17 +412,75 @@ func (reports SpecReports) CountWithState(states SpecState) int {
 	return n
 }
 
-//CountWithState returns the number of SpecReports that passed after multiple attempts
+// CountOfFlakedSpecs returns the number of SpecReports that passed after multiple attempts (i.e. they flaked)
 func (reports SpecReports) CountOfFlakedSpecs() int {
 	n := 0
 	for i := range reports {
-		if reports[i].State.Is(SpecStatePassed) && reports[i].NumAttempts > 1 {
+		if reports[i].MaxFlakeAttempts > 1 && reports[i].State.Is(SpecStatePassed) && reports[i].NumAttempts > 1 {
 			n += 1
 		}
 	}
 	return n
 }
 
+// CountOfRepeatedSpecs returns the number of SpecReports that were run with the MustPassRepeatedly decorator and failed after multiple attempts
+func (reports SpecReports) CountOfRepeatedSpecs() int {
+	n := 0
+	for i := range reports {
+		if reports[i].MaxMustPassRepeatedly > 1 && reports[i].State.Is(SpecStateFailureStates) && reports[i].NumAttempts > 1 {
+			n += 1
+		}
+	}
+	return n
+}
+
+// TimelineLocation captures the location of an event in the spec's timeline
+type TimelineLocation struct {
+	//Offset is the offset (in bytes) of the event relative to the GinkgoWriter stream
+	Offset int `json:",omitempty"`
+
+	//Order is the order of the event with respect to other events. The absolute value of Order
+	//is irrelevant. All that matters is that an event with a lower Order occurs before an event with a higher Order
+	Order int `json:",omitempty"`
+
+	Time time.Time
+}
+
+// TimelineEvent represents an event on the timeline.
+// Consumers of Timeline will need to check the concrete type of each entry to determine how to handle it
+type TimelineEvent interface {
+	GetTimelineLocation() TimelineLocation
+}
+
+type Timeline []TimelineEvent
+
+func (t Timeline) Len() int { return len(t) }
+func (t Timeline) Less(i, j int) bool {
+	return t[i].GetTimelineLocation().Order < t[j].GetTimelineLocation().Order
+}
+func (t Timeline) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
+func (t Timeline) WithoutHiddenReportEntries() Timeline {
+	out := Timeline{}
+	for _, event := range t {
+		if reportEntry, isReportEntry := event.(ReportEntry); isReportEntry && reportEntry.Visibility == ReportEntryVisibilityNever {
+			continue
+		}
+		out = append(out, event)
+	}
+	return out
+}
+
+func (t Timeline) WithoutVeryVerboseSpecEvents() Timeline {
+	out := Timeline{}
+	for _, event := range t {
+		if specEvent, isSpecEvent := event.(SpecEvent); isSpecEvent && specEvent.IsOnlyVisibleAtVeryVerbose() {
+			continue
+		}
+		out = append(out, event)
+	}
+	return out
+}
+
 // Failure captures failure information for an individual test
 type Failure struct {
 	// Message - the failure message passed into Fail(...). When using a matcher library
@@ -372,6 +493,8 @@ type Failure struct {
 	// This CodeLocation will include a fully-populated StackTrace
 	Location CodeLocation
 
+	TimelineLocation TimelineLocation
+
 	// ForwardedPanic - if the failure represents a captured panic (i.e. Summary.State == SpecStatePanicked)
 	// then ForwardedPanic will be populated with a string representation of the captured panic.
ForwardedPanic string `json:",omitempty"`
@@ -379,19 +502,32 @@ type Failure struct {
 	// FailureNodeContext - one of three contexts describing the node in which the failure occurred:
 	// FailureNodeIsLeafNode means the failure occurred in the leaf node of the associated SpecReport. None of the other FailureNode fields will be populated
 	// FailureNodeAtTopLevel means the failure occurred in a non-leaf node that is defined at the top-level of the spec (i.e. not in a container). FailureNodeType and FailureNodeLocation will be populated.
-	// FailureNodeInContainer means the failure occurred in a non-leaf node that is defined within a container. FailureNodeType, FailureNodeLocaiton, and FailureNodeContainerIndex will be populated.
+	// FailureNodeInContainer means the failure occurred in a non-leaf node that is defined within a container. FailureNodeType, FailureNodeLocation, and FailureNodeContainerIndex will be populated.
 	//
 	// FailureNodeType will contain the NodeType of the node in which the failure occurred.
 	// FailureNodeLocation will contain the CodeLocation of the node in which the failure occurred.
 	// If populated, FailureNodeContainerIndex will be the index into SpecReport.ContainerHierarchyTexts and SpecReport.ContainerHierarchyLocations that represents the parent container of the node in which the failure occurred.
-	FailureNodeContext FailureNodeContext
-	FailureNodeType NodeType
-	FailureNodeLocation CodeLocation
-	FailureNodeContainerIndex int
+	FailureNodeContext FailureNodeContext `json:",omitempty"`
+
+	FailureNodeType NodeType `json:",omitempty"`
+
+	FailureNodeLocation CodeLocation `json:",omitempty"`
+
+	FailureNodeContainerIndex int `json:",omitempty"`
+
+	//ProgressReport is populated if the spec was interrupted or timed out
+	ProgressReport ProgressReport `json:",omitempty"`
+
+	//AdditionalFailure is non-nil if a follow-on failure occurred within the same node after the primary failure. This only happens when a node has timed out or been interrupted. In such cases the AdditionalFailure can include information about where/why the spec was stuck.
+	AdditionalFailure *AdditionalFailure `json:",omitempty"`
 }
 
 func (f Failure) IsZero() bool {
-	return f == Failure{}
+	return f.Message == "" && (f.Location == CodeLocation{})
+}
+
+func (f Failure) GetTimelineLocation() TimelineLocation {
+	return f.TimelineLocation
 }
 
 // FailureNodeContext captures the location context for the node containing the failing line of code
@@ -424,6 +560,18 @@ func (fnc FailureNodeContext) MarshalJSON() ([]byte, error) {
 	return fncEnumSupport.MarshJSON(uint(fnc))
 }
 
+// AdditionalFailure captures any additional failures that occur after the initial failure of a spec;
+// these typically occur in cleanup nodes after the spec has failed.
+// We can't simply use Failure as we want to track the SpecState to know what kind of failure this is +type AdditionalFailure struct { + State SpecState + Failure Failure +} + +func (f AdditionalFailure) GetTimelineLocation() TimelineLocation { + return f.Failure.TimelineLocation +} + // SpecState captures the state of a spec // To determine if a given `state` represents a failure state, use `state.Is(SpecStateFailureStates)` type SpecState uint @@ -438,6 +586,7 @@ const ( SpecStateAborted SpecStatePanicked SpecStateInterrupted + SpecStateTimedout ) var ssEnumSupport = NewEnumSupport(map[uint]string{ @@ -449,11 +598,15 @@ var ssEnumSupport = NewEnumSupport(map[uint]string{ uint(SpecStateAborted): "aborted", uint(SpecStatePanicked): "panicked", uint(SpecStateInterrupted): "interrupted", + uint(SpecStateTimedout): "timedout", }) func (ss SpecState) String() string { return ssEnumSupport.String(uint(ss)) } +func (ss SpecState) GomegaString() string { + return ssEnumSupport.String(uint(ss)) +} func (ss *SpecState) UnmarshalJSON(b []byte) error { out, err := ssEnumSupport.UnmarshJSON(b) *ss = SpecState(out) @@ -463,12 +616,131 @@ func (ss SpecState) MarshalJSON() ([]byte, error) { return ssEnumSupport.MarshJSON(uint(ss)) } -var SpecStateFailureStates = SpecStateFailed | SpecStateAborted | SpecStatePanicked | SpecStateInterrupted +var SpecStateFailureStates = SpecStateFailed | SpecStateTimedout | SpecStateAborted | SpecStatePanicked | SpecStateInterrupted func (ss SpecState) Is(states SpecState) bool { return ss&states != 0 } +// ProgressReport captures the progress of the current spec. It is, effectively, a structured Ginkgo-aware stack trace +type ProgressReport struct { + Message string `json:",omitempty"` + ParallelProcess int `json:",omitempty"` + RunningInParallel bool `json:",omitempty"` + + ContainerHierarchyTexts []string `json:",omitempty"` + LeafNodeText string `json:",omitempty"` + LeafNodeLocation CodeLocation `json:",omitempty"` + SpecStartTime time.Time `json:",omitempty"` + + CurrentNodeType NodeType `json:",omitempty"` + CurrentNodeText string `json:",omitempty"` + CurrentNodeLocation CodeLocation `json:",omitempty"` + CurrentNodeStartTime time.Time `json:",omitempty"` + + CurrentStepText string `json:",omitempty"` + CurrentStepLocation CodeLocation `json:",omitempty"` + CurrentStepStartTime time.Time `json:",omitempty"` + + AdditionalReports []string `json:",omitempty"` + + CapturedGinkgoWriterOutput string `json:",omitempty"` + TimelineLocation TimelineLocation `json:",omitempty"` + + Goroutines []Goroutine `json:",omitempty"` +} + +func (pr ProgressReport) IsZero() bool { + return pr.CurrentNodeType == NodeTypeInvalid +} + +func (pr ProgressReport) Time() time.Time { + return pr.TimelineLocation.Time +} + +func (pr ProgressReport) SpecGoroutine() Goroutine { + for _, goroutine := range pr.Goroutines { + if goroutine.IsSpecGoroutine { + return goroutine + } + } + return Goroutine{} +} + +func (pr ProgressReport) HighlightedGoroutines() []Goroutine { + out := []Goroutine{} + for _, goroutine := range pr.Goroutines { + if goroutine.IsSpecGoroutine || !goroutine.HasHighlights() { + continue + } + out = append(out, goroutine) + } + return out +} + +func (pr ProgressReport) OtherGoroutines() []Goroutine { + out := []Goroutine{} + for _, goroutine := range pr.Goroutines { + if goroutine.IsSpecGoroutine || goroutine.HasHighlights() { + continue + } + out = append(out, goroutine) + } + return out +} + +func (pr ProgressReport) WithoutCapturedGinkgoWriterOutput() ProgressReport { + 
out := pr
+	out.CapturedGinkgoWriterOutput = ""
+	return out
+}
+
+func (pr ProgressReport) WithoutOtherGoroutines() ProgressReport {
+	out := pr
+	filteredGoroutines := []Goroutine{}
+	for _, goroutine := range pr.Goroutines {
+		if goroutine.IsSpecGoroutine || goroutine.HasHighlights() {
+			filteredGoroutines = append(filteredGoroutines, goroutine)
+		}
+	}
+	out.Goroutines = filteredGoroutines
+	return out
+}
+
+func (pr ProgressReport) GetTimelineLocation() TimelineLocation {
+	return pr.TimelineLocation
+}
+
+type Goroutine struct {
+	ID              uint64
+	State           string
+	Stack           []FunctionCall
+	IsSpecGoroutine bool
+}
+
+func (g Goroutine) IsZero() bool {
+	return g.ID == 0
+}
+
+func (g Goroutine) HasHighlights() bool {
+	for _, fc := range g.Stack {
+		if fc.Highlight {
+			return true
+		}
+	}
+
+	return false
+}
+
+type FunctionCall struct {
+	Function        string
+	Filename        string
+	Line            int
+	Highlight       bool     `json:",omitempty"`
+	Source          []string `json:",omitempty"`
+	SourceHighlight int      `json:",omitempty"`
+}
+
 // NodeType captures the type of a given Ginkgo Node
 type NodeType uint
 
@@ -493,6 +765,7 @@ const (
 	NodeTypeReportBeforeEach
 	NodeTypeReportAfterEach
+	NodeTypeReportBeforeSuite
 	NodeTypeReportAfterSuite
 
 	NodeTypeCleanupInvalid
@@ -502,7 +775,9 @@
 )
 
 var NodeTypesForContainerAndIt = NodeTypeContainer | NodeTypeIt
-var NodeTypesForSuiteLevelNodes = NodeTypeBeforeSuite | NodeTypeSynchronizedBeforeSuite | NodeTypeAfterSuite | NodeTypeSynchronizedAfterSuite | NodeTypeReportAfterSuite | NodeTypeCleanupAfterSuite
+var NodeTypesForSuiteLevelNodes = NodeTypeBeforeSuite | NodeTypeSynchronizedBeforeSuite | NodeTypeAfterSuite | NodeTypeSynchronizedAfterSuite | NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite | NodeTypeCleanupAfterSuite
+var NodeTypesAllowedDuringCleanupInterrupt = NodeTypeAfterEach | NodeTypeJustAfterEach | NodeTypeAfterAll | NodeTypeAfterSuite | NodeTypeSynchronizedAfterSuite | NodeTypeCleanupAfterEach | NodeTypeCleanupAfterAll | NodeTypeCleanupAfterSuite
+var NodeTypesAllowedDuringReportInterrupt = NodeTypeReportBeforeEach | NodeTypeReportAfterEach | NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite
 
 var ntEnumSupport = NewEnumSupport(map[uint]string{
 	uint(NodeTypeInvalid):                 "INVALID NODE TYPE",
@@ -520,9 +795,10 @@ var ntEnumSupport = NewEnumSupport(map[uint]string{
 	uint(NodeTypeSynchronizedAfterSuite):  "SynchronizedAfterSuite",
 	uint(NodeTypeReportBeforeEach):        "ReportBeforeEach",
 	uint(NodeTypeReportAfterEach):         "ReportAfterEach",
+	uint(NodeTypeReportBeforeSuite):       "ReportBeforeSuite",
 	uint(NodeTypeReportAfterSuite):        "ReportAfterSuite",
-	uint(NodeTypeCleanupInvalid):          "INVALID CLEANUP NODE",
-	uint(NodeTypeCleanupAfterEach):        "DeferCleanup",
+	uint(NodeTypeCleanupInvalid):          "DeferCleanup",
+	uint(NodeTypeCleanupAfterEach):        "DeferCleanup (Each)",
 	uint(NodeTypeCleanupAfterAll):         "DeferCleanup (All)",
 	uint(NodeTypeCleanupAfterSuite):       "DeferCleanup (Suite)",
 })
@@ -542,3 +818,99 @@ func (nt NodeType) MarshalJSON() ([]byte, error) {
 func (nt NodeType) Is(nodeTypes NodeType) bool {
 	return nt&nodeTypes != 0
 }
+
+/*
+SpecEvent captures a variety of events that can occur when specs run. See SpecEventType for the list of available events.
+*/ +type SpecEvent struct { + SpecEventType SpecEventType + + CodeLocation CodeLocation + TimelineLocation TimelineLocation + + Message string `json:",omitempty"` + Duration time.Duration `json:",omitempty"` + NodeType NodeType `json:",omitempty"` + Attempt int `json:",omitempty"` +} + +func (se SpecEvent) GetTimelineLocation() TimelineLocation { + return se.TimelineLocation +} + +func (se SpecEvent) IsOnlyVisibleAtVeryVerbose() bool { + return se.SpecEventType.Is(SpecEventByEnd | SpecEventNodeStart | SpecEventNodeEnd) +} + +func (se SpecEvent) GomegaString() string { + out := &strings.Builder{} + out.WriteString("[" + se.SpecEventType.String() + " SpecEvent] ") + if se.Message != "" { + out.WriteString("Message=") + out.WriteString(`"` + se.Message + `",`) + } + if se.Duration != 0 { + out.WriteString("Duration=" + se.Duration.String() + ",") + } + if se.NodeType != NodeTypeInvalid { + out.WriteString("NodeType=" + se.NodeType.String() + ",") + } + if se.Attempt != 0 { + out.WriteString(fmt.Sprintf("Attempt=%d", se.Attempt) + ",") + } + out.WriteString("CL=" + se.CodeLocation.String() + ",") + out.WriteString(fmt.Sprintf("TL.Offset=%d", se.TimelineLocation.Offset)) + + return out.String() +} + +type SpecEvents []SpecEvent + +func (se SpecEvents) WithType(seType SpecEventType) SpecEvents { + out := SpecEvents{} + for _, event := range se { + if event.SpecEventType.Is(seType) { + out = append(out, event) + } + } + return out +} + +type SpecEventType uint + +const ( + SpecEventInvalid SpecEventType = 0 + + SpecEventByStart SpecEventType = 1 << iota + SpecEventByEnd + SpecEventNodeStart + SpecEventNodeEnd + SpecEventSpecRepeat + SpecEventSpecRetry +) + +var seEnumSupport = NewEnumSupport(map[uint]string{ + uint(SpecEventInvalid): "INVALID SPEC EVENT", + uint(SpecEventByStart): "By", + uint(SpecEventByEnd): "By (End)", + uint(SpecEventNodeStart): "Node", + uint(SpecEventNodeEnd): "Node (End)", + uint(SpecEventSpecRepeat): "Repeat", + uint(SpecEventSpecRetry): "Retry", +}) + +func (se SpecEventType) String() string { + return seEnumSupport.String(uint(se)) +} +func (se *SpecEventType) UnmarshalJSON(b []byte) error { + out, err := seEnumSupport.UnmarshJSON(b) + *se = SpecEventType(out) + return err +} +func (se SpecEventType) MarshalJSON() ([]byte, error) { + return seEnumSupport.MarshJSON(uint(se)) +} + +func (se SpecEventType) Is(specEventTypes SpecEventType) bool { + return se&specEventTypes != 0 +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go index c799014..8e7f740 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/version.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -1,3 +1,3 @@ package types -const VERSION = "2.1.6" +const VERSION = "2.9.2" diff --git a/vendor/github.com/onsi/gomega/.gitignore b/vendor/github.com/onsi/gomega/.gitignore index 720c13c..52266ea 100644 --- a/vendor/github.com/onsi/gomega/.gitignore +++ b/vendor/github.com/onsi/gomega/.gitignore @@ -3,3 +3,5 @@ . 
.idea gomega.iml +TODO.md +.vscode \ No newline at end of file diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md index 0606586..fd8fc02 100644 --- a/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,187 @@ +## 1.27.5 + +### Maintenance +- Bump github.com/onsi/ginkgo/v2 from 2.9.1 to 2.9.2 (#653) [a215021] +- Bump github.com/go-task/slim-sprig (#652) [a26fed8] + +## 1.27.4 + +### Fixes +- improve error formatting and remove duplication of error message in Eventually/Consistently [854f075] + +### Maintenance +- Bump github.com/onsi/ginkgo/v2 from 2.9.0 to 2.9.1 (#650) [ccebd9b] + +## 1.27.3 + +### Fixes +- format.Object now always includes err.Error() when passed an error [86d97ef] +- Fix HaveExactElements to work inside ContainElement or other collection matchers (#648) [636757e] + +### Maintenance +- Bump github.com/golang/protobuf from 1.5.2 to 1.5.3 (#649) [cc16689] +- Bump github.com/onsi/ginkgo/v2 from 2.8.4 to 2.9.0 (#646) [e783366] + +## 1.27.2 + +### Fixes +- improve poll progress message when polling a consistently that has been passing [28a319b] + +### Maintenance +- bump ginkgo +- remove tools.go hack as Ginkgo 2.8.2 automatically pulls in the cli dependencies [81443b3] + +## 1.27.1 + +### Maintenance + +- Bump golang.org/x/net from 0.6.0 to 0.7.0 (#640) [bc686cd] + +## 1.27.0 + +### Features +- Add HaveExactElements matcher (#634) [9d50783] +- update Gomega docs to discuss GinkgoHelper() [be32774] + +### Maintenance +- Bump github.com/onsi/ginkgo/v2 from 2.8.0 to 2.8.1 (#639) [296a68b] +- Bump golang.org/x/net from 0.5.0 to 0.6.0 (#638) [c2b098b] +- Bump github-pages from 227 to 228 in /docs (#636) [a9069ab] +- test: update matrix for Go 1.20 (#635) [6bd25c8] +- Bump github.com/onsi/ginkgo/v2 from 2.7.0 to 2.8.0 (#631) [5445f8b] +- Bump webrick from 1.7.0 to 1.8.1 in /docs (#630) [03e93bb] +- codeql: add ruby language (#626) [63c7d21] +- dependabot: add bundler package-ecosystem for docs (#625) [d92f963] + +## 1.26.0 + +### Features +- When a polled function returns an error, keep track of the actual and report on the matcher state of the last non-errored actual [21f3090] +- improve eventually failure message output [c530fb3] + +### Fixes +- fix several documentation spelling issues [e2eff1f] + + +## 1.25.0 + +### Features +- add `MustPassRepeatedly(int)` to asyncAssertion (#619) [4509f72] +- compare unwrapped errors using DeepEqual (#617) [aaeaa5d] + +### Maintenance +- Bump golang.org/x/net from 0.4.0 to 0.5.0 (#614) [c7cfea4] +- Bump github.com/onsi/ginkgo/v2 from 2.6.1 to 2.7.0 (#615) [71b8adb] +- Docs: Fix typo "MUltiple" -> "Multiple" (#616) [9351dda] +- clean up go.sum [cd1dc1d] + +## 1.24.2 + +### Fixes +- Correctly handle assertion failure panics for eventually/consistnetly "g Gomega"s in a goroutine [78f1660] +- docs:Fix typo "you an" -> "you can" (#607) [3187c1f] +- fixes issue #600 (#606) [808d192] + +### Maintenance +- Bump golang.org/x/net from 0.2.0 to 0.4.0 (#611) [6ebc0bf] +- Bump nokogiri from 1.13.9 to 1.13.10 in /docs (#612) [258cfc8] +- Bump github.com/onsi/ginkgo/v2 from 2.5.0 to 2.5.1 (#609) [e6c3eb9] + +## 1.24.1 + +### Fixes +- maintain backward compatibility for Eventually and Consisntetly's signatures [4c7df5e] +- fix small typo (#601) [ea0ebe6] + +### Maintenance +- Bump golang.org/x/net from 0.1.0 to 0.2.0 (#603) [1ba8372] +- Bump github.com/onsi/ginkgo/v2 from 2.4.0 to 2.5.0 (#602) [f9426cb] +- fix label-filter in test.yml [d795db6] +- stop 
running flakey tests and rely on external network dependencies in CI [7133290]
+
+## 1.24.0
+
+### Features
+
+Introducing [gcustom](https://onsi.github.io/gomega/#gcustom-a-convenient-mechanism-for-buildling-custom-matchers) - a convenient mechanism for building custom matchers.
+
+This is an RC release for `gcustom`. The external API may be tweaked in response to feedback, however it is expected to remain mostly stable.
+
+### Maintenance
+
+- Update BeComparableTo documentation [756eaa0]
+
+## 1.23.0
+
+### Features
+- Custom formatting on a per-type basis can be provided using `format.RegisterCustomFormatter()` -- see the docs [here](https://onsi.github.io/gomega/#adjusting-output)
+
+- Substantial improvements have been made to `StopTrying()`:
+  - Users can now use `StopTrying().Wrap(err)` to wrap errors and `StopTrying().Attach(description, object)` to attach arbitrary objects to the `StopTrying()` error
+  - `StopTrying()` is now always interpreted as a failure. If you are an early adopter of `StopTrying()` you may need to change your code as the prior version would match against the returned value even if `StopTrying()` was returned. Going forward the `StopTrying()` api should remain stable.
+  - `StopTrying()` and `StopTrying().Now()` can both be used in matchers - not just polled functions.
+
+- `TryAgainAfter(duration)` is used like `StopTrying()` but instructs `Eventually` and `Consistently` that the poll should be tried again after the specified duration. This allows you to dynamically adjust the polling duration.
+
+- `ctx` can now be passed-in as the first argument to `Eventually` and `Consistently`.
+
+## Maintenance
+
+- Bump github.com/onsi/ginkgo/v2 from 2.3.0 to 2.3.1 (#597) [afed901]
+- Bump nokogiri from 1.13.8 to 1.13.9 in /docs (#599) [7c691b3]
+- Bump github.com/google/go-cmp from 0.5.8 to 0.5.9 (#587) [ff22665]
+
+## 1.22.1
+
+## Fixes
+- When passed a context and no explicit timeout, Eventually will only timeout when the context is cancelled [e5105cf]
+- Allow StopTrying() to be wrapped [bf3cba9]
+
+## Maintenance
+- bump to ginkgo v2.3.0 [c5d5c39]
+
+## 1.22.0
+
+### Features
+
+Several improvements have been made to `Eventually` and `Consistently` in this and the most recent releases:
+
+- Eventually and Consistently can take a context.Context [65c01bc]
+  This enables integration with Ginkgo 2.3.0's interruptible nodes and node timeouts.
+- Eventually and Consistently that are passed a SpecContext can provide reports when an interrupt occurs [0d063c9]
+- Eventually/Consistently will forward an attached context to functions that ask for one [e2091c5]
+- Eventually/Consistently supports passing arguments to functions via WithArguments() [a2dc7c3]
+- Eventually and Consistently can now be stopped early with StopTrying(message) and StopTrying(message).Now() [52976bb]
+
+These improvements are all documented in [Gomega's docs](https://onsi.github.io/gomega/#making-asynchronous-assertions)
+
+## Fixes
+
+## Maintenance
+
+## 1.21.1
+
+### Features
+- Eventually and Consistently that are passed a SpecContext can provide reports when an interrupt occurs [0d063c9]
+
+## 1.21.0
+
+### Features
+- Eventually and Consistently can take a context.Context [65c01bc]
+  This enables integration with Ginkgo 2.3.0's interruptible nodes and node timeouts.
+- Introduces Eventually.Within.ProbeEvery with tests and documentation (#591) [f633800]
+- New BeKeyOf matcher with documentation and unit tests (#590) [fb586b3]
+
+## Fixes
+- Cover the entire gmeasure suite with leak detection [8c54344]
+- Fix gmeasure leak [119d4ce]
+- Ignore new Ginkgo ProgressSignal goroutine in gleak [ba548e2]
+
+## Maintenance
+
+- Fixes crashes on newer Ruby 3 installations by upgrading github-pages gem dependency (#596) [12469a0]
+
+
 ## 1.20.2
 
 ## Fixes
diff --git a/vendor/github.com/onsi/gomega/RELEASING.md b/vendor/github.com/onsi/gomega/RELEASING.md
index 2d30d99..9973fff 100644
--- a/vendor/github.com/onsi/gomega/RELEASING.md
+++ b/vendor/github.com/onsi/gomega/RELEASING.md
@@ -1,7 +1,13 @@
 A Gomega release is a tagged sha and a GitHub release. To cut a release:
 
 1. Ensure CHANGELOG.md is up to date.
-  - Use `git log --pretty=format:'- %s [%h]' HEAD...vX.X.X` to list all the commits since the last release
+  - Use
+    ```bash
+    LAST_VERSION=$(git tag --sort=version:refname | tail -n1)
+    CHANGES=$(git log --pretty=format:'- %s [%h]' HEAD...$LAST_VERSION)
+    echo -e "## NEXT\n\n$CHANGES\n\n### Features\n\n### Fixes\n\n### Maintenance\n\n$(cat CHANGELOG.md)" > CHANGELOG.md
+    ```
+    to update the changelog
   - Categorize the changes into
     - Breaking Changes (requires a major version)
     - New Features (minor version)
diff --git a/vendor/github.com/onsi/gomega/format/format.go b/vendor/github.com/onsi/gomega/format/format.go
index 6e78c39..56bdd05 100644
--- a/vendor/github.com/onsi/gomega/format/format.go
+++ b/vendor/github.com/onsi/gomega/format/format.go
@@ -52,7 +52,7 @@ var CharactersAroundMismatchToInclude uint = 5
 var contextType = reflect.TypeOf((*context.Context)(nil)).Elem()
 var timeType = reflect.TypeOf(time.Time{})
 
-//The default indentation string emitted by the format package
+// The default indentation string emitted by the format package
 var Indent = "    "
 
 var longFormThreshold = 20
@@ -65,6 +65,52 @@ type GomegaStringer interface {
 	GomegaString() string
 }
 
+/*
+CustomFormatters can be registered with Gomega via RegisterCustomFormatter()
+Any value to be rendered by Gomega is passed to each registered CustomFormatter.
+The CustomFormatter signals that it will handle formatting the value by returning (formatted-string, true)
+If the CustomFormatter does not want to handle the object it should return ("", false)
+
+Strings returned by CustomFormatters are not truncated
+*/
+type CustomFormatter func(value interface{}) (string, bool)
+type CustomFormatterKey uint
+
+var customFormatterKey CustomFormatterKey = 1
+
+type customFormatterKeyPair struct {
+	CustomFormatter
+	CustomFormatterKey
+}
+
+/*
+RegisterCustomFormatter registers a CustomFormatter and returns a CustomFormatterKey
+
+You can call UnregisterCustomFormatter with the returned key to unregister the associated CustomFormatter
+*/
+func RegisterCustomFormatter(customFormatter CustomFormatter) CustomFormatterKey {
+	key := customFormatterKey
+	customFormatterKey += 1
+	customFormatters = append(customFormatters, customFormatterKeyPair{customFormatter, key})
+	return key
+}
+
+/*
+UnregisterCustomFormatter unregisters a previously registered CustomFormatter.
You should pass in the key returned by RegisterCustomFormatter
+*/
+func UnregisterCustomFormatter(key CustomFormatterKey) {
+	formatters := []customFormatterKeyPair{}
+	for _, f := range customFormatters {
+		if f.CustomFormatterKey == key {
+			continue
+		}
+		formatters = append(formatters, f)
+	}
+	customFormatters = formatters
+}
+
+var customFormatters = []customFormatterKeyPair{}
+
 /*
 Generates a formatted matcher success/failure message of the form:
@@ -212,24 +258,35 @@ Set PrintContextObjects to true to print the content of objects implementing con
 func Object(object interface{}, indentation uint) string {
 	indent := strings.Repeat(Indent, int(indentation))
 	value := reflect.ValueOf(object)
-	return fmt.Sprintf("%s<%s>: %s", indent, formatType(value), formatValue(value, indentation))
+	commonRepresentation := ""
+	if err, ok := object.(error); ok {
+		commonRepresentation += "\n" + IndentString(err.Error(), indentation) + "\n" + indent
+	}
+	return fmt.Sprintf("%s<%s>: %s%s", indent, formatType(value), commonRepresentation, formatValue(value, indentation))
 }
 
 /*
 IndentString takes a string and indents each line by the specified amount.
 */
 func IndentString(s string, indentation uint) string {
+	return indentString(s, indentation, true)
+}
+
+func indentString(s string, indentation uint, indentFirstLine bool) string {
+	result := &strings.Builder{}
 	components := strings.Split(s, "\n")
-	result := ""
 	indent := strings.Repeat(Indent, int(indentation))
 	for i, component := range components {
-		result += indent + component
+		if i > 0 || indentFirstLine {
+			result.WriteString(indent)
+		}
+		result.WriteString(component)
 		if i < len(components)-1 {
-			result += "\n"
+			result.WriteString("\n")
 		}
 	}
 
-	return result
+	return result.String()
 }
 
 func formatType(v reflect.Value) string {
@@ -261,18 +318,27 @@ func formatValue(value reflect.Value, indentation uint) string {
 
 	if value.CanInterface() {
 		obj := value.Interface()
 
+		// if a CustomFormatter handles this value, we'll go with that
+		for _, customFormatter := range customFormatters {
+			formatted, handled := customFormatter.CustomFormatter(obj)
+			// do not truncate a user-provided CustomFormatter()
+			if handled {
+				return indentString(formatted, indentation+1, false)
+			}
+		}
+
 		// GomegaStringer will take precedence to other representations and disregards UseStringerRepresentation
 		if x, ok := obj.(GomegaStringer); ok {
-			// do not truncate a user-defined GoMegaString() value
-			return x.GomegaString()
+			// do not truncate a user-defined GomegaString() value
+			return indentString(x.GomegaString(), indentation+1, false)
 		}
 
 		if UseStringerRepresentation {
 			switch x := obj.(type) {
 			case fmt.GoStringer:
-				return truncateLongStrings(x.GoString())
+				return indentString(truncateLongStrings(x.GoString()), indentation+1, false)
 			case fmt.Stringer:
-				return truncateLongStrings(x.String())
+				return indentString(truncateLongStrings(x.String()), indentation+1, false)
 			}
 		}
 	}
diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go
index 60b1687..593bcdc 100644
--- a/vendor/github.com/onsi/gomega/gomega_dsl.go
+++ b/vendor/github.com/onsi/gomega/gomega_dsl.go
@@ -22,7 +22,7 @@ import (
 	"github.com/onsi/gomega/types"
 )
 
-const GOMEGA_VERSION = "1.20.2"
+const GOMEGA_VERSION = "1.27.5"
 
 const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler.
 If you're using Ginkgo then you probably forgot to put your assertion in an It().
@@ -86,12 +86,12 @@ func internalGomega(g Gomega) *internal.Gomega { // NewWithT takes a *testing.T and returns a `gomega.WithT` allowing you to use `Expect`, `Eventually`, and `Consistently` along with // Gomega's rich ecosystem of matchers in standard `testing` test suits. // -// func TestFarmHasCow(t *testing.T) { -// g := gomega.NewWithT(t) +// func TestFarmHasCow(t *testing.T) { +// g := gomega.NewWithT(t) // -// f := farm.New([]string{"Cow", "Horse"}) -// g.Expect(f.HasCow()).To(BeTrue(), "Farm should have cow") -// } +// f := farm.New([]string{"Cow", "Horse"}) +// g.Expect(f.HasCow()).To(BeTrue(), "Farm should have cow") +// } func NewWithT(t types.GomegaTestingT) *WithT { return internal.NewGomega(internalGomega(Default).DurationBundle).ConfigureWithT(t) } @@ -171,7 +171,8 @@ func ensureDefaultGomegaIsConfigured() { } // Ω wraps an actual value allowing assertions to be made on it: -// Ω("foo").Should(Equal("foo")) +// +// Ω("foo").Should(Equal("foo")) // // If Ω is passed more than one argument it will pass the *first* argument to the matcher. // All subsequent arguments will be required to be nil/zero. @@ -180,10 +181,13 @@ func ensureDefaultGomegaIsConfigured() { // a value and an error - a common patter in Go. // // For example, given a function with signature: -// func MyAmazingThing() (int, error) +// +// func MyAmazingThing() (int, error) // // Then: -// Ω(MyAmazingThing()).Should(Equal(3)) +// +// Ω(MyAmazingThing()).Should(Equal(3)) +// // Will succeed only if `MyAmazingThing()` returns `(3, nil)` // // Ω and Expect are identical @@ -193,19 +197,23 @@ func Ω(actual interface{}, extra ...interface{}) Assertion { } // Expect wraps an actual value allowing assertions to be made on it: -// Expect("foo").To(Equal("foo")) +// +// Expect("foo").To(Equal("foo")) // // If Expect is passed more than one argument it will pass the *first* argument to the matcher. // All subsequent arguments will be required to be nil/zero. // // This is convenient if you want to make an assertion on a method/function that returns -// a value and an error - a common patter in Go. +// a value and an error - a common pattern in Go. // // For example, given a function with signature: -// func MyAmazingThing() (int, error) +// +// func MyAmazingThing() (int, error) // // Then: -// Expect(MyAmazingThing()).Should(Equal(3)) +// +// Expect(MyAmazingThing()).Should(Equal(3)) +// // Will succeed only if `MyAmazingThing()` returns `(3, nil)` // // Expect and Ω are identical @@ -215,7 +223,8 @@ func Expect(actual interface{}, extra ...interface{}) Assertion { } // ExpectWithOffset wraps an actual value allowing assertions to be made on it: -// ExpectWithOffset(1, "foo").To(Equal("foo")) +// +// ExpectWithOffset(1, "foo").To(Equal("foo")) // // Unlike `Expect` and `Ω`, `ExpectWithOffset` takes an additional integer argument // that is used to modify the call-stack offset when computing line numbers. It is @@ -233,7 +242,7 @@ func ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) Asse Eventually enables making assertions on asynchronous behavior. Eventually checks that an assertion *eventually* passes. Eventually blocks when called and attempts an assertion periodically until it passes or a timeout occurs. Both the timeout and polling interval are configurable as optional arguments. -The first optional argument is the timeout (which defaults to 1s), the second is the polling interval (which defaults to 10ms). 
Both intervals can be specified as time.Duration, parsable duration strings or floats/integers (in which case they are interpreted as seconds).
+The first optional argument is the timeout (which defaults to 1s), the second is the polling interval (which defaults to 10ms). Both intervals can be specified as time.Duration, parsable duration strings or floats/integers (in which case they are interpreted as seconds). In addition, an optional context.Context can be passed in - Eventually will keep trying until either the timeout expires or the context is cancelled, whichever comes first.
 
 Eventually works with any Gomega compatible matcher and supports making assertions against three categories of actual value:
 
@@ -241,15 +250,15 @@ Eventually works with any Gomega compatible matcher and supports making assertio
 
 There are several examples of values that can change over time. These can be passed in to Eventually and will be passed to the matcher repeatedly until a match occurs. For example:
 
-  c := make(chan bool)
-  go DoStuff(c)
-  Eventually(c, "50ms").Should(BeClosed())
+	c := make(chan bool)
+	go DoStuff(c)
+	Eventually(c, "50ms").Should(BeClosed())
 
 will poll the channel repeatedly until it is closed. In this example `Eventually` will block until either the specified timeout of 50ms has elapsed or the channel is closed, whichever comes first.
 
 Several Gomega libraries allow you to use Eventually in this way. For example, the gomega/gexec package allows you to block until a *gexec.Session exits successfully via:
 
-  Eventually(session).Should(gexec.Exit(0))
+	Eventually(session).Should(gexec.Exit(0))
 
 And the gomega/gbytes package allows you to monitor a streaming *gbytes.Buffer until a given string is seen:
 
@@ -266,27 +275,51 @@ this will trigger Go's race detector as the goroutine polling via Eventually wil
 
 **Category 2: Make Eventually assertions on functions**
 
-Eventually can be passed functions that **take no arguments** and **return at least one value**. When configured this way, Eventually will poll the function repeatedly and pass the first returned value to the matcher.
+Eventually can be passed functions that **return at least one value**. When configured this way, Eventually will poll the function repeatedly and pass the first returned value to the matcher.
 
 For example:
 
-    Eventually(func() int {
-        return client.FetchCount()
-    }).Should(BeNumerically(">=", 17))
+	Eventually(func() int {
+		return client.FetchCount()
+	}).Should(BeNumerically(">=", 17))
 
- will repeatedly poll client.FetchCount until the BeNumerically matcher is satisfied. (Note that this example could have been written as Eventually(client.FetchCount).Should(BeNumerically(">=", 17)))
+will repeatedly poll client.FetchCount until the BeNumerically matcher is satisfied. (Note that this example could have been written as Eventually(client.FetchCount).Should(BeNumerically(">=", 17)))
 
 If multiple values are returned by the function, Eventually will pass the first value to the matcher and require that all others are zero-valued. This allows you to pass Eventually a function that returns a value and an error - a common pattern in Go.
 
 For example, consider a method that returns a value and an error:
-    func FetchFromDB() (string, error)
+
+	func FetchFromDB() (string, error)
 
 Then
-    Eventually(FetchFromDB).Should(Equal("got it"))
+
+	Eventually(FetchFromDB).Should(Equal("got it"))
 
 will pass only if and when the returned error is nil *and* the returned string satisfies the matcher.
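To make the (value, error) polling pattern above concrete, here is a minimal, self-contained sketch using Gomega with the standard `testing` package. The `fetchFromDB` stub is hypothetical, standing in for an eventually-consistent store:

```go
package example

import (
	"errors"
	"testing"
	"time"

	. "github.com/onsi/gomega"
)

func TestFetchFromDB(t *testing.T) {
	g := NewWithT(t)

	// Hypothetical stand-in for an eventually-consistent store:
	// it errors on the first two calls, then succeeds.
	calls := 0
	fetchFromDB := func() (string, error) {
		calls++
		if calls < 3 {
			return "", errors.New("not ready yet")
		}
		return "got it", nil
	}

	// Eventually polls fetchFromDB until the returned error is nil
	// AND the returned string satisfies the matcher.
	g.Eventually(fetchFromDB).
		WithTimeout(time.Second).
		WithPolling(10 * time.Millisecond).
		Should(Equal("got it"))
}
```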
-It is important to note that the function passed into Eventually is invoked *synchronously* when polled. Eventually does not (in fact, it cannot) kill the function if it takes longer to return than Eventually's configured timeout. You should design your functions with this in mind.
+Eventually can also accept functions that take arguments; however, you must provide those arguments using .WithArguments(). For example, consider a function that takes a user-id and makes a network request to fetch a full name:
+
+	func FetchFullName(userId int) (string, error)
+
+You can poll this function like so:
+
+	Eventually(FetchFullName).WithArguments(1138).Should(Equal("Wookie"))
+
+It is important to note that the function passed into Eventually is invoked *synchronously* when polled. Eventually does not (in fact, it cannot) kill the function if it takes longer to return than Eventually's configured timeout. A common practice here is to use a context. Here's an example that combines Ginkgo's spec timeout support with Eventually:
+
+	It("fetches the correct count", func(ctx SpecContext) {
+		Eventually(ctx, func() int {
+			return client.FetchCount(ctx, "/users")
+		}).Should(BeNumerically(">=", 17))
+	}, SpecTimeout(time.Second))
+
+You can also use Eventually().WithContext(ctx) to pass in the context. Passed-in contexts play nicely with passed-in arguments as long as the context appears first. You can rewrite the above example as:
+
+	It("fetches the correct count", func(ctx SpecContext) {
+		Eventually(client.FetchCount).WithContext(ctx).WithArguments("/users").Should(BeNumerically(">=", 17))
+	}, SpecTimeout(time.Second))
+
+Either way the context passed to Eventually is also passed to the underlying function. Now, when Ginkgo cancels the context both the FetchCount client and Gomega will be informed and can exit.
 
 **Category 3: Making assertions _in_ the function passed into Eventually**
 
@@ -306,22 +339,48 @@ will pass only if all the assertions in the polled function pass and the return
 
 Eventually also supports a special case polling function that takes a single Gomega argument and returns no values.
 
 Eventually assumes such a function is making assertions and is designed to work with the Succeed matcher to validate that all assertions have passed.
 For example:
 
-    Eventually(func(g Gomega) {
-       model, err := client.Find(1138)
-       g.Expect(err).NotTo(HaveOccurred())
-       g.Expect(model.Reticulate()).To(Succeed())
-       g.Expect(model.IsReticulated()).To(BeTrue())
-       g.Expect(model.Save()).To(Succeed())
-    }).Should(Succeed())
+	Eventually(func(g Gomega) {
+		model, err := client.Find(1138)
+		g.Expect(err).NotTo(HaveOccurred())
+		g.Expect(model.Reticulate()).To(Succeed())
+		g.Expect(model.IsReticulated()).To(BeTrue())
+		g.Expect(model.Save()).To(Succeed())
+	}).Should(Succeed())
 
 will rerun the function until all assertions pass.
 
-`Eventually` specifying a timeout interval (and an optional polling interval) are
-the same as `Eventually(...).WithTimeout` or `Eventually(...).WithTimeout(...).WithPolling`.
+You can also pass additional arguments to functions that take a Gomega. The only rule is that the Gomega argument must be first. If you also want to pass the context attached to Eventually you must ensure that is the second argument.
For example:
+
+	Eventually(func(g Gomega, ctx context.Context, path string, expected ...string) {
+		tok, err := client.GetToken(ctx)
+		g.Expect(err).NotTo(HaveOccurred())
+
+		elements, err := client.Fetch(ctx, tok, path)
+		g.Expect(err).NotTo(HaveOccurred())
+		g.Expect(elements).To(ConsistOf(expected))
+	}).WithContext(ctx).WithArguments("/names", "Joe", "Jane", "Sam").Should(Succeed())
+
+You can ensure that you get a number of consecutive successful tries before succeeding using `MustPassRepeatedly(int)`. For example:
+
+	count := 0
+	Eventually(func() bool {
+		count++
+		return count > 1
+	}).MustPassRepeatedly(2).Should(BeTrue())
+	// Because we had to wait for 2 calls that returned true
+	Expect(count).To(Equal(3))
+
+Finally, in addition to passing timeouts and a context to Eventually you can be more explicit with Eventually's chaining configuration methods:
+
+	Eventually(..., "1s", "2s", ctx).Should(...)
+
+is equivalent to
+
+	Eventually(...).WithTimeout(time.Second).WithPolling(2*time.Second).WithContext(ctx).Should(...)
 */
-func Eventually(actual interface{}, intervals ...interface{}) AsyncAssertion {
+func Eventually(actualOrCtx interface{}, args ...interface{}) AsyncAssertion {
 	ensureDefaultGomegaIsConfigured()
-	return Default.Eventually(actual, intervals...)
+	return Default.Eventually(actualOrCtx, args...)
 }
 
 // EventuallyWithOffset operates like Eventually but takes an additional
@@ -333,9 +392,9 @@ func Eventually(actualOrCtx interface{}, args ...interface{}) AsyncAssertion {
 // `EventuallyWithOffset` specifying a timeout interval (and an optional polling interval) are
 // the same as `Eventually(...).WithOffset(...).WithTimeout` or
 // `Eventually(...).WithOffset(...).WithTimeout(...).WithPolling`.
-func EventuallyWithOffset(offset int, actual interface{}, intervals ...interface{}) AsyncAssertion {
+func EventuallyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion {
 	ensureDefaultGomegaIsConfigured()
-	return Default.EventuallyWithOffset(offset, actual, intervals...)
+	return Default.EventuallyWithOffset(offset, actualOrCtx, args...)
 }
 
 /*
@@ -343,19 +402,19 @@ Consistently, like Eventually, enables making assertions on asynchronous behavio
 
 Consistently blocks when called for a specified duration. During that duration Consistently repeatedly polls its matcher and ensures that it is satisfied. If the matcher is consistently satisfied, then Consistently will pass. Otherwise Consistently will fail.
 
-Both the total waiting duration and the polling interval are configurable as optional arguments. The first optional argument is the duration that Consistently will run for (defaults to 100ms), and the second argument is the polling interval (defaults to 10ms). As with Eventually, these intervals can be passed in as time.Duration, parsable duration strings or an integer or float number of seconds.
+Both the total waiting duration and the polling interval are configurable as optional arguments. The first optional argument is the duration that Consistently will run for (defaults to 100ms), and the second argument is the polling interval (defaults to 10ms). As with Eventually, these intervals can be passed in as time.Duration, parsable duration strings or an integer or float number of seconds. You can also pass in an optional context.Context - Consistently will exit early (with a failure) if the context is cancelled before the waiting duration expires.
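To illustrate those Consistently knobs concretely (duration, polling interval, and the optional context), here is a minimal sketch; the `quiet` channel and the intervals are invented for the example:

```go
package example

import (
	"context"
	"testing"
	"time"

	. "github.com/onsi/gomega"
)

func TestChannelStaysQuiet(t *testing.T) {
	g := NewWithT(t)

	quiet := make(chan int) // nothing is ever sent on this channel

	// The context is a hard upper bound on the assertion: if it is
	// cancelled before the 200ms window elapses, Consistently exits
	// early with a failure.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	// Hold the assertion for 200ms, polling every 20ms.
	g.Consistently(quiet).
		WithContext(ctx).
		WithTimeout(200 * time.Millisecond).
		WithPolling(20 * time.Millisecond).
		ShouldNot(Receive())
}
```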
Consistently accepts the same three categories of actual as Eventually, check the Eventually docs to learn more.
 
 Consistently is useful in cases where you want to assert that something *does not happen* for a period of time. For example, you may want to assert that a goroutine does *not* send data down a channel. In this case you could write:
 
-  Consistently(channel, "200ms").ShouldNot(Receive())
+	Consistently(channel, "200ms").ShouldNot(Receive())
 
 This will block for 200 milliseconds and repeatedly check the channel and ensure nothing has been received.
 */
-func Consistently(actual interface{}, intervals ...interface{}) AsyncAssertion {
+func Consistently(actualOrCtx interface{}, args ...interface{}) AsyncAssertion {
 	ensureDefaultGomegaIsConfigured()
-	return Default.Consistently(actual, intervals...)
+	return Default.Consistently(actualOrCtx, args...)
 }
 
 // ConsistentlyWithOffset operates like Consistently but takes an additional
@@ -364,11 +423,54 @@ func Consistently(actualOrCtx interface{}, args ...interface{}) AsyncAssertion {
 //
 // `ConsistentlyWithOffset` is the same as `Consistently(...).WithOffset` and
 // optional `WithTimeout` and `WithPolling`.
-func ConsistentlyWithOffset(offset int, actual interface{}, intervals ...interface{}) AsyncAssertion {
+func ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion {
 	ensureDefaultGomegaIsConfigured()
-	return Default.ConsistentlyWithOffset(offset, actual, intervals...)
+	return Default.ConsistentlyWithOffset(offset, actualOrCtx, args...)
 }
 
+/*
+StopTrying can be used to signal to Eventually and Consistently that they should abort and stop trying. This always results in a failure of the assertion - and the failure message is the content of the StopTrying signal.
+
+You can send the StopTrying signal by either returning StopTrying("message") as an error from your passed-in function _or_ by calling StopTrying("message").Now() to trigger a panic and end execution.
+
+You can also wrap StopTrying around an error with `StopTrying("message").Wrap(err)` and can attach additional objects via `StopTrying("message").Attach("description", object)`. When rendered, the signal will include the wrapped error and any attached objects rendered using Gomega's default formatting.
+
+Here are a couple of examples. This is how you might use StopTrying() as an error to signal that Eventually should stop:
+
+	playerIndex, numPlayers := 0, 11
+	Eventually(func() (string, error) {
+		if playerIndex == numPlayers {
+			return "", StopTrying("no more players left")
+		}
+		name := client.FetchPlayer(playerIndex)
+		playerIndex += 1
+		return name, nil
+	}).Should(Equal("Patrick Mahomes"))
+
+And here's an example where `StopTrying().Now()` is called to halt execution immediately:
+
+	Eventually(func() []string {
+		names, err := client.FetchAllPlayers()
+		if err == client.IRRECOVERABLE_ERROR {
+			StopTrying("Irrecoverable error occurred").Wrap(err).Now()
+		}
+		return names
+	}).Should(ContainElement("Patrick Mahomes"))
+*/
+var StopTrying = internal.StopTrying
+
+/*
+TryAgainAfter() allows you to adjust the polling interval for the _next_ iteration of `Eventually` or `Consistently`. Like `StopTrying` you can either return `TryAgainAfter` as an error or trigger it immediately with `.Now()`
+
+When `TryAgainAfter(<duration>)` is triggered `Eventually` and `Consistently` will wait for that duration.
+If a timeout occurs before the next poll is triggered, both `Eventually` and `Consistently` will always fail with the content of the TryAgainAfter message. As with StopTrying you can `.Wrap()` an error and `.Attach()` additional objects to `TryAgainAfter`.
+*/
+var TryAgainAfter = internal.TryAgainAfter
+
+/*
+PollingSignalError is the error returned by StopTrying() and TryAgainAfter()
+*/
+type PollingSignalError = internal.PollingSignalError
+
 // SetDefaultEventuallyTimeout sets the default timeout duration for Eventually. Eventually will repeatedly poll your condition until it succeeds, or until this timeout elapses.
 func SetDefaultEventuallyTimeout(t time.Duration) {
 	Default.SetDefaultEventuallyTimeout(t)
@@ -402,8 +504,8 @@ func SetDefaultConsistentlyPollingInterval(t time.Duration) {
 //
 // Example:
 //
-//    Eventually(myChannel).Should(Receive(), "Something should have come down the pipe.")
-//    Consistently(myChannel).ShouldNot(Receive(), func() string { return "Nothing should have come down the pipe." })
+//	Eventually(myChannel).Should(Receive(), "Something should have come down the pipe.")
+//	Consistently(myChannel).ShouldNot(Receive(), func() string { return "Nothing should have come down the pipe." })
 type AsyncAssertion = types.AsyncAssertion
 
 // GomegaAsyncAssertion is deprecated in favor of AsyncAssertion, which does not stutter.
@@ -425,7 +527,7 @@ type GomegaAsyncAssertion = types.AsyncAssertion
 //
 // Example:
 //
-//    Ω(farm.HasCow()).Should(BeTrue(), "Farm %v should have a cow", farm)
+//	Ω(farm.HasCow()).Should(BeTrue(), "Farm %v should have a cow", farm)
 type Assertion = types.Assertion
 
 // GomegaAssertion is deprecated in favor of Assertion, which does not stutter.
diff --git a/vendor/github.com/onsi/gomega/internal/assertion.go b/vendor/github.com/onsi/gomega/internal/assertion.go
index 7b7bdd1..08356a6 100644
--- a/vendor/github.com/onsi/gomega/internal/assertion.go
+++ b/vendor/github.com/onsi/gomega/internal/assertion.go
@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"reflect"
 
+	"github.com/onsi/gomega/format"
 	"github.com/onsi/gomega/types"
 )
 
@@ -146,7 +147,12 @@ func vetActuals(actuals []interface{}, skipIndex int) (bool, string) {
 		if actual != nil {
 			zeroValue := reflect.Zero(reflect.TypeOf(actual)).Interface()
 			if !reflect.DeepEqual(zeroValue, actual) {
-				message := fmt.Sprintf("Unexpected non-nil/non-zero argument at index %d:\n\t<%T>: %#v", i, actual, actual)
+				var message string
+				if err, ok := actual.(error); ok {
+					message = fmt.Sprintf("Unexpected error: %s\n%s", err, format.Object(err, 1))
+				} else {
+					message = fmt.Sprintf("Unexpected non-nil/non-zero argument at index %d:\n\t<%T>: %#v", i, actual, actual)
+				}
 				return false, message
 			}
 		}
diff --git a/vendor/github.com/onsi/gomega/internal/async_assertion.go b/vendor/github.com/onsi/gomega/internal/async_assertion.go
index 126bbcb..1188b0b 100644
--- a/vendor/github.com/onsi/gomega/internal/async_assertion.go
+++ b/vendor/github.com/onsi/gomega/internal/async_assertion.go
@@ -1,15 +1,53 @@
 package internal
 
 import (
+	"context"
 	"errors"
 	"fmt"
 	"reflect"
 	"runtime"
+	"sync"
 	"time"
 
+	"github.com/onsi/gomega/format"
 	"github.com/onsi/gomega/types"
 )
 
+var errInterface = reflect.TypeOf((*error)(nil)).Elem()
+var gomegaType = reflect.TypeOf((*types.Gomega)(nil)).Elem()
+var contextType = reflect.TypeOf(new(context.Context)).Elem()
+
+type formattedGomegaError interface {
+	FormattedGomegaError() string
+}
+
+type asyncPolledActualError struct {
+	message string
+}
+
+func (err *asyncPolledActualError) Error() string {
+	return err.message
+} + +func (err *asyncPolledActualError) FormattedGomegaError() string { + return err.message +} + +type contextWithAttachProgressReporter interface { + AttachProgressReporter(func() string) func() +} + +type asyncGomegaHaltExecutionError struct{} + +func (a asyncGomegaHaltExecutionError) GinkgoRecoverShouldIgnoreThisPanic() {} +func (a asyncGomegaHaltExecutionError) Error() string { + return `An assertion has failed in a goroutine. You should call + + defer GinkgoRecover() + +at the top of the goroutine that caused this panic. This will allow Ginkgo and Gomega to correctly capture and manage this panic.` +} + type AsyncAssertionType uint const ( @@ -17,71 +55,45 @@ const ( AsyncAssertionTypeConsistently ) +func (at AsyncAssertionType) String() string { + switch at { + case AsyncAssertionTypeEventually: + return "Eventually" + case AsyncAssertionTypeConsistently: + return "Consistently" + } + return "INVALID ASYNC ASSERTION TYPE" +} + type AsyncAssertion struct { asyncType AsyncAssertionType - actualIsFunc bool - actualValue interface{} - actualFunc func() ([]reflect.Value, error) + actualIsFunc bool + actual interface{} + argsToForward []interface{} - timeoutInterval time.Duration - pollingInterval time.Duration - offset int - g *Gomega + timeoutInterval time.Duration + pollingInterval time.Duration + mustPassRepeatedly int + ctx context.Context + offset int + g *Gomega } -func NewAsyncAssertion(asyncType AsyncAssertionType, actualInput interface{}, g *Gomega, timeoutInterval time.Duration, pollingInterval time.Duration, offset int) *AsyncAssertion { +func NewAsyncAssertion(asyncType AsyncAssertionType, actualInput interface{}, g *Gomega, timeoutInterval time.Duration, pollingInterval time.Duration, mustPassRepeatedly int, ctx context.Context, offset int) *AsyncAssertion { out := &AsyncAssertion{ - asyncType: asyncType, - timeoutInterval: timeoutInterval, - pollingInterval: pollingInterval, - offset: offset, - g: g, + asyncType: asyncType, + timeoutInterval: timeoutInterval, + pollingInterval: pollingInterval, + mustPassRepeatedly: mustPassRepeatedly, + offset: offset, + ctx: ctx, + g: g, } - switch actualType := reflect.TypeOf(actualInput); { - case actualInput == nil || actualType.Kind() != reflect.Func: - out.actualValue = actualInput - case actualType.NumIn() == 0 && actualType.NumOut() > 0: + out.actual = actualInput + if actualInput != nil && reflect.TypeOf(actualInput).Kind() == reflect.Func { out.actualIsFunc = true - out.actualFunc = func() ([]reflect.Value, error) { - return reflect.ValueOf(actualInput).Call([]reflect.Value{}), nil - } - case actualType.NumIn() == 1 && actualType.In(0).Implements(reflect.TypeOf((*types.Gomega)(nil)).Elem()): - out.actualIsFunc = true - out.actualFunc = func() (values []reflect.Value, err error) { - var assertionFailure error - assertionCapturingGomega := NewGomega(g.DurationBundle).ConfigureWithFailHandler(func(message string, callerSkip ...int) { - skip := 0 - if len(callerSkip) > 0 { - skip = callerSkip[0] - } - _, file, line, _ := runtime.Caller(skip + 1) - assertionFailure = fmt.Errorf("Assertion in callback at %s:%d failed:\n%s", file, line, message) - panic("stop execution") - }) - - defer func() { - if actualType.NumOut() == 0 { - if assertionFailure == nil { - values = []reflect.Value{reflect.Zero(reflect.TypeOf((*error)(nil)).Elem())} - } else { - values = []reflect.Value{reflect.ValueOf(assertionFailure)} - } - } else { - err = assertionFailure - } - if e := recover(); e != nil && assertionFailure == nil { - panic(e) - } - }() - - 
values = reflect.ValueOf(actualInput).Call([]reflect.Value{reflect.ValueOf(assertionCapturingGomega)}) - return - } - default: - msg := fmt.Sprintf("The function passed to Gomega's async assertions should either take no arguments and return values, or take a single Gomega interface that it can use to make assertions within the body of the function. When taking a Gomega interface the function can optionally return values or return nothing. The function you passed takes %d arguments and returns %d values.", actualType.NumIn(), actualType.NumOut()) - g.Fail(msg, offset+4) } return out @@ -102,6 +114,31 @@ func (assertion *AsyncAssertion) WithPolling(interval time.Duration) types.Async return assertion } +func (assertion *AsyncAssertion) Within(timeout time.Duration) types.AsyncAssertion { + assertion.timeoutInterval = timeout + return assertion +} + +func (assertion *AsyncAssertion) ProbeEvery(interval time.Duration) types.AsyncAssertion { + assertion.pollingInterval = interval + return assertion +} + +func (assertion *AsyncAssertion) WithContext(ctx context.Context) types.AsyncAssertion { + assertion.ctx = ctx + return assertion +} + +func (assertion *AsyncAssertion) WithArguments(argsToForward ...interface{}) types.AsyncAssertion { + assertion.argsToForward = argsToForward + return assertion +} + +func (assertion *AsyncAssertion) MustPassRepeatedly(count int) types.AsyncAssertion { + assertion.mustPassRepeatedly = count + return assertion +} + func (assertion *AsyncAssertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { assertion.g.THelper() vetOptionalDescription("Asynchronous assertion", optionalDescription...) @@ -126,112 +163,409 @@ func (assertion *AsyncAssertion) buildDescription(optionalDescription ...interfa return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) 
+ "\n" } -func (assertion *AsyncAssertion) pollActual() (interface{}, error) { - if !assertion.actualIsFunc { - return assertion.actualValue, nil +func (assertion *AsyncAssertion) processReturnValues(values []reflect.Value) (interface{}, error) { + if len(values) == 0 { + return nil, &asyncPolledActualError{ + message: fmt.Sprintf("The function passed to %s did not return any values", assertion.asyncType), + } } - values, err := assertion.actualFunc() - if err != nil { - return nil, err - } - extras := []interface{}{nil} - for _, value := range values[1:] { - extras = append(extras, value.Interface()) - } - success, message := vetActuals(extras, 0) - if !success { - return nil, errors.New(message) + actual := values[0].Interface() + if _, ok := AsPollingSignalError(actual); ok { + return actual, actual.(error) } - return values[0].Interface(), nil + var err error + for i, extraValue := range values[1:] { + extra := extraValue.Interface() + if extra == nil { + continue + } + if _, ok := AsPollingSignalError(extra); ok { + return actual, extra.(error) + } + extraType := reflect.TypeOf(extra) + zero := reflect.Zero(extraType).Interface() + if reflect.DeepEqual(extra, zero) { + continue + } + if i == len(values)-2 && extraType.Implements(errInterface) { + err = extra.(error) + } + if err == nil { + err = &asyncPolledActualError{ + message: fmt.Sprintf("The function passed to %s had an unexpected non-nil/non-zero return value at index %d:\n%s", assertion.asyncType, i+1, format.Object(extra, 1)), + } + } + } + + return actual, err } -func (assertion *AsyncAssertion) matcherMayChange(matcher types.GomegaMatcher, value interface{}) bool { - if assertion.actualIsFunc { - return true +func (assertion *AsyncAssertion) invalidFunctionError(t reflect.Type) error { + return fmt.Errorf(`The function passed to %s had an invalid signature of %s. Functions passed to %s must either: + + (a) have return values or + (b) take a Gomega interface as their first argument and use that Gomega instance to make assertions. + +You can learn more at https://onsi.github.io/gomega/#eventually +`, assertion.asyncType, t, assertion.asyncType) +} + +func (assertion *AsyncAssertion) noConfiguredContextForFunctionError() error { + return fmt.Errorf(`The function passed to %s requested a context.Context, but no context has been provided. Please pass one in using %s().WithContext(). + +You can learn more at https://onsi.github.io/gomega/#eventually +`, assertion.asyncType, assertion.asyncType) +} + +func (assertion *AsyncAssertion) argumentMismatchError(t reflect.Type, numProvided int) error { + have := "have" + if numProvided == 1 { + have = "has" } - return types.MatchMayChangeInTheFuture(matcher, value) + return fmt.Errorf(`The function passed to %s has signature %s takes %d arguments but %d %s been provided. Please use %s().WithArguments() to pass the corect set of arguments. 
+ +You can learn more at https://onsi.github.io/gomega/#eventually +`, assertion.asyncType, t, t.NumIn(), numProvided, have, assertion.asyncType) +} + +func (assertion *AsyncAssertion) invalidMustPassRepeatedlyError(reason string) error { + return fmt.Errorf(`Invalid use of MustPassRepeatedly with %s %s + +You can learn more at https://onsi.github.io/gomega/#eventually +`, assertion.asyncType, reason) +} + +func (assertion *AsyncAssertion) buildActualPoller() (func() (interface{}, error), error) { + if !assertion.actualIsFunc { + return func() (interface{}, error) { return assertion.actual, nil }, nil + } + actualValue := reflect.ValueOf(assertion.actual) + actualType := reflect.TypeOf(assertion.actual) + numIn, numOut, isVariadic := actualType.NumIn(), actualType.NumOut(), actualType.IsVariadic() + + if numIn == 0 && numOut == 0 { + return nil, assertion.invalidFunctionError(actualType) + } + takesGomega, takesContext := false, false + if numIn > 0 { + takesGomega, takesContext = actualType.In(0).Implements(gomegaType), actualType.In(0).Implements(contextType) + } + if takesGomega && numIn > 1 && actualType.In(1).Implements(contextType) { + takesContext = true + } + if takesContext && len(assertion.argsToForward) > 0 && reflect.TypeOf(assertion.argsToForward[0]).Implements(contextType) { + takesContext = false + } + if !takesGomega && numOut == 0 { + return nil, assertion.invalidFunctionError(actualType) + } + if takesContext && assertion.ctx == nil { + return nil, assertion.noConfiguredContextForFunctionError() + } + + var assertionFailure error + inValues := []reflect.Value{} + if takesGomega { + inValues = append(inValues, reflect.ValueOf(NewGomega(assertion.g.DurationBundle).ConfigureWithFailHandler(func(message string, callerSkip ...int) { + skip := 0 + if len(callerSkip) > 0 { + skip = callerSkip[0] + } + _, file, line, _ := runtime.Caller(skip + 1) + assertionFailure = &asyncPolledActualError{ + message: fmt.Sprintf("The function passed to %s failed at %s:%d with:\n%s", assertion.asyncType, file, line, message), + } + // we throw an asyncGomegaHaltExecutionError so that defer GinkgoRecover() can catch this error if the user makes an assertion in a goroutine + panic(asyncGomegaHaltExecutionError{}) + }))) + } + if takesContext { + inValues = append(inValues, reflect.ValueOf(assertion.ctx)) + } + for _, arg := range assertion.argsToForward { + inValues = append(inValues, reflect.ValueOf(arg)) + } + + if !isVariadic && numIn != len(inValues) { + return nil, assertion.argumentMismatchError(actualType, len(inValues)) + } else if isVariadic && len(inValues) < numIn-1 { + return nil, assertion.argumentMismatchError(actualType, len(inValues)) + } + + if assertion.mustPassRepeatedly != 1 && assertion.asyncType != AsyncAssertionTypeEventually { + return nil, assertion.invalidMustPassRepeatedlyError("it can only be used with Eventually") + } + if assertion.mustPassRepeatedly < 1 { + return nil, assertion.invalidMustPassRepeatedlyError("parameter can't be < 1") + } + + return func() (actual interface{}, err error) { + var values []reflect.Value + assertionFailure = nil + defer func() { + if numOut == 0 && takesGomega { + actual = assertionFailure + } else { + actual, err = assertion.processReturnValues(values) + _, isAsyncError := AsPollingSignalError(err) + if assertionFailure != nil && !isAsyncError { + err = assertionFailure + } + } + if e := recover(); e != nil { + if _, isAsyncError := AsPollingSignalError(e); isAsyncError { + err = e.(error) + } else if assertionFailure == nil { + 
panic(e) + } + } + }() + values = actualValue.Call(inValues) + return + }, nil +} + +func (assertion *AsyncAssertion) afterTimeout() <-chan time.Time { + if assertion.timeoutInterval >= 0 { + return time.After(assertion.timeoutInterval) + } + + if assertion.asyncType == AsyncAssertionTypeConsistently { + return time.After(assertion.g.DurationBundle.ConsistentlyDuration) + } else { + if assertion.ctx == nil { + return time.After(assertion.g.DurationBundle.EventuallyTimeout) + } else { + return nil + } + } +} + +func (assertion *AsyncAssertion) afterPolling() <-chan time.Time { + if assertion.pollingInterval >= 0 { + return time.After(assertion.pollingInterval) + } + if assertion.asyncType == AsyncAssertionTypeConsistently { + return time.After(assertion.g.DurationBundle.ConsistentlyPollingInterval) + } else { + return time.After(assertion.g.DurationBundle.EventuallyPollingInterval) + } +} + +func (assertion *AsyncAssertion) matcherSaysStopTrying(matcher types.GomegaMatcher, value interface{}) bool { + if assertion.actualIsFunc || types.MatchMayChangeInTheFuture(matcher, value) { + return false + } + return true +} + +func (assertion *AsyncAssertion) pollMatcher(matcher types.GomegaMatcher, value interface{}) (matches bool, err error) { + defer func() { + if e := recover(); e != nil { + if _, isAsyncError := AsPollingSignalError(e); isAsyncError { + err = e.(error) + } else { + panic(e) + } + } + }() + + matches, err = matcher.Match(value) + + return } func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool { timer := time.Now() - timeout := time.After(assertion.timeoutInterval) + timeout := assertion.afterTimeout() + lock := sync.Mutex{} - var matches bool - var err error - mayChange := true - value, err := assertion.pollActual() - if err == nil { - mayChange = assertion.matcherMayChange(matcher, value) - matches, err = matcher.Match(value) - } + var matches, hasLastValidActual bool + var actual, lastValidActual interface{} + var actualErr, matcherErr error + var oracleMatcherSaysStop bool assertion.g.THelper() - fail := func(preamble string) { - errMsg := "" - message := "" - if err != nil { - errMsg = "Error: " + err.Error() - } else { - if desiredMatch { - message = matcher.FailureMessage(value) - } else { - message = matcher.NegatedFailureMessage(value) - } - } - assertion.g.THelper() - description := assertion.buildDescription(optionalDescription...) - assertion.g.Fail(fmt.Sprintf("%s after %.3fs.\n%s%s%s", preamble, time.Since(timer).Seconds(), description, message, errMsg), 3+assertion.offset) + pollActual, buildActualPollerErr := assertion.buildActualPoller() + if buildActualPollerErr != nil { + assertion.g.Fail(buildActualPollerErr.Error(), 2+assertion.offset) + return false } - if assertion.asyncType == AsyncAssertionTypeEventually { - for { - if err == nil && matches == desiredMatch { - return true - } + actual, actualErr = pollActual() + if actualErr == nil { + lastValidActual = actual + hasLastValidActual = true + oracleMatcherSaysStop = assertion.matcherSaysStopTrying(matcher, actual) + matches, matcherErr = assertion.pollMatcher(matcher, actual) + } - if !mayChange { - fail("No future change is possible. 
Bailing out early") - return false + renderError := func(preamble string, err error) string { + message := "" + if pollingSignalErr, ok := AsPollingSignalError(err); ok { + message = err.Error() + for _, attachment := range pollingSignalErr.Attachments { + message += fmt.Sprintf("\n%s:\n", attachment.Description) + message += format.Object(attachment.Object, 1) } + } else { + message = preamble + "\n" + format.Object(err, 1) + } + return message + } - select { - case <-time.After(assertion.pollingInterval): - value, err = assertion.pollActual() - if err == nil { - mayChange = assertion.matcherMayChange(matcher, value) - matches, err = matcher.Match(value) + messageGenerator := func() string { + // can be called out of band by Ginkgo if the user requests a progress report + lock.Lock() + defer lock.Unlock() + message := "" + + if actualErr == nil { + if matcherErr == nil { + if desiredMatch != matches { + if desiredMatch { + message += matcher.FailureMessage(actual) + } else { + message += matcher.NegatedFailureMessage(actual) + } + } else { + if assertion.asyncType == AsyncAssertionTypeConsistently { + message += "There is no failure as the matcher passed to Consistently has not yet failed" + } else { + message += "There is no failure as the matcher passed to Eventually succeeded on its most recent iteration" + } + } + } else { + var fgErr formattedGomegaError + if errors.As(actualErr, &fgErr) { + message += fgErr.FormattedGomegaError() + "\n" + } else { + message += renderError(fmt.Sprintf("The matcher passed to %s returned the following error:", assertion.asyncType), matcherErr) + } + } + } else { + var fgErr formattedGomegaError + if errors.As(actualErr, &fgErr) { + message += fgErr.FormattedGomegaError() + "\n" + } else { + message += renderError(fmt.Sprintf("The function passed to %s returned the following error:", assertion.asyncType), actualErr) + } + if hasLastValidActual { + message += fmt.Sprintf("\nAt one point, however, the function did return successfully.\nYet, %s failed because", assertion.asyncType) + _, e := matcher.Match(lastValidActual) + if e != nil { + message += renderError(" the matcher returned the following error:", e) + } else { + message += " the matcher was not satisfied:\n" + if desiredMatch { + message += matcher.FailureMessage(lastValidActual) + } else { + message += matcher.NegatedFailureMessage(lastValidActual) + } } - case <-timeout: - fail("Timed out") - return false } } - } else if assertion.asyncType == AsyncAssertionTypeConsistently { - for { - if !(err == nil && matches == desiredMatch) { + + description := assertion.buildDescription(optionalDescription...) 
+ return fmt.Sprintf("%s%s", description, message) + } + + fail := func(preamble string) { + assertion.g.THelper() + assertion.g.Fail(fmt.Sprintf("%s after %.3fs.\n%s", preamble, time.Since(timer).Seconds(), messageGenerator()), 3+assertion.offset) + } + + var contextDone <-chan struct{} + if assertion.ctx != nil { + contextDone = assertion.ctx.Done() + if v, ok := assertion.ctx.Value("GINKGO_SPEC_CONTEXT").(contextWithAttachProgressReporter); ok { + detach := v.AttachProgressReporter(messageGenerator) + defer detach() + } + } + + // Used to count the number of times in a row a step passed + passedRepeatedlyCount := 0 + for { + var nextPoll <-chan time.Time = nil + var isTryAgainAfterError = false + + for _, err := range []error{actualErr, matcherErr} { + if pollingSignalErr, ok := AsPollingSignalError(err); ok { + if pollingSignalErr.IsStopTrying() { + fail("Told to stop trying") + return false + } + if pollingSignalErr.IsTryAgainAfter() { + nextPoll = time.After(pollingSignalErr.TryAgainDuration()) + isTryAgainAfterError = true + } + } + } + + if actualErr == nil && matcherErr == nil && matches == desiredMatch { + if assertion.asyncType == AsyncAssertionTypeEventually { + passedRepeatedlyCount += 1 + if passedRepeatedlyCount == assertion.mustPassRepeatedly { + return true + } + } + } else if !isTryAgainAfterError { + if assertion.asyncType == AsyncAssertionTypeConsistently { fail("Failed") return false } + // Reset the consecutive pass count + passedRepeatedlyCount = 0 + } - if !mayChange { + if oracleMatcherSaysStop { + if assertion.asyncType == AsyncAssertionTypeEventually { + fail("No future change is possible. Bailing out early") + return false + } else { return true } + } - select { - case <-time.After(assertion.pollingInterval): - value, err = assertion.pollActual() - if err == nil { - mayChange = assertion.matcherMayChange(matcher, value) - matches, err = matcher.Match(value) + if nextPoll == nil { + nextPoll = assertion.afterPolling() + } + + select { + case <-nextPoll: + a, e := pollActual() + lock.Lock() + actual, actualErr = a, e + lock.Unlock() + if actualErr == nil { + lock.Lock() + lastValidActual = actual + hasLastValidActual = true + lock.Unlock() + oracleMatcherSaysStop = assertion.matcherSaysStopTrying(matcher, actual) + m, e := assertion.pollMatcher(matcher, actual) + lock.Lock() + matches, matcherErr = m, e + lock.Unlock() + } + case <-contextDone: + fail("Context was cancelled") + return false + case <-timeout: + if assertion.asyncType == AsyncAssertionTypeEventually { + fail("Timed out") + return false + } else { + if isTryAgainAfterError { + fail("Timed out while waiting on TryAgainAfter") + return false } - case <-timeout: return true } } } - - return false } diff --git a/vendor/github.com/onsi/gomega/internal/duration_bundle.go b/vendor/github.com/onsi/gomega/internal/duration_bundle.go index af8d989..6e0d90d 100644 --- a/vendor/github.com/onsi/gomega/internal/duration_bundle.go +++ b/vendor/github.com/onsi/gomega/internal/duration_bundle.go @@ -44,28 +44,28 @@ func durationFromEnv(key string, defaultDuration time.Duration) time.Duration { return duration } -func toDuration(input interface{}) time.Duration { +func toDuration(input interface{}) (time.Duration, error) { duration, ok := input.(time.Duration) if ok { - return duration + return duration, nil } value := reflect.ValueOf(input) kind := reflect.TypeOf(input).Kind() if reflect.Int <= kind && kind <= reflect.Int64 { - return time.Duration(value.Int()) * time.Second + return time.Duration(value.Int()) * 
time.Second, nil } else if reflect.Uint <= kind && kind <= reflect.Uint64 { - return time.Duration(value.Uint()) * time.Second + return time.Duration(value.Uint()) * time.Second, nil } else if reflect.Float32 <= kind && kind <= reflect.Float64 { - return time.Duration(value.Float() * float64(time.Second)) + return time.Duration(value.Float() * float64(time.Second)), nil } else if reflect.String == kind { duration, err := time.ParseDuration(value.String()) if err != nil { - panic(fmt.Sprintf("%#v is not a valid parsable duration string.", input)) + return 0, fmt.Errorf("%#v is not a valid parsable duration string: %w", input, err) } - return duration + return duration, nil } - panic(fmt.Sprintf("%v is not a valid interval. Must be time.Duration, parsable duration string or a number.", input)) + return 0, fmt.Errorf("%#v is not a valid interval. Must be a time.Duration, a parsable duration string, or a number.", input) } diff --git a/vendor/github.com/onsi/gomega/internal/gomega.go b/vendor/github.com/onsi/gomega/internal/gomega.go index d26a674..de1f4f3 100644 --- a/vendor/github.com/onsi/gomega/internal/gomega.go +++ b/vendor/github.com/onsi/gomega/internal/gomega.go @@ -1,6 +1,7 @@ package internal import ( + "context" "time" "github.com/onsi/gomega/types" @@ -51,38 +52,64 @@ func (g *Gomega) ExpectWithOffset(offset int, actual interface{}, extra ...inter return NewAssertion(actual, g, offset, extra...) } -func (g *Gomega) Eventually(actual interface{}, intervals ...interface{}) types.AsyncAssertion { - return g.EventuallyWithOffset(0, actual, intervals...) +func (g *Gomega) Eventually(actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion { + return g.makeAsyncAssertion(AsyncAssertionTypeEventually, 0, actualOrCtx, args...) } -func (g *Gomega) EventuallyWithOffset(offset int, actual interface{}, intervals ...interface{}) types.AsyncAssertion { - timeoutInterval := g.DurationBundle.EventuallyTimeout - pollingInterval := g.DurationBundle.EventuallyPollingInterval +func (g *Gomega) EventuallyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion { + return g.makeAsyncAssertion(AsyncAssertionTypeEventually, offset, actualOrCtx, args...) +} + +func (g *Gomega) Consistently(actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion { + return g.makeAsyncAssertion(AsyncAssertionTypeConsistently, 0, actualOrCtx, args...) +} + +func (g *Gomega) ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion { + return g.makeAsyncAssertion(AsyncAssertionTypeConsistently, offset, actualOrCtx, args...) 
+}
+
+func (g *Gomega) makeAsyncAssertion(asyncAssertionType AsyncAssertionType, offset int, actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion {
+	baseOffset := 3
+	timeoutInterval := -time.Duration(1)
+	pollingInterval := -time.Duration(1)
+	intervals := []interface{}{}
+	var ctx context.Context
+
+	actual := actualOrCtx
+	startingIndex := 0
+	if _, isCtx := actualOrCtx.(context.Context); isCtx && len(args) > 0 {
+		// the first argument is a context, we should accept it as the context _only if_ it is **not** the only argument **and** the second argument is not a parseable duration
+		// this is due to an unfortunate ambiguity in early versions of Gomega in which multi-type durations are allowed after the actual
+		if _, err := toDuration(args[0]); err != nil {
+			ctx = actualOrCtx.(context.Context)
+			actual = args[0]
+			startingIndex = 1
+		}
+	}
+
+	for _, arg := range args[startingIndex:] {
+		switch v := arg.(type) {
+		case context.Context:
+			ctx = v
+		default:
+			intervals = append(intervals, arg)
+		}
+	}
+
+	var err error
 	if len(intervals) > 0 {
-		timeoutInterval = toDuration(intervals[0])
+		timeoutInterval, err = toDuration(intervals[0])
+		if err != nil {
+			g.Fail(err.Error(), offset+baseOffset)
+		}
 	}
 	if len(intervals) > 1 {
-		pollingInterval = toDuration(intervals[1])
+		pollingInterval, err = toDuration(intervals[1])
+		if err != nil {
+			g.Fail(err.Error(), offset+baseOffset)
+		}
 	}
 
-	return NewAsyncAssertion(AsyncAssertionTypeEventually, actual, g, timeoutInterval, pollingInterval, offset)
-}
-
-func (g *Gomega) Consistently(actual interface{}, intervals ...interface{}) types.AsyncAssertion {
-	return g.ConsistentlyWithOffset(0, actual, intervals...)
-}
-
-func (g *Gomega) ConsistentlyWithOffset(offset int, actual interface{}, intervals ...interface{}) types.AsyncAssertion {
-	timeoutInterval := g.DurationBundle.ConsistentlyDuration
-	pollingInterval := g.DurationBundle.ConsistentlyPollingInterval
-	if len(intervals) > 0 {
-		timeoutInterval = toDuration(intervals[0])
-	}
-	if len(intervals) > 1 {
-		pollingInterval = toDuration(intervals[1])
-	}
-
-	return NewAsyncAssertion(AsyncAssertionTypeConsistently, actual, g, timeoutInterval, pollingInterval, offset)
+	return NewAsyncAssertion(asyncAssertionType, actual, g, timeoutInterval, pollingInterval, 1, ctx, offset)
 }
 
 func (g *Gomega) SetDefaultEventuallyTimeout(t time.Duration) {
diff --git a/vendor/github.com/onsi/gomega/internal/polling_signal_error.go b/vendor/github.com/onsi/gomega/internal/polling_signal_error.go
new file mode 100644
index 0000000..83b04b1
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/internal/polling_signal_error.go
@@ -0,0 +1,106 @@
+package internal
+
+import (
+	"errors"
+	"fmt"
+	"time"
+)
+
+type PollingSignalErrorType int
+
+const (
+	PollingSignalErrorTypeStopTrying PollingSignalErrorType = iota
+	PollingSignalErrorTypeTryAgainAfter
+)
+
+type PollingSignalError interface {
+	error
+	Wrap(err error) PollingSignalError
+	Attach(description string, obj any) PollingSignalError
+	Now()
+}
+
+var StopTrying = func(message string) PollingSignalError {
+	return &PollingSignalErrorImpl{
+		message:                message,
+		pollingSignalErrorType: PollingSignalErrorTypeStopTrying,
+	}
+}
+
+var TryAgainAfter = func(duration time.Duration) PollingSignalError {
+	return &PollingSignalErrorImpl{
+		message:                fmt.Sprintf("told to try again after %s", duration),
+		duration:               duration,
+		pollingSignalErrorType: PollingSignalErrorTypeTryAgainAfter,
+	}
+}
+
+type PollingSignalErrorAttachment struct {
+	Description string
+
Object any +} + +type PollingSignalErrorImpl struct { + message string + wrappedErr error + pollingSignalErrorType PollingSignalErrorType + duration time.Duration + Attachments []PollingSignalErrorAttachment +} + +func (s *PollingSignalErrorImpl) Wrap(err error) PollingSignalError { + s.wrappedErr = err + return s +} + +func (s *PollingSignalErrorImpl) Attach(description string, obj any) PollingSignalError { + s.Attachments = append(s.Attachments, PollingSignalErrorAttachment{description, obj}) + return s +} + +func (s *PollingSignalErrorImpl) Error() string { + if s.wrappedErr == nil { + return s.message + } else { + return s.message + ": " + s.wrappedErr.Error() + } +} + +func (s *PollingSignalErrorImpl) Unwrap() error { + if s == nil { + return nil + } + return s.wrappedErr +} + +func (s *PollingSignalErrorImpl) Now() { + panic(s) +} + +func (s *PollingSignalErrorImpl) IsStopTrying() bool { + return s.pollingSignalErrorType == PollingSignalErrorTypeStopTrying +} + +func (s *PollingSignalErrorImpl) IsTryAgainAfter() bool { + return s.pollingSignalErrorType == PollingSignalErrorTypeTryAgainAfter +} + +func (s *PollingSignalErrorImpl) TryAgainDuration() time.Duration { + return s.duration +} + +func AsPollingSignalError(actual interface{}) (*PollingSignalErrorImpl, bool) { + if actual == nil { + return nil, false + } + if actualErr, ok := actual.(error); ok { + var target *PollingSignalErrorImpl + if errors.As(actualErr, &target) { + return target, true + } else { + return nil, false + } + } + + return nil, false +} diff --git a/vendor/github.com/onsi/gomega/matchers.go b/vendor/github.com/onsi/gomega/matchers.go index d6a0990..44056ad 100644 --- a/vendor/github.com/onsi/gomega/matchers.go +++ b/vendor/github.com/onsi/gomega/matchers.go @@ -8,27 +8,28 @@ import ( "github.com/onsi/gomega/types" ) -//Equal uses reflect.DeepEqual to compare actual with expected. Equal is strict about -//types when performing comparisons. -//It is an error for both actual and expected to be nil. Use BeNil() instead. +// Equal uses reflect.DeepEqual to compare actual with expected. Equal is strict about +// types when performing comparisons. +// It is an error for both actual and expected to be nil. Use BeNil() instead. func Equal(expected interface{}) types.GomegaMatcher { return &matchers.EqualMatcher{ Expected: expected, } } -//BeEquivalentTo is more lax than Equal, allowing equality between different types. -//This is done by converting actual to have the type of expected before -//attempting equality with reflect.DeepEqual. -//It is an error for actual and expected to be nil. Use BeNil() instead. +// BeEquivalentTo is more lax than Equal, allowing equality between different types. +// This is done by converting actual to have the type of expected before +// attempting equality with reflect.DeepEqual. +// It is an error for actual and expected to be nil. Use BeNil() instead. func BeEquivalentTo(expected interface{}) types.GomegaMatcher { return &matchers.BeEquivalentToMatcher{ Expected: expected, } } -//BeComparableTo uses gocmp.Equal to compare. You can pass cmp.Option as options. -//It is an error for actual and expected to be nil. Use BeNil() instead. +// BeComparableTo uses gocmp.Equal from github.com/google/go-cmp (instead of reflect.DeepEqual) to perform a deep comparison. +// You can pass cmp.Option as options. +// It is an error for actual and expected to be nil. Use BeNil() instead. 
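For illustration, a hedged sketch of the BeComparableTo usage documented above, with a cmp.Option (cmpopts.IgnoreFields comes from github.com/google/go-cmp/cmp/cmpopts; the User type and values here are hypothetical):

	type User struct {
		Name      string
		UpdatedAt time.Time
	}
	got := User{Name: "Jane", UpdatedAt: time.Now()}
	want := User{Name: "Jane"}
	// Deep-compare the two structs while ignoring the volatile timestamp field.
	Expect(got).To(BeComparableTo(want, cmpopts.IgnoreFields(User{}, "UpdatedAt")))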
func BeComparableTo(expected interface{}, opts ...cmp.Option) types.GomegaMatcher { return &matchers.BeComparableToMatcher{ Expected: expected, @@ -36,116 +37,124 @@ func BeComparableTo(expected interface{}, opts ...cmp.Option) types.GomegaMatche } } -//BeIdenticalTo uses the == operator to compare actual with expected. -//BeIdenticalTo is strict about types when performing comparisons. -//It is an error for both actual and expected to be nil. Use BeNil() instead. +// BeIdenticalTo uses the == operator to compare actual with expected. +// BeIdenticalTo is strict about types when performing comparisons. +// It is an error for both actual and expected to be nil. Use BeNil() instead. func BeIdenticalTo(expected interface{}) types.GomegaMatcher { return &matchers.BeIdenticalToMatcher{ Expected: expected, } } -//BeNil succeeds if actual is nil +// BeNil succeeds if actual is nil func BeNil() types.GomegaMatcher { return &matchers.BeNilMatcher{} } -//BeTrue succeeds if actual is true +// BeTrue succeeds if actual is true func BeTrue() types.GomegaMatcher { return &matchers.BeTrueMatcher{} } -//BeFalse succeeds if actual is false +// BeFalse succeeds if actual is false func BeFalse() types.GomegaMatcher { return &matchers.BeFalseMatcher{} } -//HaveOccurred succeeds if actual is a non-nil error -//The typical Go error checking pattern looks like: -// err := SomethingThatMightFail() -// Expect(err).ShouldNot(HaveOccurred()) +// HaveOccurred succeeds if actual is a non-nil error +// The typical Go error checking pattern looks like: +// +// err := SomethingThatMightFail() +// Expect(err).ShouldNot(HaveOccurred()) func HaveOccurred() types.GomegaMatcher { return &matchers.HaveOccurredMatcher{} } -//Succeed passes if actual is a nil error -//Succeed is intended to be used with functions that return a single error value. Instead of -// err := SomethingThatMightFail() -// Expect(err).ShouldNot(HaveOccurred()) +// Succeed passes if actual is a nil error +// Succeed is intended to be used with functions that return a single error value. Instead of // -//You can write: -// Expect(SomethingThatMightFail()).Should(Succeed()) +// err := SomethingThatMightFail() +// Expect(err).ShouldNot(HaveOccurred()) // -//It is a mistake to use Succeed with a function that has multiple return values. Gomega's Ω and Expect -//functions automatically trigger failure if any return values after the first return value are non-zero/non-nil. -//This means that Ω(MultiReturnFunc()).ShouldNot(Succeed()) can never pass. +// You can write: +// +// Expect(SomethingThatMightFail()).Should(Succeed()) +// +// It is a mistake to use Succeed with a function that has multiple return values. Gomega's Ω and Expect +// functions automatically trigger failure if any return values after the first return value are non-zero/non-nil. +// This means that Ω(MultiReturnFunc()).ShouldNot(Succeed()) can never pass. func Succeed() types.GomegaMatcher { return &matchers.SucceedMatcher{} } -//MatchError succeeds if actual is a non-nil error that matches the passed in string/error. +// MatchError succeeds if actual is a non-nil error that matches the passed in string/error. 
// -//These are valid use-cases: -// Expect(err).Should(MatchError("an error")) //asserts that err.Error() == "an error" -// Expect(err).Should(MatchError(SomeError)) //asserts that err == SomeError (via reflect.DeepEqual) +// These are valid use-cases: // -//It is an error for err to be nil or an object that does not implement the Error interface +// Expect(err).Should(MatchError("an error")) //asserts that err.Error() == "an error" +// Expect(err).Should(MatchError(SomeError)) //asserts that err == SomeError (via reflect.DeepEqual) +// +// It is an error for err to be nil or an object that does not implement the Error interface func MatchError(expected interface{}) types.GomegaMatcher { return &matchers.MatchErrorMatcher{ Expected: expected, } } -//BeClosed succeeds if actual is a closed channel. -//It is an error to pass a non-channel to BeClosed, it is also an error to pass nil +// BeClosed succeeds if actual is a closed channel. +// It is an error to pass a non-channel to BeClosed, it is also an error to pass nil // -//In order to check whether or not the channel is closed, Gomega must try to read from the channel -//(even in the `ShouldNot(BeClosed())` case). You should keep this in mind if you wish to make subsequent assertions about -//values coming down the channel. +// In order to check whether or not the channel is closed, Gomega must try to read from the channel +// (even in the `ShouldNot(BeClosed())` case). You should keep this in mind if you wish to make subsequent assertions about +// values coming down the channel. // -//Also, if you are testing that a *buffered* channel is closed you must first read all values out of the channel before -//asserting that it is closed (it is not possible to detect that a buffered-channel has been closed until all its buffered values are read). +// Also, if you are testing that a *buffered* channel is closed you must first read all values out of the channel before +// asserting that it is closed (it is not possible to detect that a buffered-channel has been closed until all its buffered values are read). // -//Finally, as a corollary: it is an error to check whether or not a send-only channel is closed. +// Finally, as a corollary: it is an error to check whether or not a send-only channel is closed. func BeClosed() types.GomegaMatcher { return &matchers.BeClosedMatcher{} } -//Receive succeeds if there is a value to be received on actual. -//Actual must be a channel (and cannot be a send-only channel) -- anything else is an error. +// Receive succeeds if there is a value to be received on actual. +// Actual must be a channel (and cannot be a send-only channel) -- anything else is an error. // -//Receive returns immediately and never blocks: +// Receive returns immediately and never blocks: // -//- If there is nothing on the channel `c` then Expect(c).Should(Receive()) will fail and Ω(c).ShouldNot(Receive()) will pass. +// - If there is nothing on the channel `c` then Expect(c).Should(Receive()) will fail and Ω(c).ShouldNot(Receive()) will pass. // -//- If the channel `c` is closed then Expect(c).Should(Receive()) will fail and Ω(c).ShouldNot(Receive()) will pass. +// - If the channel `c` is closed then Expect(c).Should(Receive()) will fail and Ω(c).ShouldNot(Receive()) will pass. // -//- If there is something on the channel `c` ready to be read, then Expect(c).Should(Receive()) will pass and Ω(c).ShouldNot(Receive()) will fail. 
+// - If there is something on the channel `c` ready to be read, then Expect(c).Should(Receive()) will pass and Ω(c).ShouldNot(Receive()) will fail.
 //
-//If you have a go-routine running in the background that will write to channel `c` you can:
-//	Eventually(c).Should(Receive())
+// If you have a go-routine running in the background that will write to channel `c` you can:
 //
-//This will timeout if nothing gets sent to `c` (you can modify the timeout interval as you normally do with `Eventually`)
+//	Eventually(c).Should(Receive())
 //
-//A similar use-case is to assert that no go-routine writes to a channel (for a period of time). You can do this with `Consistently`:
-//	Consistently(c).ShouldNot(Receive())
+// This will timeout if nothing gets sent to `c` (you can modify the timeout interval as you normally do with `Eventually`)
 //
-//You can pass `Receive` a matcher. If you do so, it will match the received object against the matcher. For example:
-//	Expect(c).Should(Receive(Equal("foo")))
+// A similar use-case is to assert that no go-routine writes to a channel (for a period of time). You can do this with `Consistently`:
 //
-//When given a matcher, `Receive` will always fail if there is nothing to be received on the channel.
+//	Consistently(c).ShouldNot(Receive())
 //
-//Passing Receive a matcher is especially useful when paired with Eventually:
+// You can pass `Receive` a matcher. If you do so, it will match the received object against the matcher. For example:
 //
-//	Eventually(c).Should(Receive(ContainSubstring("bar")))
+//	Expect(c).Should(Receive(Equal("foo")))
 //
-//will repeatedly attempt to pull values out of `c` until a value matching "bar" is received.
+// When given a matcher, `Receive` will always fail if there is nothing to be received on the channel.
 //
-//Finally, if you want to have a reference to the value *sent* to the channel you can pass the `Receive` matcher a pointer to a variable of the appropriate type:
-//	var myThing thing
-//	Eventually(thingChan).Should(Receive(&myThing))
-//	Expect(myThing.Sprocket).Should(Equal("foo"))
-//	Expect(myThing.IsValid()).Should(BeTrue())
+// Passing Receive a matcher is especially useful when paired with Eventually:
+//
+//	Eventually(c).Should(Receive(ContainSubstring("bar")))
+//
+// will repeatedly attempt to pull values out of `c` until a value matching "bar" is received.
+//
+// Finally, if you want to have a reference to the value *sent* to the channel you can pass the `Receive` matcher a pointer to a variable of the appropriate type:
+//
+//	var myThing thing
+//	Eventually(thingChan).Should(Receive(&myThing))
+//	Expect(myThing.Sprocket).Should(Equal("foo"))
+//	Expect(myThing.IsValid()).Should(BeTrue())
 func Receive(args ...interface{}) types.GomegaMatcher {
 	var arg interface{}
 	if len(args) > 0 {
@@ -157,27 +166,27 @@ func Receive(args ...interface{}) types.GomegaMatcher {
 	}
 }
 
-//BeSent succeeds if a value can be sent to actual.
-//Actual must be a channel (and cannot be a receive-only channel) that can sent the type of the value passed into BeSent -- anything else is an error.
-//In addition, actual must not be closed.
+// BeSent succeeds if a value can be sent to actual.
+// Actual must be a channel (and cannot be a receive-only channel) that can send the type of the value passed into BeSent -- anything else is an error.
+// In addition, actual must not be closed.
 //
-//BeSent never blocks:
 //
-//- If the channel `c` is not ready to receive then Expect(c).Should(BeSent("foo")) will fail immediately
-//- If the channel `c` is eventually ready to receive then Eventually(c).Should(BeSent("foo")) will succeed.. presuming the channel becomes ready to receive before Eventually's timeout
-//- If the channel `c` is closed then Expect(c).Should(BeSent("foo")) and Ω(c).ShouldNot(BeSent("foo")) will both fail immediately
+// - If the channel `c` is not ready to receive then Expect(c).Should(BeSent("foo")) will fail immediately
+// - If the channel `c` is eventually ready to receive then Eventually(c).Should(BeSent("foo")) will succeed, presuming the channel becomes ready to receive before Eventually's timeout
+// - If the channel `c` is closed then Expect(c).Should(BeSent("foo")) and Ω(c).ShouldNot(BeSent("foo")) will both fail immediately
+//
-//Of course, the value is actually sent to the channel. The point of `BeSent` is less to make an assertion about the availability of the channel (which is typically an implementation detail that your test should not be concerned with).
-//Rather, the point of `BeSent` is to make it possible to easily and expressively write tests that can timeout on blocked channel sends.
+// Of course, the value is actually sent to the channel. The point of `BeSent` is less to make an assertion about the availability of the channel (which is typically an implementation detail that your test should not be concerned with).
+// Rather, the point of `BeSent` is to make it possible to easily and expressively write tests that can timeout on blocked channel sends.
 func BeSent(arg interface{}) types.GomegaMatcher {
 	return &matchers.BeSentMatcher{
 		Arg: arg,
 	}
 }
 
-//MatchRegexp succeeds if actual is a string or stringer that matches the
-//passed-in regexp. Optional arguments can be provided to construct a regexp
-//via fmt.Sprintf().
+// MatchRegexp succeeds if actual is a string or stringer that matches the
+// passed-in regexp. Optional arguments can be provided to construct a regexp
+// via fmt.Sprintf().
 func MatchRegexp(regexp string, args ...interface{}) types.GomegaMatcher {
 	return &matchers.MatchRegexpMatcher{
 		Regexp: regexp,
@@ -185,9 +194,9 @@ func MatchRegexp(regexp string, args ...interface{}) types.GomegaMatcher {
 	}
 }
 
-//ContainSubstring succeeds if actual is a string or stringer that contains the
-//passed-in substring. Optional arguments can be provided to construct the substring
-//via fmt.Sprintf().
+// ContainSubstring succeeds if actual is a string or stringer that contains the
+// passed-in substring. Optional arguments can be provided to construct the substring
+// via fmt.Sprintf().
 func ContainSubstring(substr string, args ...interface{}) types.GomegaMatcher {
 	return &matchers.ContainSubstringMatcher{
 		Substr: substr,
@@ -195,9 +204,9 @@ func ContainSubstring(substr string, args ...interface{}) types.GomegaMatcher {
 	}
 }
 
-//HavePrefix succeeds if actual is a string or stringer that contains the
-//passed-in string as a prefix. Optional arguments can be provided to construct
-//via fmt.Sprintf().
+// HavePrefix succeeds if actual is a string or stringer that contains the
+// passed-in string as a prefix. Optional arguments can be provided to construct
+// via fmt.Sprintf().
func HavePrefix(prefix string, args ...interface{}) types.GomegaMatcher { return &matchers.HavePrefixMatcher{ Prefix: prefix, @@ -205,9 +214,9 @@ func HavePrefix(prefix string, args ...interface{}) types.GomegaMatcher { } } -//HaveSuffix succeeds if actual is a string or stringer that contains the -//passed-in string as a suffix. Optional arguments can be provided to construct -//via fmt.Sprintf(). +// HaveSuffix succeeds if actual is a string or stringer that contains the +// passed-in string as a suffix. Optional arguments can be provided to construct +// via fmt.Sprintf(). func HaveSuffix(suffix string, args ...interface{}) types.GomegaMatcher { return &matchers.HaveSuffixMatcher{ Suffix: suffix, @@ -215,73 +224,74 @@ func HaveSuffix(suffix string, args ...interface{}) types.GomegaMatcher { } } -//MatchJSON succeeds if actual is a string or stringer of JSON that matches -//the expected JSON. The JSONs are decoded and the resulting objects are compared via -//reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter. +// MatchJSON succeeds if actual is a string or stringer of JSON that matches +// the expected JSON. The JSONs are decoded and the resulting objects are compared via +// reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter. func MatchJSON(json interface{}) types.GomegaMatcher { return &matchers.MatchJSONMatcher{ JSONToMatch: json, } } -//MatchXML succeeds if actual is a string or stringer of XML that matches -//the expected XML. The XMLs are decoded and the resulting objects are compared via -//reflect.DeepEqual so things like whitespaces shouldn't matter. +// MatchXML succeeds if actual is a string or stringer of XML that matches +// the expected XML. The XMLs are decoded and the resulting objects are compared via +// reflect.DeepEqual so things like whitespaces shouldn't matter. func MatchXML(xml interface{}) types.GomegaMatcher { return &matchers.MatchXMLMatcher{ XMLToMatch: xml, } } -//MatchYAML succeeds if actual is a string or stringer of YAML that matches -//the expected YAML. The YAML's are decoded and the resulting objects are compared via -//reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter. +// MatchYAML succeeds if actual is a string or stringer of YAML that matches +// the expected YAML. The YAML's are decoded and the resulting objects are compared via +// reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter. func MatchYAML(yaml interface{}) types.GomegaMatcher { return &matchers.MatchYAMLMatcher{ YAMLToMatch: yaml, } } -//BeEmpty succeeds if actual is empty. Actual must be of type string, array, map, chan, or slice. +// BeEmpty succeeds if actual is empty. Actual must be of type string, array, map, chan, or slice. func BeEmpty() types.GomegaMatcher { return &matchers.BeEmptyMatcher{} } -//HaveLen succeeds if actual has the passed-in length. Actual must be of type string, array, map, chan, or slice. +// HaveLen succeeds if actual has the passed-in length. Actual must be of type string, array, map, chan, or slice. func HaveLen(count int) types.GomegaMatcher { return &matchers.HaveLenMatcher{ Count: count, } } -//HaveCap succeeds if actual has the passed-in capacity. Actual must be of type array, chan, or slice. +// HaveCap succeeds if actual has the passed-in capacity. Actual must be of type array, chan, or slice. 
 func HaveCap(count int) types.GomegaMatcher {
 	return &matchers.HaveCapMatcher{
 		Count: count,
 	}
 }
 
-//BeZero succeeds if actual is the zero value for its type or if actual is nil.
+// BeZero succeeds if actual is the zero value for its type or if actual is nil.
 func BeZero() types.GomegaMatcher {
 	return &matchers.BeZeroMatcher{}
 }
 
-//ContainElement succeeds if actual contains the passed in element. By default
-//ContainElement() uses Equal() to perform the match, however a matcher can be
-//passed in instead:
-//	Expect([]string{"Foo", "FooBar"}).Should(ContainElement(ContainSubstring("Bar")))
+// ContainElement succeeds if actual contains the passed in element. By default
+// ContainElement() uses Equal() to perform the match, however a matcher can be
+// passed in instead:
 //
-//Actual must be an array, slice or map. For maps, ContainElement searches
-//through the map's values.
+//	Expect([]string{"Foo", "FooBar"}).Should(ContainElement(ContainSubstring("Bar")))
 //
-//If you want to have a copy of the matching element(s) found you can pass a
-//pointer to a variable of the appropriate type. If the variable isn't a slice
-//or map, then exactly one match will be expected and returned. If the variable
-//is a slice or map, then at least one match is expected and all matches will be
-//stored in the variable.
+// Actual must be an array, slice or map. For maps, ContainElement searches
+// through the map's values.
 //
-//	var findings []string
-//	Expect([]string{"Foo", "FooBar"}).Should(ContainElement(ContainSubString("Bar", &findings)))
+// If you want to have a copy of the matching element(s) found you can pass a
+// pointer to a variable of the appropriate type. If the variable isn't a slice
+// or map, then exactly one match will be expected and returned. If the variable
+// is a slice or map, then at least one match is expected and all matches will be
+// stored in the variable.
+//
+//	var findings []string
+//	Expect([]string{"Foo", "FooBar"}).Should(ContainElement(ContainSubstring("Bar"), &findings))
 func ContainElement(element interface{}, result ...interface{}) types.GomegaMatcher {
 	return &matchers.ContainElementMatcher{
 		Element: element,
@@ -289,86 +299,116 @@ func ContainElement(element interface{}, result ...interface{}) types.GomegaMatc
 	}
 }
 
-//BeElementOf succeeds if actual is contained in the passed in elements.
-//BeElementOf() always uses Equal() to perform the match.
-//When the passed in elements are comprised of a single element that is either an Array or Slice, BeElementOf() behaves
-//as the reverse of ContainElement() that operates with Equal() to perform the match.
-//	Expect(2).Should(BeElementOf([]int{1, 2}))
-//	Expect(2).Should(BeElementOf([2]int{1, 2}))
-//Otherwise, BeElementOf() provides a syntactic sugar for Or(Equal(_), Equal(_), ...):
-//	Expect(2).Should(BeElementOf(1, 2))
+// BeElementOf succeeds if actual is contained in the passed in elements.
+// BeElementOf() always uses Equal() to perform the match.
+// When the passed in elements are comprised of a single element that is either an Array or Slice, BeElementOf() behaves
+// as the reverse of ContainElement() that operates with Equal() to perform the match.
 //
-//Actual must be typed.
+//	Expect(2).Should(BeElementOf([]int{1, 2}))
+//	Expect(2).Should(BeElementOf([2]int{1, 2}))
+//
+// Otherwise, BeElementOf() provides a syntactic sugar for Or(Equal(_), Equal(_), ...):
+//
+//	Expect(2).Should(BeElementOf(1, 2))
+//
+// Actual must be typed.
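For illustration, a hedged sketch of ContainElement's result-pointer form described above (the words slice is hypothetical; note the pointer is an argument to ContainElement itself, not to the inner matcher):

	var findings []string
	words := []string{"Foo", "FooBar", "Bar"}
	// findings is a slice, so every element matched by ContainSubstring("Bar")
	// is collected into it rather than requiring exactly one match.
	Expect(words).To(ContainElement(ContainSubstring("Bar"), &findings))
	Expect(findings).To(ConsistOf("FooBar", "Bar"))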
 func BeElementOf(elements ...interface{}) types.GomegaMatcher {
 	return &matchers.BeElementOfMatcher{
 		Elements: elements,
 	}
 }
 
-//ConsistOf succeeds if actual contains precisely the elements passed into the matcher. The ordering of the elements does not matter.
-//By default ConsistOf() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples:
+// BeKeyOf succeeds if actual is contained in the keys of the passed in map.
+// BeKeyOf() always uses Equal() to perform the match between actual and the map keys.
 //
-//	Expect([]string{"Foo", "FooBar"}).Should(ConsistOf("FooBar", "Foo"))
-//	Expect([]string{"Foo", "FooBar"}).Should(ConsistOf(ContainSubstring("Bar"), "Foo"))
-//	Expect([]string{"Foo", "FooBar"}).Should(ConsistOf(ContainSubstring("Foo"), ContainSubstring("Foo")))
+//	Expect("foo").Should(BeKeyOf(map[string]bool{"foo": true, "bar": false}))
+func BeKeyOf(element interface{}) types.GomegaMatcher {
+	return &matchers.BeKeyOfMatcher{
+		Map: element,
+	}
+}
+
+// ConsistOf succeeds if actual contains precisely the elements passed into the matcher. The ordering of the elements does not matter.
+// By default ConsistOf() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples:
 //
-//Actual must be an array, slice or map. For maps, ConsistOf matches against the map's values.
+//	Expect([]string{"Foo", "FooBar"}).Should(ConsistOf("FooBar", "Foo"))
+//	Expect([]string{"Foo", "FooBar"}).Should(ConsistOf(ContainSubstring("Bar"), "Foo"))
+//	Expect([]string{"Foo", "FooBar"}).Should(ConsistOf(ContainSubstring("Foo"), ContainSubstring("Foo")))
 //
-//You typically pass variadic arguments to ConsistOf (as in the examples above). However, if you need to pass in a slice you can provided that it
-//is the only element passed in to ConsistOf:
+// Actual must be an array, slice or map. For maps, ConsistOf matches against the map's values.
 //
-//	Expect([]string{"Foo", "FooBar"}).Should(ConsistOf([]string{"FooBar", "Foo"}))
+// You typically pass variadic arguments to ConsistOf (as in the examples above). However, if you need to pass in a slice you can, provided that it
+// is the only element passed in to ConsistOf:
 //
-//Note that Go's type system does not allow you to write this as ConsistOf([]string{"FooBar", "Foo"}...) as []string and []interface{} are different types - hence the need for this special rule.
+//	Expect([]string{"Foo", "FooBar"}).Should(ConsistOf([]string{"FooBar", "Foo"}))
+//
+// Note that Go's type system does not allow you to write this as ConsistOf([]string{"FooBar", "Foo"}...) as []string and []interface{} are different types - hence the need for this special rule.
 func ConsistOf(elements ...interface{}) types.GomegaMatcher {
 	return &matchers.ConsistOfMatcher{
 		Elements: elements,
 	}
 }
 
-//ContainElements succeeds if actual contains the passed in elements. The ordering of the elements does not matter.
-//By default ContainElements() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples:
+// HaveExactElements succeeds if actual contains elements that precisely match the elements passed into the matcher. The ordering of the elements does matter.
+// By default HaveExactElements() uses Equal() to match the elements, however custom matchers can be passed in instead.
Here are some examples: // -// Expect([]string{"Foo", "FooBar"}).Should(ContainElements("FooBar")) -// Expect([]string{"Foo", "FooBar"}).Should(ContainElements(ContainSubstring("Bar"), "Foo")) +// Expect([]string{"Foo", "FooBar"}).Should(HaveExactElements("Foo", "FooBar")) +// Expect([]string{"Foo", "FooBar"}).Should(HaveExactElements("Foo", ContainSubstring("Bar"))) +// Expect([]string{"Foo", "FooBar"}).Should(HaveExactElements(ContainSubstring("Foo"), ContainSubstring("Foo"))) // -//Actual must be an array, slice or map. -//For maps, ContainElements searches through the map's values. +// Actual must be an array or slice. +func HaveExactElements(elements ...interface{}) types.GomegaMatcher { + return &matchers.HaveExactElementsMatcher{ + Elements: elements, + } +} + +// ContainElements succeeds if actual contains the passed in elements. The ordering of the elements does not matter. +// By default ContainElements() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples: +// +// Expect([]string{"Foo", "FooBar"}).Should(ContainElements("FooBar")) +// Expect([]string{"Foo", "FooBar"}).Should(ContainElements(ContainSubstring("Bar"), "Foo")) +// +// Actual must be an array, slice or map. +// For maps, ContainElements searches through the map's values. func ContainElements(elements ...interface{}) types.GomegaMatcher { return &matchers.ContainElementsMatcher{ Elements: elements, } } -//HaveEach succeeds if actual solely contains elements that match the passed in element. -//Please note that if actual is empty, HaveEach always will succeed. -//By default HaveEach() uses Equal() to perform the match, however a -//matcher can be passed in instead: -// Expect([]string{"Foo", "FooBar"}).Should(HaveEach(ContainSubstring("Foo"))) +// HaveEach succeeds if actual solely contains elements that match the passed in element. +// Please note that if actual is empty, HaveEach always will succeed. +// By default HaveEach() uses Equal() to perform the match, however a +// matcher can be passed in instead: // -//Actual must be an array, slice or map. -//For maps, HaveEach searches through the map's values. +// Expect([]string{"Foo", "FooBar"}).Should(HaveEach(ContainSubstring("Foo"))) +// +// Actual must be an array, slice or map. +// For maps, HaveEach searches through the map's values. func HaveEach(element interface{}) types.GomegaMatcher { return &matchers.HaveEachMatcher{ Element: element, } } -//HaveKey succeeds if actual is a map with the passed in key. -//By default HaveKey uses Equal() to perform the match, however a -//matcher can be passed in instead: -// Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKey(MatchRegexp(`.+Foo$`))) +// HaveKey succeeds if actual is a map with the passed in key. +// By default HaveKey uses Equal() to perform the match, however a +// matcher can be passed in instead: +// +// Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKey(MatchRegexp(`.+Foo$`))) func HaveKey(key interface{}) types.GomegaMatcher { return &matchers.HaveKeyMatcher{ Key: key, } } -//HaveKeyWithValue succeeds if actual is a map with the passed in key and value. 
-//By default HaveKeyWithValue uses Equal() to perform the match, however a
-//matcher can be passed in instead:
-//  Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue("Foo", "Bar"))
-//  Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue(MatchRegexp(`.+Foo$`), "Bar"))
+// HaveKeyWithValue succeeds if actual is a map with the passed in key and value.
+// By default HaveKeyWithValue uses Equal() to perform the match, however a
+// matcher can be passed in instead:
+//
+//	Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue("Foo", "Bar"))
+//	Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue(MatchRegexp(`.+Foo$`), "Bar"))
func HaveKeyWithValue(key interface{}, value interface{}) types.GomegaMatcher {
	return &matchers.HaveKeyWithValueMatcher{
		Key: key,
@@ -376,27 +416,27 @@ func HaveKeyWithValue(key interface{}, value interface{}) types.GomegaMatcher {
	}
}

-//HaveField succeeds if actual is a struct and the value at the passed in field
-//matches the passed in matcher. By default HaveField used Equal() to perform the match,
-//however a matcher can be passed in in stead.
+// HaveField succeeds if actual is a struct and the value at the passed in field
+// matches the passed in matcher. By default HaveField uses Equal() to perform the match,
+// however a matcher can be passed in instead.
//
-//The field must be a string that resolves to the name of a field in the struct. Structs can be traversed
-//using the '.' delimiter. If the field ends with '()' a method named field is assumed to exist on the struct and is invoked.
-//Such methods must take no arguments and return a single value:
+// The field must be a string that resolves to the name of a field in the struct. Structs can be traversed
+// using the '.' delimiter. If the field ends with '()' a method named field is assumed to exist on the struct and is invoked.
+// Such methods must take no arguments and return a single value:
//
-//  type Book struct {
-//      Title string
-//      Author Person
-//  }
-//  type Person struct {
-//      FirstName string
-//      LastName string
-//      DOB time.Time
-//  }
-//  Expect(book).To(HaveField("Title", "Les Miserables"))
-//  Expect(book).To(HaveField("Title", ContainSubstring("Les"))
-//  Expect(book).To(HaveField("Author.FirstName", Equal("Victor"))
-//  Expect(book).To(HaveField("Author.DOB.Year()", BeNumerically("<", 1900))
+//	type Book struct {
+//		Title string
+//		Author Person
+//	}
+//	type Person struct {
+//		FirstName string
+//		LastName string
+//		DOB time.Time
+//	}
+//	Expect(book).To(HaveField("Title", "Les Miserables"))
+//	Expect(book).To(HaveField("Title", ContainSubstring("Les")))
+//	Expect(book).To(HaveField("Author.FirstName", Equal("Victor")))
+//	Expect(book).To(HaveField("Author.DOB.Year()", BeNumerically("<", 1900)))
func HaveField(field string, expected interface{}) types.GomegaMatcher {
	return &matchers.HaveFieldMatcher{
		Field: field,
@@ -410,7 +450,7 @@ func HaveField(field string, expected interface{}) types.GomegaMatcher {

// HaveExistingField can be combined with HaveField in order to cover use cases
// with optional fields. HaveField alone would trigger an error in such situations.
// -// Expect(MrHarmless).NotTo(And(HaveExistingField("Title"), HaveField("Title", "Supervillain"))) +// Expect(MrHarmless).NotTo(And(HaveExistingField("Title"), HaveField("Title", "Supervillain"))) func HaveExistingField(field string) types.GomegaMatcher { return &matchers.HaveExistingFieldMatcher{ Field: field, @@ -428,26 +468,27 @@ func HaveExistingField(field string) types.GomegaMatcher { // be a pointer (as gstruct.PointTo does) but instead also accepts non-pointer // and even interface values. // -// actual := 42 -// Expect(actual).To(HaveValue(42)) -// Expect(&actual).To(HaveValue(42)) +// actual := 42 +// Expect(actual).To(HaveValue(42)) +// Expect(&actual).To(HaveValue(42)) func HaveValue(matcher types.GomegaMatcher) types.GomegaMatcher { return &matchers.HaveValueMatcher{ Matcher: matcher, } } -//BeNumerically performs numerical assertions in a type-agnostic way. -//Actual and expected should be numbers, though the specific type of -//number is irrelevant (float32, float64, uint8, etc...). +// BeNumerically performs numerical assertions in a type-agnostic way. +// Actual and expected should be numbers, though the specific type of +// number is irrelevant (float32, float64, uint8, etc...). // -//There are six, self-explanatory, supported comparators: -// Expect(1.0).Should(BeNumerically("==", 1)) -// Expect(1.0).Should(BeNumerically("~", 0.999, 0.01)) -// Expect(1.0).Should(BeNumerically(">", 0.9)) -// Expect(1.0).Should(BeNumerically(">=", 1.0)) -// Expect(1.0).Should(BeNumerically("<", 3)) -// Expect(1.0).Should(BeNumerically("<=", 1.0)) +// There are six, self-explanatory, supported comparators: +// +// Expect(1.0).Should(BeNumerically("==", 1)) +// Expect(1.0).Should(BeNumerically("~", 0.999, 0.01)) +// Expect(1.0).Should(BeNumerically(">", 0.9)) +// Expect(1.0).Should(BeNumerically(">=", 1.0)) +// Expect(1.0).Should(BeNumerically("<", 3)) +// Expect(1.0).Should(BeNumerically("<=", 1.0)) func BeNumerically(comparator string, compareTo ...interface{}) types.GomegaMatcher { return &matchers.BeNumericallyMatcher{ Comparator: comparator, @@ -455,10 +496,11 @@ func BeNumerically(comparator string, compareTo ...interface{}) types.GomegaMatc } } -//BeTemporally compares time.Time's like BeNumerically -//Actual and expected must be time.Time. The comparators are the same as for BeNumerically -// Expect(time.Now()).Should(BeTemporally(">", time.Time{})) -// Expect(time.Now()).Should(BeTemporally("~", time.Now(), time.Second)) +// BeTemporally compares time.Time's like BeNumerically +// Actual and expected must be time.Time. The comparators are the same as for BeNumerically +// +// Expect(time.Now()).Should(BeTemporally(">", time.Time{})) +// Expect(time.Now()).Should(BeTemporally("~", time.Now(), time.Second)) func BeTemporally(comparator string, compareTo time.Time, threshold ...time.Duration) types.GomegaMatcher { return &matchers.BeTemporallyMatcher{ Comparator: comparator, @@ -467,58 +509,61 @@ func BeTemporally(comparator string, compareTo time.Time, threshold ...time.Dura } } -//BeAssignableToTypeOf succeeds if actual is assignable to the type of expected. -//It will return an error when one of the values is nil. 
-// Expect(0).Should(BeAssignableToTypeOf(0)) // Same values -// Expect(5).Should(BeAssignableToTypeOf(-1)) // different values same type -// Expect("foo").Should(BeAssignableToTypeOf("bar")) // different values same type -// Expect(struct{ Foo string }{}).Should(BeAssignableToTypeOf(struct{ Foo string }{})) +// BeAssignableToTypeOf succeeds if actual is assignable to the type of expected. +// It will return an error when one of the values is nil. +// +// Expect(0).Should(BeAssignableToTypeOf(0)) // Same values +// Expect(5).Should(BeAssignableToTypeOf(-1)) // different values same type +// Expect("foo").Should(BeAssignableToTypeOf("bar")) // different values same type +// Expect(struct{ Foo string }{}).Should(BeAssignableToTypeOf(struct{ Foo string }{})) func BeAssignableToTypeOf(expected interface{}) types.GomegaMatcher { return &matchers.AssignableToTypeOfMatcher{ Expected: expected, } } -//Panic succeeds if actual is a function that, when invoked, panics. -//Actual must be a function that takes no arguments and returns no results. +// Panic succeeds if actual is a function that, when invoked, panics. +// Actual must be a function that takes no arguments and returns no results. func Panic() types.GomegaMatcher { return &matchers.PanicMatcher{} } -//PanicWith succeeds if actual is a function that, when invoked, panics with a specific value. -//Actual must be a function that takes no arguments and returns no results. +// PanicWith succeeds if actual is a function that, when invoked, panics with a specific value. +// Actual must be a function that takes no arguments and returns no results. // -//By default PanicWith uses Equal() to perform the match, however a -//matcher can be passed in instead: -// Expect(fn).Should(PanicWith(MatchRegexp(`.+Foo$`))) +// By default PanicWith uses Equal() to perform the match, however a +// matcher can be passed in instead: +// +// Expect(fn).Should(PanicWith(MatchRegexp(`.+Foo$`))) func PanicWith(expected interface{}) types.GomegaMatcher { return &matchers.PanicMatcher{Expected: expected} } -//BeAnExistingFile succeeds if a file exists. -//Actual must be a string representing the abs path to the file being checked. +// BeAnExistingFile succeeds if a file exists. +// Actual must be a string representing the abs path to the file being checked. func BeAnExistingFile() types.GomegaMatcher { return &matchers.BeAnExistingFileMatcher{} } -//BeARegularFile succeeds if a file exists and is a regular file. -//Actual must be a string representing the abs path to the file being checked. +// BeARegularFile succeeds if a file exists and is a regular file. +// Actual must be a string representing the abs path to the file being checked. func BeARegularFile() types.GomegaMatcher { return &matchers.BeARegularFileMatcher{} } -//BeADirectory succeeds if a file exists and is a directory. -//Actual must be a string representing the abs path to the file being checked. +// BeADirectory succeeds if a file exists and is a directory. +// Actual must be a string representing the abs path to the file being checked. func BeADirectory() types.GomegaMatcher { return &matchers.BeADirectoryMatcher{} } -//HaveHTTPStatus succeeds if the Status or StatusCode field of an HTTP response matches. -//Actual must be either a *http.Response or *httptest.ResponseRecorder. -//Expected must be either an int or a string. 
-//  Expect(resp).Should(HaveHTTPStatus(http.StatusOK))   // asserts that resp.StatusCode == 200
-//  Expect(resp).Should(HaveHTTPStatus("404 Not Found")) // asserts that resp.Status == "404 Not Found"
-//  Expect(resp).Should(HaveHTTPStatus(http.StatusOK, http.StatusNoContent)) // asserts that resp.StatusCode == 200 || resp.StatusCode == 204
+// HaveHTTPStatus succeeds if the Status or StatusCode field of an HTTP response matches.
+// Actual must be either a *http.Response or *httptest.ResponseRecorder.
+// Expected must be either an int or a string.
+//
+//	Expect(resp).Should(HaveHTTPStatus(http.StatusOK))   // asserts that resp.StatusCode == 200
+//	Expect(resp).Should(HaveHTTPStatus("404 Not Found")) // asserts that resp.Status == "404 Not Found"
+//	Expect(resp).Should(HaveHTTPStatus(http.StatusOK, http.StatusNoContent)) // asserts that resp.StatusCode == 200 || resp.StatusCode == 204
func HaveHTTPStatus(expected ...interface{}) types.GomegaMatcher {
	return &matchers.HaveHTTPStatusMatcher{Expected: expected}
}
@@ -541,63 +586,70 @@ func HaveHTTPBody(expected interface{}) types.GomegaMatcher {
	return &matchers.HaveHTTPBodyMatcher{Expected: expected}
}

-//And succeeds only if all of the given matchers succeed.
-//The matchers are tried in order, and will fail-fast if one doesn't succeed.
-//  Expect("hi").To(And(HaveLen(2), Equal("hi"))
+// And succeeds only if all of the given matchers succeed.
+// The matchers are tried in order, and will fail-fast if one doesn't succeed.
//
-//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
+//	Expect("hi").To(And(HaveLen(2), Equal("hi")))
+//
+// And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
func And(ms ...types.GomegaMatcher) types.GomegaMatcher {
	return &matchers.AndMatcher{Matchers: ms}
}

-//SatisfyAll is an alias for And().
-//  Expect("hi").Should(SatisfyAll(HaveLen(2), Equal("hi")))
+// SatisfyAll is an alias for And().
+//
+//	Expect("hi").Should(SatisfyAll(HaveLen(2), Equal("hi")))
func SatisfyAll(matchers ...types.GomegaMatcher) types.GomegaMatcher {
	return And(matchers...)
}

-//Or succeeds if any of the given matchers succeed.
-//The matchers are tried in order and will return immediately upon the first successful match.
-//  Expect("hi").To(Or(HaveLen(3), HaveLen(2))
+// Or succeeds if any of the given matchers succeed.
+// The matchers are tried in order and will return immediately upon the first successful match.
//
-//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
+//	Expect("hi").To(Or(HaveLen(3), HaveLen(2)))
+//
+// And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
func Or(ms ...types.GomegaMatcher) types.GomegaMatcher {
	return &matchers.OrMatcher{Matchers: ms}
}

-//SatisfyAny is an alias for Or().
-//  Expect("hi").SatisfyAny(Or(HaveLen(3), HaveLen(2))
+// SatisfyAny is an alias for Or().
+//
+//	Expect("hi").Should(SatisfyAny(HaveLen(3), HaveLen(2)))
func SatisfyAny(matchers ...types.GomegaMatcher) types.GomegaMatcher {
	return Or(matchers...)
}

-//Not negates the given matcher; it succeeds if the given matcher fails.
-//  Expect(1).To(Not(Equal(2))
+// Not negates the given matcher; it succeeds if the given matcher fails.
//
-//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
+//	Expect(1).To(Not(Equal(2)))
+//
+// And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
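+//
+// For example (an illustrative sketch, not part of the upstream comment),
+// the combinators nest freely:
+//
+//	Expect("FooBar").To(And(ContainSubstring("Foo"), Not(Equal("Foo"))))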
func Not(matcher types.GomegaMatcher) types.GomegaMatcher {
	return &matchers.NotMatcher{Matcher: matcher}
}

-//WithTransform applies the `transform` to the actual value and matches it against `matcher`.
-//The given transform must be either a function of one parameter that returns one value or a
+// WithTransform applies the `transform` to the actual value and matches it against `matcher`.
+// The given transform must be either a function of one parameter that returns one value or a
// function of one parameter that returns two values, where the second value must be of the
// error type.
-//  var plus1 = func(i int) int { return i + 1 }
-//  Expect(1).To(WithTransform(plus1, Equal(2))
//
-//  var failingplus1 = func(i int) (int, error) { return 42, "this does not compute" }
-//  Expect(1).To(WithTransform(failingplus1, Equal(2)))
+//	var plus1 = func(i int) int { return i + 1 }
+//	Expect(1).To(WithTransform(plus1, Equal(2)))
//
-//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
+//	var failingplus1 = func(i int) (int, error) { return 42, errors.New("this does not compute") }
+//	Expect(1).To(WithTransform(failingplus1, Equal(2)))
+//
+// And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
func WithTransform(transform interface{}, matcher types.GomegaMatcher) types.GomegaMatcher {
	return matchers.NewWithTransformMatcher(transform, matcher)
}

-//Satisfy matches the actual value against the `predicate` function.
-//The given predicate must be a function of one paramter that returns bool.
-//  var isEven = func(i int) bool { return i%2 == 0 }
-//  Expect(2).To(Satisfy(isEven))
+// Satisfy matches the actual value against the `predicate` function.
+// The given predicate must be a function of one parameter that returns bool.
+//
+//	var isEven = func(i int) bool { return i%2 == 0 }
+//	Expect(2).To(Satisfy(isEven))
func Satisfy(predicate interface{}) types.GomegaMatcher {
	return matchers.NewSatisfyMatcher(predicate)
}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_key_of_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_key_of_matcher.go
new file mode 100644
index 0000000..449a291
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/be_key_of_matcher.go
@@ -0,0 +1,45 @@
+package matchers
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/onsi/gomega/format"
+)
+
+type BeKeyOfMatcher struct {
+	Map interface{}
+}
+
+func (matcher *BeKeyOfMatcher) Match(actual interface{}) (success bool, err error) {
+	if !isMap(matcher.Map) {
+		return false, fmt.Errorf("BeKeyOf matcher needs expected to be a map type")
+	}
+
+	if reflect.TypeOf(actual) == nil {
+		return false, fmt.Errorf("BeKeyOf matcher expects actual to be typed")
+	}
+
+	var lastError error
+	for _, key := range reflect.ValueOf(matcher.Map).MapKeys() {
+		matcher := &EqualMatcher{Expected: key.Interface()}
+		success, err := matcher.Match(actual)
+		if err != nil {
+			lastError = err
+			continue
+		}
+		if success {
+			return true, nil
+		}
+	}
+
+	return false, lastError
+}
+
+func (matcher *BeKeyOfMatcher) FailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "to be a key of", presentable(valuesOf(matcher.Map)))
+}
+
+func (matcher *BeKeyOfMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "not to be a key of", presentable(valuesOf(matcher.Map)))
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go b/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go
new file mode 100644
index 0000000..7cce776
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go
@@ -0,0 +1,83 @@
+package matchers
+
+import (
+	"fmt"
+
+	"github.com/onsi/gomega/format"
+)
+
+type mismatchFailure struct {
+	failure string
+	index   int
+}
+
+type HaveExactElementsMatcher struct {
+	Elements         []interface{}
+	mismatchFailures []mismatchFailure
+	missingIndex     int
+	extraIndex       int
+}
+
+func (matcher *HaveExactElementsMatcher) Match(actual interface{}) (success bool, err error) {
+	matcher.resetState()
+
+	if isMap(actual) {
+		return false, fmt.Errorf("HaveExactElements matcher doesn't support maps.  Got:\n%s", format.Object(actual, 1))
+	}
+
+	matchers := matchers(matcher.Elements)
+	values := valuesOf(actual)
+
+	lenMatchers := len(matchers)
+	lenValues := len(values)
+
+	for i := 0; i < lenMatchers || i < lenValues; i++ {
+		if i >= lenMatchers {
+			// Every remaining value is extra; record where they start.
+			matcher.extraIndex = i
+			break
+		}
+
+		if i >= lenValues {
+			matcher.missingIndex = i
+			return
+		}
+
+		elemMatcher := matchers[i].(omegaMatcher)
+		match, err := elemMatcher.Match(values[i])
+		if err != nil || !match {
+			matcher.mismatchFailures = append(matcher.mismatchFailures, mismatchFailure{
+				index:   i,
+				failure: elemMatcher.FailureMessage(values[i]),
+			})
+		}
+	}
+
+	return matcher.missingIndex+matcher.extraIndex+len(matcher.mismatchFailures) == 0, nil
+}
+
+func (matcher *HaveExactElementsMatcher) FailureMessage(actual interface{}) (message string) {
+	message = format.Message(actual, "to have exact elements with", presentable(matcher.Elements))
+	if matcher.missingIndex > 0 {
+		message = fmt.Sprintf("%s\nthe missing elements start from index %d", message, matcher.missingIndex)
+	}
+	if matcher.extraIndex > 0 {
+		message = fmt.Sprintf("%s\nthe extra elements start from index %d", message, matcher.extraIndex)
+	}
+	if len(matcher.mismatchFailures) != 0 {
+		message = 
fmt.Sprintf("%s\nthe mismatch indexes were:", message) + } + for _, mismatch := range matcher.mismatchFailures { + message = fmt.Sprintf("%s\n%d: %s", message, mismatch.index, mismatch.failure) + } + return +} + +func (matcher *HaveExactElementsMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to contain elements", presentable(matcher.Elements)) +} + +func (matcher *HaveExactElementsMatcher) resetState() { + matcher.mismatchFailures = nil + matcher.missingIndex = 0 + matcher.extraIndex = 0 +} diff --git a/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go index 5bcfdd2..22a1b67 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go @@ -31,5 +31,5 @@ func (matcher *HaveOccurredMatcher) FailureMessage(actual interface{}) (message } func (matcher *HaveOccurredMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return fmt.Sprintf("Unexpected error:\n%s\n%s\n%s", format.Object(actual, 1), format.IndentString(actual.(error).Error(), 1), "occurred") + return fmt.Sprintf("Unexpected error:\n%s\n%s", format.Object(actual, 1), "occurred") } diff --git a/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go index c8993a8..827475e 100644 --- a/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go @@ -25,7 +25,17 @@ func (matcher *MatchErrorMatcher) Match(actual interface{}) (success bool, err e expected := matcher.Expected if isError(expected) { - return reflect.DeepEqual(actualErr, expected) || errors.Is(actualErr, expected.(error)), nil + // first try the built-in errors.Is + if errors.Is(actualErr, expected.(error)) { + return true, nil + } + // if not, try DeepEqual along the error chain + for unwrapped := actualErr; unwrapped != nil; unwrapped = errors.Unwrap(unwrapped) { + if reflect.DeepEqual(unwrapped, expected) { + return true, nil + } + } + return false, nil } if isString(expected) { diff --git a/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go b/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go index 721ed55..327350f 100644 --- a/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go @@ -1,11 +1,16 @@ package matchers import ( + "errors" "fmt" "github.com/onsi/gomega/format" ) +type formattedGomegaError interface { + FormattedGomegaError() string +} + type SucceedMatcher struct { } @@ -25,7 +30,11 @@ func (matcher *SucceedMatcher) Match(actual interface{}) (success bool, err erro } func (matcher *SucceedMatcher) FailureMessage(actual interface{}) (message string) { - return fmt.Sprintf("Expected success, but got an error:\n%s\n%s", format.Object(actual, 1), format.IndentString(actual.(error).Error(), 1)) + var fgErr formattedGomegaError + if errors.As(actual.(error), &fgErr) { + return fgErr.FormattedGomegaError() + } + return fmt.Sprintf("Expected success, but got an error:\n%s", format.Object(actual, 1)) } func (matcher *SucceedMatcher) NegatedFailureMessage(actual interface{}) (message string) { diff --git a/vendor/github.com/onsi/gomega/tools b/vendor/github.com/onsi/gomega/tools deleted file mode 100644 index e4195cf..0000000 --- a/vendor/github.com/onsi/gomega/tools +++ /dev/null @@ -1,8 +0,0 @@ -//go:build tools 
-// +build tools - -package main - -import ( - _ "github.com/onsi/ginkgo/v2/ginkgo" -) diff --git a/vendor/github.com/onsi/gomega/types/types.go b/vendor/github.com/onsi/gomega/types/types.go index c315ef0..7c7adb9 100644 --- a/vendor/github.com/onsi/gomega/types/types.go +++ b/vendor/github.com/onsi/gomega/types/types.go @@ -1,12 +1,13 @@ package types import ( + "context" "time" ) type GomegaFailHandler func(message string, callerSkip ...int) -//A simple *testing.T interface wrapper +// A simple *testing.T interface wrapper type GomegaTestingT interface { Helper() Fatalf(format string, args ...interface{}) @@ -18,11 +19,11 @@ type Gomega interface { Expect(actual interface{}, extra ...interface{}) Assertion ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) Assertion - Eventually(actual interface{}, intervals ...interface{}) AsyncAssertion - EventuallyWithOffset(offset int, actual interface{}, intervals ...interface{}) AsyncAssertion + Eventually(actualOrCtx interface{}, args ...interface{}) AsyncAssertion + EventuallyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion - Consistently(actual interface{}, intervals ...interface{}) AsyncAssertion - ConsistentlyWithOffset(offset int, actual interface{}, intervals ...interface{}) AsyncAssertion + Consistently(actualOrCtx interface{}, args ...interface{}) AsyncAssertion + ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion SetDefaultEventuallyTimeout(time.Duration) SetDefaultEventuallyPollingInterval(time.Duration) @@ -30,9 +31,9 @@ type Gomega interface { SetDefaultConsistentlyPollingInterval(time.Duration) } -//All Gomega matchers must implement the GomegaMatcher interface +// All Gomega matchers must implement the GomegaMatcher interface // -//For details on writing custom matchers, check out: http://onsi.github.io/gomega/#adding-your-own-matchers +// For details on writing custom matchers, check out: http://onsi.github.io/gomega/#adding-your-own-matchers type GomegaMatcher interface { Match(actual interface{}) (success bool, err error) FailureMessage(actual interface{}) (message string) @@ -70,6 +71,11 @@ type AsyncAssertion interface { WithOffset(offset int) AsyncAssertion WithTimeout(interval time.Duration) AsyncAssertion WithPolling(interval time.Duration) AsyncAssertion + Within(timeout time.Duration) AsyncAssertion + ProbeEvery(interval time.Duration) AsyncAssertion + WithContext(ctx context.Context) AsyncAssertion + WithArguments(argsToForward ...interface{}) AsyncAssertion + MustPassRepeatedly(count int) AsyncAssertion } // Assertions are returned by Ω and Expect and enable assertions against Gomega matchers diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go index de30de6..a912b75 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go @@ -140,12 +140,13 @@ func (c *counter) get() float64 { } func (c *counter) Write(out *dto.Metric) error { - val := c.get() - + // Read the Exemplar first and the value second. This is to avoid a race condition + // where users see an exemplar for a not-yet-existing observation. 
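+	// (A concurrent writer updates the value before it stores the exemplar,
+	// so with this read order the value reported next to an exemplar is
+	// always at least as fresh as that exemplar.)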
var exemplar *dto.Exemplar if e := c.exemplar.Load(); e != nil { exemplar = e.(*dto.Exemplar) } + val := c.get() return populateMetric(CounterValue, val, c.labelPairs, exemplar, out) } @@ -245,7 +246,8 @@ func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) { // WithLabelValues works as GetMetricWithLabelValues, but panics where // GetMetricWithLabelValues would have returned an error. Not returning an // error allows shortcuts like -// myVec.WithLabelValues("404", "GET").Add(42) +// +// myVec.WithLabelValues("404", "GET").Add(42) func (v *CounterVec) WithLabelValues(lvs ...string) Counter { c, err := v.GetMetricWithLabelValues(lvs...) if err != nil { @@ -256,7 +258,8 @@ func (v *CounterVec) WithLabelValues(lvs ...string) Counter { // With works as GetMetricWith, but panics where GetMetricWithLabels would have // returned an error. Not returning an error allows shortcuts like -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) +// +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) func (v *CounterVec) With(labels Labels) Counter { c, err := v.GetMetricWith(labels) if err != nil { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go index 9845012..811072c 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go @@ -21,55 +21,66 @@ // All exported functions and methods are safe to be used concurrently unless // specified otherwise. // -// A Basic Example +// # A Basic Example // // As a starting point, a very basic usage example: // -// package main +// package main // -// import ( -// "log" -// "net/http" +// import ( +// "log" +// "net/http" // -// "github.com/prometheus/client_golang/prometheus" -// "github.com/prometheus/client_golang/prometheus/promhttp" -// ) +// "github.com/prometheus/client_golang/prometheus" +// "github.com/prometheus/client_golang/prometheus/promhttp" +// ) // -// var ( -// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{ -// Name: "cpu_temperature_celsius", -// Help: "Current temperature of the CPU.", -// }) -// hdFailures = prometheus.NewCounterVec( -// prometheus.CounterOpts{ -// Name: "hd_errors_total", -// Help: "Number of hard-disk errors.", -// }, -// []string{"device"}, -// ) -// ) +// type metrics struct { +// cpuTemp prometheus.Gauge +// hdFailures *prometheus.CounterVec +// } // -// func init() { -// // Metrics have to be registered to be exposed: -// prometheus.MustRegister(cpuTemp) -// prometheus.MustRegister(hdFailures) -// } +// func NewMetrics(reg prometheus.Registerer) *metrics { +// m := &metrics{ +// cpuTemp: prometheus.NewGauge(prometheus.GaugeOpts{ +// Name: "cpu_temperature_celsius", +// Help: "Current temperature of the CPU.", +// }), +// hdFailures: prometheus.NewCounterVec( +// prometheus.CounterOpts{ +// Name: "hd_errors_total", +// Help: "Number of hard-disk errors.", +// }, +// []string{"device"}, +// ), +// } +// reg.MustRegister(m.cpuTemp) +// reg.MustRegister(m.hdFailures) +// return m +// } // -// func main() { -// cpuTemp.Set(65.3) -// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc() +// func main() { +// // Create a non-global registry. +// reg := prometheus.NewRegistry() // -// // The Handler function provides a default handler to expose metrics -// // via an HTTP server. "/metrics" is the usual endpoint for that. 
-//	http.Handle("/metrics", promhttp.Handler())
-//	log.Fatal(http.ListenAndServe(":8080", nil))
-// }
+//	// Create new metrics and register them using the custom registry.
+//	m := NewMetrics(reg)
+//	// Set values for the newly created metrics.
+//	m.cpuTemp.Set(65.3)
+//	m.hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
//
+//	// Expose metrics and custom registry via an HTTP server
+//	// using the HandlerFor function. "/metrics" is the usual endpoint for that.
+//	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}))
+//	log.Fatal(http.ListenAndServe(":8080", nil))
+// }
//
// This is a complete program that exports two metrics, a Gauge and a Counter,
// the latter with a label attached to turn it into a (one-dimensional) vector.
+// It registers the metrics using a custom registry and exposes them via an HTTP server
+// on the /metrics endpoint.
//
-// Metrics
+// # Metrics
//
// The number of exported identifiers in this package might appear a bit
// overwhelming. However, in addition to the basic plumbing shown in the example
@@ -100,7 +111,7 @@
// To create instances of Metrics and their vector versions, you need a suitable
// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, or HistogramOpts.
//
-// Custom Collectors and constant Metrics
+// # Custom Collectors and constant Metrics
//
// While you could create your own implementations of Metric, most likely you
// will only ever implement the Collector interface on your own. At a first
@@ -141,7 +152,7 @@
// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting
// shortcuts.
//
-// Advanced Uses of the Registry
+// # Advanced Uses of the Registry
//
// While MustRegister is the by far most common way of registering a Collector,
// sometimes you might want to handle the errors the registration might cause.
@@ -176,23 +187,23 @@
// NewProcessCollector). With a custom registry, you are in control and decide
// yourself about the Collectors to register.
//
-// HTTP Exposition
+// # HTTP Exposition
//
// The Registry implements the Gatherer interface. The caller of the Gather
// method can then expose the gathered metrics in some way. Usually, the metrics
// are served via HTTP on the /metrics endpoint. That's happening in the example
// above. The tools to expose metrics via HTTP are in the promhttp sub-package.
//
-// Pushing to the Pushgateway
+// # Pushing to the Pushgateway
//
// Function for pushing to the Pushgateway can be found in the push sub-package.
//
-// Graphite Bridge
+// # Graphite Bridge
//
// Functions and examples to push metrics from a Gatherer to Graphite can be
// found in the graphite sub-package.
//
-// Other Means of Exposition
+// # Other Means of Exposition
//
// More ways of exposing metrics can easily be added by following the approaches
// of the existing implementations.
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
index bd0733d..21271a5 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
@@ -210,7 +210,8 @@ func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {

// WithLabelValues works as GetMetricWithLabelValues, but panics where
// GetMetricWithLabelValues would have returned an error.
Not returning an // error allows shortcuts like -// myVec.WithLabelValues("404", "GET").Add(42) +// +// myVec.WithLabelValues("404", "GET").Add(42) func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge { g, err := v.GetMetricWithLabelValues(lvs...) if err != nil { @@ -221,7 +222,8 @@ func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge { // With works as GetMetricWith, but panics where GetMetricWithLabels would have // returned an error. Not returning an error allows shortcuts like -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) +// +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) func (v *GaugeVec) With(labels Labels) Gauge { g, err := v.GetMetricWith(labels) if err != nil { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index 0d47fec..4c873a0 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -28,19 +28,216 @@ import ( dto "github.com/prometheus/client_model/go" ) +// nativeHistogramBounds for the frac of observed values. Only relevant for +// schema > 0. The position in the slice is the schema. (0 is never used, just +// here for convenience of using the schema directly as the index.) +// +// TODO(beorn7): Currently, we do a binary search into these slices. There are +// ways to turn it into a small number of simple array lookups. It probably only +// matters for schema 5 and beyond, but should be investigated. See this comment +// as a starting point: +// https://github.com/open-telemetry/opentelemetry-specification/issues/1776#issuecomment-870164310 +var nativeHistogramBounds = [][]float64{ + // Schema "0": + {0.5}, + // Schema 1: + {0.5, 0.7071067811865475}, + // Schema 2: + {0.5, 0.5946035575013605, 0.7071067811865475, 0.8408964152537144}, + // Schema 3: + { + 0.5, 0.5452538663326288, 0.5946035575013605, 0.6484197773255048, + 0.7071067811865475, 0.7711054127039704, 0.8408964152537144, 0.9170040432046711, + }, + // Schema 4: + { + 0.5, 0.5221368912137069, 0.5452538663326288, 0.5693943173783458, + 0.5946035575013605, 0.620928906036742, 0.6484197773255048, 0.6771277734684463, + 0.7071067811865475, 0.7384130729697496, 0.7711054127039704, 0.805245165974627, + 0.8408964152537144, 0.8781260801866495, 0.9170040432046711, 0.9576032806985735, + }, + // Schema 5: + { + 0.5, 0.5109485743270583, 0.5221368912137069, 0.5335702003384117, + 0.5452538663326288, 0.5571933712979462, 0.5693943173783458, 0.5818624293887887, + 0.5946035575013605, 0.6076236799902344, 0.620928906036742, 0.6345254785958666, + 0.6484197773255048, 0.6626183215798706, 0.6771277734684463, 0.6919549409819159, + 0.7071067811865475, 0.7225904034885232, 0.7384130729697496, 0.7545822137967112, + 0.7711054127039704, 0.7879904225539431, 0.805245165974627, 0.8228777390769823, + 0.8408964152537144, 0.8593096490612387, 0.8781260801866495, 0.8973545375015533, + 0.9170040432046711, 0.9370838170551498, 0.9576032806985735, 0.9785720620876999, + }, + // Schema 6: + { + 0.5, 0.5054446430258502, 0.5109485743270583, 0.5165124395106142, + 0.5221368912137069, 0.5278225891802786, 0.5335702003384117, 0.5393803988785598, + 0.5452538663326288, 0.5511912916539204, 0.5571933712979462, 0.5632608093041209, + 0.5693943173783458, 0.5755946149764913, 0.5818624293887887, 0.5881984958251406, + 0.5946035575013605, 0.6010783657263515, 0.6076236799902344, 0.6142402680534349, + 0.620928906036742, 
0.6276903785123455, 0.6345254785958666, 0.6414350080393891, + 0.6484197773255048, 0.6554806057623822, 0.6626183215798706, 0.6698337620266515, + 0.6771277734684463, 0.6845012114872953, 0.6919549409819159, 0.6994898362691555, + 0.7071067811865475, 0.7148066691959849, 0.7225904034885232, 0.7304588970903234, + 0.7384130729697496, 0.7464538641456323, 0.7545822137967112, 0.762799075372269, + 0.7711054127039704, 0.7795022001189185, 0.7879904225539431, 0.7965710756711334, + 0.805245165974627, 0.8140137109286738, 0.8228777390769823, 0.8318382901633681, + 0.8408964152537144, 0.8500531768592616, 0.8593096490612387, 0.8686669176368529, + 0.8781260801866495, 0.8876882462632604, 0.8973545375015533, 0.9071260877501991, + 0.9170040432046711, 0.9269895625416926, 0.9370838170551498, 0.9472879907934827, + 0.9576032806985735, 0.9680308967461471, 0.9785720620876999, 0.9892280131939752, + }, + // Schema 7: + { + 0.5, 0.5027149505564014, 0.5054446430258502, 0.5081891574554764, + 0.5109485743270583, 0.5137229745593818, 0.5165124395106142, 0.5193170509806894, + 0.5221368912137069, 0.5249720429003435, 0.5278225891802786, 0.5306886136446309, + 0.5335702003384117, 0.5364674337629877, 0.5393803988785598, 0.5423091811066545, + 0.5452538663326288, 0.5482145409081883, 0.5511912916539204, 0.5541842058618393, + 0.5571933712979462, 0.5602188762048033, 0.5632608093041209, 0.5663192597993595, + 0.5693943173783458, 0.572486072215902, 0.5755946149764913, 0.5787200368168754, + 0.5818624293887887, 0.585021884841625, 0.5881984958251406, 0.5913923554921704, + 0.5946035575013605, 0.5978321960199137, 0.6010783657263515, 0.6043421618132907, + 0.6076236799902344, 0.6109230164863786, 0.6142402680534349, 0.6175755319684665, + 0.620928906036742, 0.6243004885946023, 0.6276903785123455, 0.6310986751971253, + 0.6345254785958666, 0.637970889198196, 0.6414350080393891, 0.6449179367033329, + 0.6484197773255048, 0.6519406325959679, 0.6554806057623822, 0.659039800633032, + 0.6626183215798706, 0.6662162735415805, 0.6698337620266515, 0.6734708931164728, + 0.6771277734684463, 0.6808045103191123, 0.6845012114872953, 0.688217985377265, + 0.6919549409819159, 0.6957121878859629, 0.6994898362691555, 0.7032879969095076, + 0.7071067811865475, 0.7109463010845827, 0.7148066691959849, 0.718687998724491, + 0.7225904034885232, 0.7265139979245261, 0.7304588970903234, 0.7344252166684908, + 0.7384130729697496, 0.7424225829363761, 0.7464538641456323, 0.7505070348132126, + 0.7545822137967112, 0.7586795205991071, 0.762799075372269, 0.7669409989204777, + 0.7711054127039704, 0.7752924388424999, 0.7795022001189185, 0.7837348199827764, + 0.7879904225539431, 0.7922691326262467, 0.7965710756711334, 0.8008963778413465, + 0.805245165974627, 0.8096175675974316, 0.8140137109286738, 0.8184337248834821, + 0.8228777390769823, 0.8273458838280969, 0.8318382901633681, 0.8363550898207981, + 0.8408964152537144, 0.8454623996346523, 0.8500531768592616, 0.8546688815502312, + 0.8593096490612387, 0.8639756154809185, 0.8686669176368529, 0.8733836930995842, + 0.8781260801866495, 0.8828942179666361, 0.8876882462632604, 0.8925083056594671, + 0.8973545375015533, 0.9022270839033115, 0.9071260877501991, 0.9120516927035263, + 0.9170040432046711, 0.9219832844793128, 0.9269895625416926, 0.9320230241988943, + 0.9370838170551498, 0.9421720895161669, 0.9472879907934827, 0.9524316709088368, + 0.9576032806985735, 0.9628029718180622, 0.9680308967461471, 0.9732872087896164, + 0.9785720620876999, 0.9838856116165875, 0.9892280131939752, 0.9945994234836328, + }, + // Schema 8: + { + 0.5, 0.5013556375251013, 
0.5027149505564014, 0.5040779490592088, + 0.5054446430258502, 0.5068150424757447, 0.5081891574554764, 0.509566998038869, + 0.5109485743270583, 0.5123338964485679, 0.5137229745593818, 0.5151158188430205, + 0.5165124395106142, 0.5179128468009786, 0.5193170509806894, 0.520725062344158, + 0.5221368912137069, 0.5235525479396449, 0.5249720429003435, 0.526395386502313, + 0.5278225891802786, 0.5292536613972564, 0.5306886136446309, 0.5321274564422321, + 0.5335702003384117, 0.5350168559101208, 0.5364674337629877, 0.5379219445313954, + 0.5393803988785598, 0.5408428074966075, 0.5423091811066545, 0.5437795304588847, + 0.5452538663326288, 0.5467321995364429, 0.5482145409081883, 0.549700901315111, + 0.5511912916539204, 0.5526857228508706, 0.5541842058618393, 0.5556867516724088, + 0.5571933712979462, 0.5587040757836845, 0.5602188762048033, 0.5617377836665098, + 0.5632608093041209, 0.564787964283144, 0.5663192597993595, 0.5678547070789026, + 0.5693943173783458, 0.5709381019847808, 0.572486072215902, 0.5740382394200894, + 0.5755946149764913, 0.5771552102951081, 0.5787200368168754, 0.5802891060137493, + 0.5818624293887887, 0.5834400184762408, 0.585021884841625, 0.5866080400818185, + 0.5881984958251406, 0.5897932637314379, 0.5913923554921704, 0.5929957828304968, + 0.5946035575013605, 0.5962156912915756, 0.5978321960199137, 0.5994530835371903, + 0.6010783657263515, 0.6027080545025619, 0.6043421618132907, 0.6059806996384005, + 0.6076236799902344, 0.6092711149137041, 0.6109230164863786, 0.6125793968185725, + 0.6142402680534349, 0.6159056423670379, 0.6175755319684665, 0.6192499490999082, + 0.620928906036742, 0.622612415087629, 0.6243004885946023, 0.6259931389331581, + 0.6276903785123455, 0.6293922197748583, 0.6310986751971253, 0.6328097572894031, + 0.6345254785958666, 0.6362458516947014, 0.637970889198196, 0.6397006037528346, + 0.6414350080393891, 0.6431741147730128, 0.6449179367033329, 0.6466664866145447, + 0.6484197773255048, 0.6501778216898253, 0.6519406325959679, 0.6537082229673385, + 0.6554806057623822, 0.6572577939746774, 0.659039800633032, 0.6608266388015788, + 0.6626183215798706, 0.6644148621029772, 0.6662162735415805, 0.6680225691020727, + 0.6698337620266515, 0.6716498655934177, 0.6734708931164728, 0.6752968579460171, + 0.6771277734684463, 0.6789636531064505, 0.6808045103191123, 0.6826503586020058, + 0.6845012114872953, 0.6863570825438342, 0.688217985377265, 0.690083933630119, + 0.6919549409819159, 0.6938310211492645, 0.6957121878859629, 0.6975984549830999, + 0.6994898362691555, 0.7013863456101023, 0.7032879969095076, 0.7051948041086352, + 0.7071067811865475, 0.7090239421602076, 0.7109463010845827, 0.7128738720527471, + 0.7148066691959849, 0.7167447066838943, 0.718687998724491, 0.7206365595643126, + 0.7225904034885232, 0.7245495448210174, 0.7265139979245261, 0.7284837772007218, + 0.7304588970903234, 0.7324393720732029, 0.7344252166684908, 0.7364164454346837, + 0.7384130729697496, 0.7404151139112358, 0.7424225829363761, 0.7444354947621984, + 0.7464538641456323, 0.7484777058836176, 0.7505070348132126, 0.7525418658117031, + 0.7545822137967112, 0.7566280937263048, 0.7586795205991071, 0.7607365094544071, + 0.762799075372269, 0.7648672334736434, 0.7669409989204777, 0.7690203869158282, + 0.7711054127039704, 0.7731960915705107, 0.7752924388424999, 0.7773944698885442, + 0.7795022001189185, 0.7816156449856788, 0.7837348199827764, 0.7858597406461707, + 0.7879904225539431, 0.7901268813264122, 0.7922691326262467, 0.7944171921585818, + 0.7965710756711334, 0.7987307989543135, 0.8008963778413465, 0.8030678282083853, + 
0.805245165974627, 0.8074284071024302, 0.8096175675974316, 0.8118126635086642, + 0.8140137109286738, 0.8162207259936375, 0.8184337248834821, 0.820652723822003, + 0.8228777390769823, 0.8251087869603088, 0.8273458838280969, 0.8295890460808079, + 0.8318382901633681, 0.8340936325652911, 0.8363550898207981, 0.8386226785089391, + 0.8408964152537144, 0.8431763167241966, 0.8454623996346523, 0.8477546807446661, + 0.8500531768592616, 0.8523579048290255, 0.8546688815502312, 0.8569861239649629, + 0.8593096490612387, 0.8616394738731368, 0.8639756154809185, 0.8663180910111553, + 0.8686669176368529, 0.871022112577578, 0.8733836930995842, 0.8757516765159389, + 0.8781260801866495, 0.8805069215187917, 0.8828942179666361, 0.8852879870317771, + 0.8876882462632604, 0.890095013257712, 0.8925083056594671, 0.8949281411607002, + 0.8973545375015533, 0.8997875124702672, 0.9022270839033115, 0.9046732696855155, + 0.9071260877501991, 0.909585556079304, 0.9120516927035263, 0.9145245157024483, + 0.9170040432046711, 0.9194902933879467, 0.9219832844793128, 0.9244830347552253, + 0.9269895625416926, 0.92950288621441, 0.9320230241988943, 0.9345499949706191, + 0.9370838170551498, 0.93962450902828, 0.9421720895161669, 0.9447265771954693, + 0.9472879907934827, 0.9498563490882775, 0.9524316709088368, 0.9550139751351947, + 0.9576032806985735, 0.9601996065815236, 0.9628029718180622, 0.9654133954938133, + 0.9680308967461471, 0.9706554947643201, 0.9732872087896164, 0.9759260581154889, + 0.9785720620876999, 0.9812252401044634, 0.9838856116165875, 0.9865531961276168, + 0.9892280131939752, 0.9919100824251095, 0.9945994234836328, 0.9972960560854698, + }, +} + +// The nativeHistogramBounds above can be generated with the code below. +// +// TODO(beorn7): It's tempting to actually use `go generate` to generate the +// code above. However, this could lead to slightly different numbers on +// different architectures. We still need to come to terms if we are fine with +// that, or if we might prefer to specify precise numbers in the standard. +// +// var nativeHistogramBounds [][]float64 = make([][]float64, 9) +// +// func init() { +// // Populate nativeHistogramBounds. +// numBuckets := 1 +// for i := range nativeHistogramBounds { +// bounds := []float64{0.5} +// factor := math.Exp2(math.Exp2(float64(-i))) +// for j := 0; j < numBuckets-1; j++ { +// var bound float64 +// if (j+1)%2 == 0 { +// // Use previously calculated value for increased precision. +// bound = nativeHistogramBounds[i-1][j/2+1] +// } else { +// bound = bounds[j] * factor +// } +// bounds = append(bounds, bound) +// } +// numBuckets *= 2 +// nativeHistogramBounds[i] = bounds +// } +// } + // A Histogram counts individual observations from an event or sample stream in -// configurable buckets. Similar to a summary, it also provides a sum of -// observations and an observation count. +// configurable static buckets (or in dynamic sparse buckets as part of the +// experimental Native Histograms, see below for more details). Similar to a +// Summary, it also provides a sum of observations and an observation count. // // On the Prometheus server, quantiles can be calculated from a Histogram using -// the histogram_quantile function in the query language. +// the histogram_quantile PromQL function. // -// Note that Histograms, in contrast to Summaries, can be aggregated with the -// Prometheus query language (see the documentation for detailed -// procedures). However, Histograms require the user to pre-define suitable -// buckets, and they are in general less accurate. 
The Observe method of a -// Histogram has a very low performance overhead in comparison with the Observe -// method of a Summary. +// Note that Histograms, in contrast to Summaries, can be aggregated in PromQL +// (see the documentation for detailed procedures). However, Histograms require +// the user to pre-define suitable buckets, and they are in general less +// accurate. (Both problems are addressed by the experimental Native +// Histograms. To use them, configure a NativeHistogramBucketFactor in the +// HistogramOpts. They also require a Prometheus server v2.40+ with the +// corresponding feature flag enabled.) +// +// The Observe method of a Histogram has a very low performance overhead in +// comparison with the Observe method of a Summary. // // To create Histogram instances, use NewHistogram. type Histogram interface { @@ -50,7 +247,8 @@ type Histogram interface { // Observe adds a single observation to the histogram. Observations are // usually positive or zero. Negative observations are accepted but // prevent current versions of Prometheus from properly detecting - // counter resets in the sum of observations. See + // counter resets in the sum of observations. (The experimental Native + // Histograms handle negative observations properly.) See // https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations // for details. Observe(float64) @@ -64,18 +262,28 @@ const bucketLabel = "le" // tailored to broadly measure the response time (in seconds) of a network // service. Most likely, however, you will be required to define buckets // customized to your use case. -var ( - DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} +var DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} - errBucketLabelNotAllowed = fmt.Errorf( - "%q is not allowed as label name in histograms", bucketLabel, - ) +// DefNativeHistogramZeroThreshold is the default value for +// NativeHistogramZeroThreshold in the HistogramOpts. +// +// The value is 2^-128 (or 0.5*2^-127 in the actual IEEE 754 representation), +// which is a bucket boundary at all possible resolutions. +const DefNativeHistogramZeroThreshold = 2.938735877055719e-39 + +// NativeHistogramZeroThresholdZero can be used as NativeHistogramZeroThreshold +// in the HistogramOpts to create a zero bucket of width zero, i.e. a zero +// bucket that only receives observations of precisely zero. +const NativeHistogramZeroThresholdZero = -1 + +var errBucketLabelNotAllowed = fmt.Errorf( + "%q is not allowed as label name in histograms", bucketLabel, ) -// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest -// bucket has an upper bound of 'start'. The final +Inf bucket is not counted -// and not included in the returned slice. The returned slice is meant to be -// used for the Buckets field of HistogramOpts. +// LinearBuckets creates 'count' regular buckets, each 'width' wide, where the +// lowest bucket has an upper bound of 'start'. The final +Inf bucket is not +// counted and not included in the returned slice. The returned slice is meant +// to be used for the Buckets field of HistogramOpts. // // The function panics if 'count' is zero or negative. 
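+//
+// For example (an illustrative sketch, not part of the upstream comment):
+//
+//	buckets := prometheus.LinearBuckets(0.1, 0.2, 5)
+//	// buckets == []float64{0.1, 0.3, 0.5, 0.7, 0.9}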
func LinearBuckets(start, width float64, count int) []float64 {
@@ -90,11 +298,11 @@ func LinearBuckets(start, width float64, count int) []float64 {
	return buckets
}

-// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an
-// upper bound of 'start' and each following bucket's upper bound is 'factor'
-// times the previous bucket's upper bound. The final +Inf bucket is not counted
-// and not included in the returned slice. The returned slice is meant to be
-// used for the Buckets field of HistogramOpts.
+// ExponentialBuckets creates 'count' regular buckets, where the lowest bucket
+// has an upper bound of 'start' and each following bucket's upper bound is
+// 'factor' times the previous bucket's upper bound. The final +Inf bucket is
+// not counted and not included in the returned slice. The returned slice is
+// meant to be used for the Buckets field of HistogramOpts.
//
// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative,
// or if 'factor' is less than or equal 1.
@@ -180,8 +388,85 @@ type HistogramOpts struct {
	// element in the slice is the upper inclusive bound of a bucket. The
	// values must be sorted in strictly increasing order. There is no need
	// to add a highest bucket with +Inf bound, it will be added
-	// implicitly. The default value is DefBuckets.
+	// implicitly. If Buckets is left as nil or set to a slice of length
+	// zero, it is replaced by default buckets. The default buckets are
+	// DefBuckets if no buckets for a native histogram (see below) are used,
+	// otherwise the default is no buckets. (In other words, if you want to
+	// use both regular buckets and buckets for a native histogram, you have
+	// to define the regular buckets here explicitly.)
	Buckets []float64
+
+	// If NativeHistogramBucketFactor is greater than one, so-called sparse
+	// buckets are used (in addition to the regular buckets, if defined
+	// above). A Histogram with sparse buckets will be ingested as a Native
+	// Histogram by a Prometheus server with that feature enabled (requires
+	// Prometheus v2.40+). Sparse buckets are exponential buckets covering
+	// the whole float64 range (with the exception of the “zero” bucket, see
+	// NativeHistogramZeroThreshold below). From any one bucket to the next,
+	// the width of the bucket grows by a constant
+	// factor. NativeHistogramBucketFactor provides an upper bound for this
+	// factor (exception see below). The smaller
+	// NativeHistogramBucketFactor, the more buckets will be used and thus
+	// the more costly the histogram will become. A generally good trade-off
+	// between cost and accuracy is a value of 1.1 (each bucket is at most
+	// 10% wider than the previous one), which will result in each power of
+	// two divided into 8 buckets (e.g. there will be 8 buckets between 1
+	// and 2, same as between 2 and 4, and 4 and 8, etc.).
+	//
+	// Details about the actually used factor: The factor is calculated as
+	// 2^(2^n), where n is an integer number between (and including) -8 and
+	// 4. n is chosen so that the resulting factor is the largest that is
+	// still smaller or equal to NativeHistogramBucketFactor. Note that the
+	// smallest possible factor is therefore approx. 1.00271 (i.e. 2^(2^-8)
+	// ). If NativeHistogramBucketFactor is greater than 1 but smaller than
+	// 2^(2^-8), then the actually used factor is still 2^(2^-8) even though
+	// it is larger than the provided NativeHistogramBucketFactor.
+	//
+	// NOTE: Native Histograms are still an experimental feature. Their
+	// behavior might still change without a major version
+	// bump. Subsequently, all NativeHistogram... options here might still
+	// change their behavior or name (or might completely disappear) without
+	// a major version bump.
+	NativeHistogramBucketFactor float64
+	// All observations with an absolute value of less than or equal to
+	// NativeHistogramZeroThreshold are accumulated into a “zero”
+	// bucket. For best results, this should be close to a bucket
+	// boundary. This is usually the case if picking a power of two. If
+	// NativeHistogramZeroThreshold is left at zero,
+	// DefNativeHistogramZeroThreshold is used as the threshold. To configure
+	// a zero bucket with an actual threshold of zero (i.e. only
+	// observations of precisely zero will go into the zero bucket), set
+	// NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero
+	// constant (or any negative float value).
+	NativeHistogramZeroThreshold float64
+
+	// The remaining fields define a strategy to limit the number of
+	// populated sparse buckets. If NativeHistogramMaxBucketNumber is left
+	// at zero, the number of buckets is not limited. (Note that this might
+	// lead to unbounded memory consumption if the values observed by the
+	// Histogram are sufficiently wide-spread. In particular, this could be
+	// used as a DoS attack vector. Where the observed values depend on
+	// external inputs, it is highly recommended to set a
+	// NativeHistogramMaxBucketNumber.) Once the set
+	// NativeHistogramMaxBucketNumber is exceeded, the following strategy is
+	// enacted: First, if the last reset (or the creation) of the histogram
+	// is at least NativeHistogramMinResetDuration ago, then the whole
+	// histogram is reset to its initial state (including regular
+	// buckets). If less time has passed, or if
+	// NativeHistogramMinResetDuration is zero, no reset is
+	// performed. Instead, the zero threshold is increased sufficiently to
+	// reduce the number of buckets to or below
+	// NativeHistogramMaxBucketNumber, but not to more than
+	// NativeHistogramMaxZeroThreshold. Thus, if
+	// NativeHistogramMaxZeroThreshold is already at or below the current
+	// zero threshold, nothing happens at this step. After that, if the
+	// number of buckets still exceeds NativeHistogramMaxBucketNumber, the
+	// resolution of the histogram is reduced by doubling the width of the
+	// sparse buckets (up to a growth factor between one bucket to the next
+	// of 2^(2^4) = 65536, see above).
+	NativeHistogramMaxBucketNumber  uint32
+	NativeHistogramMinResetDuration time.Duration
+	NativeHistogramMaxZeroThreshold float64
}

// NewHistogram creates a new Histogram based on the provided HistogramOpts. It
// NewHistogram creates a new Histogram based on the provided HistogramOpts. It @@ -218,16 +503,29 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr } } - if len(opts.Buckets) == 0 { - opts.Buckets = DefBuckets - } - h := &histogram{ - desc: desc, - upperBounds: opts.Buckets, - labelPairs: MakeLabelPairs(desc, labelValues), - counts: [2]*histogramCounts{{}, {}}, - now: time.Now, + desc: desc, + upperBounds: opts.Buckets, + labelPairs: MakeLabelPairs(desc, labelValues), + nativeHistogramMaxBuckets: opts.NativeHistogramMaxBucketNumber, + nativeHistogramMaxZeroThreshold: opts.NativeHistogramMaxZeroThreshold, + nativeHistogramMinResetDuration: opts.NativeHistogramMinResetDuration, + lastResetTime: time.Now(), + now: time.Now, + } + if len(h.upperBounds) == 0 && opts.NativeHistogramBucketFactor <= 1 { + h.upperBounds = DefBuckets + } + if opts.NativeHistogramBucketFactor <= 1 { + h.nativeHistogramSchema = math.MinInt32 // To mark that there are no sparse buckets. + } else { + switch { + case opts.NativeHistogramZeroThreshold > 0: + h.nativeHistogramZeroThreshold = opts.NativeHistogramZeroThreshold + case opts.NativeHistogramZeroThreshold == 0: + h.nativeHistogramZeroThreshold = DefNativeHistogramZeroThreshold + } // Leave h.nativeHistogramZeroThreshold at 0 otherwise. + h.nativeHistogramSchema = pickSchema(opts.NativeHistogramBucketFactor) } for i, upperBound := range h.upperBounds { if i < len(h.upperBounds)-1 { @@ -246,8 +544,16 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr } // Finally we know the final length of h.upperBounds and can make buckets // for both counts as well as exemplars: - h.counts[0].buckets = make([]uint64, len(h.upperBounds)) - h.counts[1].buckets = make([]uint64, len(h.upperBounds)) + h.counts[0] = &histogramCounts{ + buckets: make([]uint64, len(h.upperBounds)), + nativeHistogramZeroThresholdBits: math.Float64bits(h.nativeHistogramZeroThreshold), + nativeHistogramSchema: h.nativeHistogramSchema, + } + h.counts[1] = &histogramCounts{ + buckets: make([]uint64, len(h.upperBounds)), + nativeHistogramZeroThresholdBits: math.Float64bits(h.nativeHistogramZeroThreshold), + nativeHistogramSchema: h.nativeHistogramSchema, + } h.exemplars = make([]atomic.Value, len(h.upperBounds)+1) h.init(h) // Init self-collection. @@ -255,13 +561,98 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr } type histogramCounts struct { + // Order in this struct matters for the alignment required by atomic + // operations, see http://golang.org/pkg/sync/atomic/#pkg-note-BUG + // sumBits contains the bits of the float64 representing the sum of all - // observations. sumBits and count have to go first in the struct to - // guarantee alignment for atomic operations. - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + // observations. sumBits uint64 count uint64 + + // nativeHistogramZeroBucket counts all (positive and negative) + // observations in the zero bucket (with an absolute value less than or + // equal to the current threshold, see next field). + nativeHistogramZeroBucket uint64 + // nativeHistogramZeroThresholdBits is the bit pattern of the current + // threshold for the zero bucket. It's initially equal to + // nativeHistogramZeroThreshold but may change according to the bucket + // count limitation strategy. + nativeHistogramZeroThresholdBits uint64 + // nativeHistogramSchema may change over time according to the bucket + // count limitation strategy and therefore has to be saved here. + nativeHistogramSchema int32 + // Number of (positive and negative) sparse buckets.
+ nativeHistogramBucketsNumber uint32 + + // Regular buckets. buckets []uint64 + + // The sparse buckets for native histograms are implemented with a + // sync.Map for now. A dedicated data structure will likely be more + // efficient. There are separate maps for negative and positive + // observations. The map's value is an *int64, counting observations in + // that bucket. (Note that we don't use uint64 as an int64 won't + // overflow in practice, and working with signed numbers from the + // beginning simplifies the handling of deltas.) The map's key is the + // index of the bucket according to the used + // nativeHistogramSchema. Index 0 is for an upper bound of 1. + nativeHistogramBucketsPositive, nativeHistogramBucketsNegative sync.Map +} + +// observe manages the parts of observe that only affects +// histogramCounts. doSparse is true if sparse buckets should be done, +// too. +func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool) { + if bucket < len(hc.buckets) { + atomic.AddUint64(&hc.buckets[bucket], 1) + } + atomicAddFloat(&hc.sumBits, v) + if doSparse && !math.IsNaN(v) { + var ( + key int + schema = atomic.LoadInt32(&hc.nativeHistogramSchema) + zeroThreshold = math.Float64frombits(atomic.LoadUint64(&hc.nativeHistogramZeroThresholdBits)) + bucketCreated, isInf bool + ) + if math.IsInf(v, 0) { + // Pretend v is MaxFloat64 but later increment key by one. + if math.IsInf(v, +1) { + v = math.MaxFloat64 + } else { + v = -math.MaxFloat64 + } + isInf = true + } + frac, exp := math.Frexp(math.Abs(v)) + if schema > 0 { + bounds := nativeHistogramBounds[schema] + key = sort.SearchFloat64s(bounds, frac) + (exp-1)*len(bounds) + } else { + key = exp + if frac == 0.5 { + key-- + } + div := 1 << -schema + key = (key + div - 1) / div + } + if isInf { + key++ + } + switch { + case v > zeroThreshold: + bucketCreated = addToBucket(&hc.nativeHistogramBucketsPositive, key, 1) + case v < -zeroThreshold: + bucketCreated = addToBucket(&hc.nativeHistogramBucketsNegative, key, 1) + default: + atomic.AddUint64(&hc.nativeHistogramZeroBucket, 1) + } + if bucketCreated { + atomic.AddUint32(&hc.nativeHistogramBucketsNumber, 1) + } + } + // Increment count last as we take it as a signal that the observation + // is complete. + atomic.AddUint64(&hc.count, 1) } type histogram struct { @@ -276,7 +667,7 @@ type histogram struct { // perspective of the histogram) swap the hot–cold under the writeMtx // lock. A cooldown is awaited (while locked) by comparing the number of // observations with the initiation count. Once they match, then the - // last observation on the now cool one has completed. All cool fields must + // last observation on the now cool one has completed. All cold fields must // be merged into the new hot before releasing writeMtx. // // Fields with atomic access first! See alignment constraint: @@ -284,8 +675,10 @@ type histogram struct { countAndHotIdx uint64 selfCollector - desc *Desc - writeMtx sync.Mutex // Only used in the Write method. + desc *Desc + + // Only used in the Write method and for sparse bucket management. + mtx sync.Mutex // Two counts, one is "hot" for lock-free observations, the other is // "cold" for writing out a dto.Metric. It has to be an array of @@ -293,9 +686,15 @@ type histogram struct { // http://golang.org/pkg/sync/atomic/#pkg-note-BUG. counts [2]*histogramCounts - upperBounds []float64 - labelPairs []*dto.LabelPair - exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar. 
+ upperBounds []float64 + labelPairs []*dto.LabelPair + exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar. + nativeHistogramSchema int32 // The initial schema. Set to math.MinInt32 if no sparse buckets are used. + nativeHistogramZeroThreshold float64 // The initial zero threshold. + nativeHistogramMaxZeroThreshold float64 + nativeHistogramMaxBuckets uint32 + nativeHistogramMinResetDuration time.Duration + lastResetTime time.Time // Protected by mtx. now func() time.Time // To mock out time.Now() for testing. } @@ -319,8 +718,8 @@ func (h *histogram) Write(out *dto.Metric) error { // the hot path, i.e. Observe is called much more often than Write. The // complication of making Write lock-free isn't worth it, if possible at // all. - h.writeMtx.Lock() - defer h.writeMtx.Unlock() + h.mtx.Lock() + defer h.mtx.Unlock() // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0) // without touching the count bits. See the struct comments for a full @@ -333,16 +732,16 @@ func (h *histogram) Write(out *dto.Metric) error { hotCounts := h.counts[n>>63] coldCounts := h.counts[(^n)>>63] - // Await cooldown. - for count != atomic.LoadUint64(&coldCounts.count) { - runtime.Gosched() // Let observations get work done. - } + waitForCooldown(count, coldCounts) his := &dto.Histogram{ Bucket: make([]*dto.Bucket, len(h.upperBounds)), SampleCount: proto.Uint64(count), SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), } + out.Histogram = his + out.Label = h.labelPairs + var cumCount uint64 for i, upperBound := range h.upperBounds { cumCount += atomic.LoadUint64(&coldCounts.buckets[i]) @@ -363,25 +762,21 @@ func (h *histogram) Write(out *dto.Metric) error { } his.Bucket = append(his.Bucket, b) } + if h.nativeHistogramSchema > math.MinInt32 { + his.ZeroThreshold = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.nativeHistogramZeroThresholdBits))) + his.Schema = proto.Int32(atomic.LoadInt32(&coldCounts.nativeHistogramSchema)) + zeroBucket := atomic.LoadUint64(&coldCounts.nativeHistogramZeroBucket) - out.Histogram = his - out.Label = h.labelPairs + defer func() { + coldCounts.nativeHistogramBucketsPositive.Range(addAndReset(&hotCounts.nativeHistogramBucketsPositive, &hotCounts.nativeHistogramBucketsNumber)) + coldCounts.nativeHistogramBucketsNegative.Range(addAndReset(&hotCounts.nativeHistogramBucketsNegative, &hotCounts.nativeHistogramBucketsNumber)) + }() - // Finally add all the cold counts to the new hot counts and reset the cold counts. - atomic.AddUint64(&hotCounts.count, count) - atomic.StoreUint64(&coldCounts.count, 0) - for { - oldBits := atomic.LoadUint64(&hotCounts.sumBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + his.GetSampleSum()) - if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { - atomic.StoreUint64(&coldCounts.sumBits, 0) - break - } - } - for i := range h.upperBounds { - atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i])) - atomic.StoreUint64(&coldCounts.buckets[i], 0) + his.ZeroCount = proto.Uint64(zeroBucket) + his.NegativeSpan, his.NegativeDelta = makeBuckets(&coldCounts.nativeHistogramBucketsNegative) + his.PositiveSpan, his.PositiveDelta = makeBuckets(&coldCounts.nativeHistogramBucketsPositive) } + addAndResetCounts(hotCounts, coldCounts) return nil } @@ -402,25 +797,216 @@ func (h *histogram) findBucket(v float64) int { // observe is the implementation for Observe without the findBucket part. 
func (h *histogram) observe(v float64, bucket int) { + // Do not add to sparse buckets for NaN observations. + doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v) // We increment h.countAndHotIdx so that the counter in the lower // 63 bits gets incremented. At the same time, we get the new value // back, which we can use to find the currently-hot counts. n := atomic.AddUint64(&h.countAndHotIdx, 1) hotCounts := h.counts[n>>63] - - if bucket < len(h.upperBounds) { - atomic.AddUint64(&hotCounts.buckets[bucket], 1) + hotCounts.observe(v, bucket, doSparse) + if doSparse { + h.limitBuckets(hotCounts, v, bucket) } - for { - oldBits := atomic.LoadUint64(&hotCounts.sumBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + v) - if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { - break +} + +// limitBuckets applies a strategy to limit the number of populated sparse +// buckets. It's generally best effort, and there are situations where the +// number can go higher (if even the lowest resolution isn't enough to reduce +// the number sufficiently, or if the provided counts aren't fully updated yet +// by a concurrently happening Write call). +func (h *histogram) limitBuckets(counts *histogramCounts, value float64, bucket int) { + if h.nativeHistogramMaxBuckets == 0 { + return // No limit configured. + } + if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&counts.nativeHistogramBucketsNumber) { + return // Bucket limit not exceeded yet. + } + + h.mtx.Lock() + defer h.mtx.Unlock() + + // The hot counts might have been swapped just before we acquired the + // lock. Re-fetch the hot counts first... + n := atomic.LoadUint64(&h.countAndHotIdx) + hotIdx := n >> 63 + coldIdx := (^n) >> 63 + hotCounts := h.counts[hotIdx] + coldCounts := h.counts[coldIdx] + // ...and then check again if we really have to reduce the bucket count. + if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&hotCounts.nativeHistogramBucketsNumber) { + return // Bucket limit not exceeded after all. + } + // Try the various strategies in order. + if h.maybeReset(hotCounts, coldCounts, coldIdx, value, bucket) { + return + } + if h.maybeWidenZeroBucket(hotCounts, coldCounts) { + return + } + h.doubleBucketWidth(hotCounts, coldCounts) +} + +// maybeReset resets the whole histogram if at least h.nativeHistogramMinResetDuration +// has passed. It returns true if the histogram has been reset. The caller +// must have locked h.mtx. +func (h *histogram) maybeReset(hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int) bool { + // We are using the possibly mocked h.now() rather than + // time.Since(h.lastResetTime) to enable testing. + if h.nativeHistogramMinResetDuration == 0 || h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration { + return false + } + // Completely reset coldCounts. + h.resetCounts(cold) + // Repeat the latest observation to not lose it completely. + cold.observe(value, bucket, true) + // Make coldCounts the new hot counts while resetting countAndHotIdx. + n := atomic.SwapUint64(&h.countAndHotIdx, (coldIdx<<63)+1) + count := n & ((1 << 63) - 1) + waitForCooldown(count, hot) + // Finally, reset the formerly hot counts, too.
+ h.resetCounts(hot) + h.lastResetTime = h.now() + return true +} + +// maybeWidenZeroBucket widens the zero bucket until it includes the existing +// buckets closest to the zero bucket (which could be two, if an equidistant +// negative and a positive bucket exists, but usually it's only one bucket to be +// merged into the new wider zero bucket). h.nativeHistogramMaxZeroThreshold +// limits how far the zero bucket can be extended, and if that's not enough to +// include an existing bucket, the method returns false. The caller must have +// locked h.mtx. +func (h *histogram) maybeWidenZeroBucket(hot, cold *histogramCounts) bool { + currentZeroThreshold := math.Float64frombits(atomic.LoadUint64(&hot.nativeHistogramZeroThresholdBits)) + if currentZeroThreshold >= h.nativeHistogramMaxZeroThreshold { + return false + } + // Find the key of the bucket closest to zero. + smallestKey := findSmallestKey(&hot.nativeHistogramBucketsPositive) + smallestNegativeKey := findSmallestKey(&hot.nativeHistogramBucketsNegative) + if smallestNegativeKey < smallestKey { + smallestKey = smallestNegativeKey + } + if smallestKey == math.MaxInt32 { + return false + } + newZeroThreshold := getLe(smallestKey, atomic.LoadInt32(&hot.nativeHistogramSchema)) + if newZeroThreshold > h.nativeHistogramMaxZeroThreshold { + return false // New threshold would exceed the max threshold. + } + atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold)) + // Remove applicable buckets. + if _, loaded := cold.nativeHistogramBucketsNegative.LoadAndDelete(smallestKey); loaded { + atomicDecUint32(&cold.nativeHistogramBucketsNumber) + } + if _, loaded := cold.nativeHistogramBucketsPositive.LoadAndDelete(smallestKey); loaded { + atomicDecUint32(&cold.nativeHistogramBucketsNumber) + } + // Make cold counts the new hot counts. + n := atomic.AddUint64(&h.countAndHotIdx, 1<<63) + count := n & ((1 << 63) - 1) + // Swap the pointer names to represent the new roles and make + // the rest less confusing. + hot, cold = cold, hot + waitForCooldown(count, cold) + // Add all the now cold counts to the new hot counts... + addAndResetCounts(hot, cold) + // ...adjust the new zero threshold in the cold counts, too... + atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold)) + // ...and then merge the newly deleted buckets into the wider zero + // bucket. + mergeAndDeleteOrAddAndReset := func(hotBuckets, coldBuckets *sync.Map) func(k, v interface{}) bool { + return func(k, v interface{}) bool { + key := k.(int) + bucket := v.(*int64) + if key == smallestKey { + // Merge into hot zero bucket... + atomic.AddUint64(&hot.nativeHistogramZeroBucket, uint64(atomic.LoadInt64(bucket))) + // ...and delete from cold counts. + coldBuckets.Delete(key) + atomicDecUint32(&cold.nativeHistogramBucketsNumber) + } else { + // Add to corresponding hot bucket... + if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) { + atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1) + } + // ...and reset cold bucket. + atomic.StoreInt64(bucket, 0) + } + return true } } - // Increment count last as we take it as a signal that the observation - // is complete. 
- atomic.AddUint64(&hotCounts.count, 1) + + cold.nativeHistogramBucketsPositive.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsPositive, &cold.nativeHistogramBucketsPositive)) + cold.nativeHistogramBucketsNegative.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsNegative, &cold.nativeHistogramBucketsNegative)) + return true +} + +// doubleBucketWidth doubles the bucket width (by decrementing the schema +// number). Note that very sparse buckets could lead to a low reduction of the +// bucket count (or even no reduction at all). The method does nothing if the +// schema is already -4. +func (h *histogram) doubleBucketWidth(hot, cold *histogramCounts) { + coldSchema := atomic.LoadInt32(&cold.nativeHistogramSchema) + if coldSchema == -4 { + return // Already at lowest resolution. + } + coldSchema-- + atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema) + // Play it simple and just delete all cold buckets. + atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0) + deleteSyncMap(&cold.nativeHistogramBucketsNegative) + deleteSyncMap(&cold.nativeHistogramBucketsPositive) + // Make coldCounts the new hot counts. + n := atomic.AddUint64(&h.countAndHotIdx, 1<<63) + count := n & ((1 << 63) - 1) + // Swap the pointer names to represent the new roles and make + // the rest less confusing. + hot, cold = cold, hot + waitForCooldown(count, cold) + // Add all the now cold counts to the new hot counts... + addAndResetCounts(hot, cold) + // ...adjust the schema in the cold counts, too... + atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema) + // ...and then merge the cold buckets into the wider hot buckets. + merge := func(hotBuckets *sync.Map) func(k, v interface{}) bool { + return func(k, v interface{}) bool { + key := k.(int) + bucket := v.(*int64) + // Adjust key to match the bucket to merge into. + if key > 0 { + key++ + } + key /= 2 + // Add to corresponding hot bucket. + if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) { + atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1) + } + return true + } + } + + cold.nativeHistogramBucketsPositive.Range(merge(&hot.nativeHistogramBucketsPositive)) + cold.nativeHistogramBucketsNegative.Range(merge(&hot.nativeHistogramBucketsNegative)) + // Play it simple again and just delete all cold buckets. + atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0) + deleteSyncMap(&cold.nativeHistogramBucketsNegative) + deleteSyncMap(&cold.nativeHistogramBucketsPositive) +} + +func (h *histogram) resetCounts(counts *histogramCounts) { + atomic.StoreUint64(&counts.sumBits, 0) + atomic.StoreUint64(&counts.count, 0) + atomic.StoreUint64(&counts.nativeHistogramZeroBucket, 0) + atomic.StoreUint64(&counts.nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold)) + atomic.StoreInt32(&counts.nativeHistogramSchema, h.nativeHistogramSchema) + atomic.StoreUint32(&counts.nativeHistogramBucketsNumber, 0) + for i := range h.upperBounds { + atomic.StoreUint64(&counts.buckets[i], 0) + } + deleteSyncMap(&counts.nativeHistogramBucketsNegative) + deleteSyncMap(&counts.nativeHistogramBucketsPositive) } // updateExemplar replaces the exemplar for the provided bucket. With empty @@ -516,7 +1102,8 @@ func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) { // WithLabelValues works as GetMetricWithLabelValues, but panics where // GetMetricWithLabelValues would have returned an error. 
Not returning an // error allows shortcuts like -// myVec.WithLabelValues("404", "GET").Observe(42.21) +// +// myVec.WithLabelValues("404", "GET").Observe(42.21) func (v *HistogramVec) WithLabelValues(lvs ...string) Observer { h, err := v.GetMetricWithLabelValues(lvs...) if err != nil { @@ -527,7 +1114,8 @@ func (v *HistogramVec) WithLabelValues(lvs ...string) Observer { // With works as GetMetricWith but panics where GetMetricWithLabels would have // returned an error. Not returning an error allows shortcuts like -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) +// +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) func (v *HistogramVec) With(labels Labels) Observer { h, err := v.GetMetricWith(labels) if err != nil { @@ -613,7 +1201,7 @@ func (h *constHistogram) Write(out *dto.Metric) error { // to send it to Prometheus in the Collect method. // // buckets is a map of upper bounds to cumulative counts, excluding the +Inf -// bucket. +// bucket. The +Inf bucket is implicit, and its value is equal to the provided count. // // NewConstHistogram returns an error if the length of labelValues is not // consistent with the variable labels in Desc or if Desc is invalid. @@ -668,3 +1256,229 @@ func (s buckSort) Swap(i, j int) { func (s buckSort) Less(i, j int) bool { return s[i].GetUpperBound() < s[j].GetUpperBound() } + +// pickSchema returns the largest number n between -4 and 8 such that +// 2^(2^-n) is less or equal the provided bucketFactor. +// +// Special cases: +// - bucketFactor <= 1: panics. +// - bucketFactor < 2^(2^-8) (but > 1): still returns 8. +func pickSchema(bucketFactor float64) int32 { + if bucketFactor <= 1 { + panic(fmt.Errorf("bucketFactor %f is <=1", bucketFactor)) + } + floor := math.Floor(math.Log2(math.Log2(bucketFactor))) + switch { + case floor <= -8: + return 8 + case floor >= 4: + return -4 + default: + return -int32(floor) + } +} + +func makeBuckets(buckets *sync.Map) ([]*dto.BucketSpan, []int64) { + var ii []int + buckets.Range(func(k, v interface{}) bool { + ii = append(ii, k.(int)) + return true + }) + sort.Ints(ii) + + if len(ii) == 0 { + return nil, nil + } + + var ( + spans []*dto.BucketSpan + deltas []int64 + prevCount int64 + nextI int + ) + + appendDelta := func(count int64) { + *spans[len(spans)-1].Length++ + deltas = append(deltas, count-prevCount) + prevCount = count + } + + for n, i := range ii { + v, _ := buckets.Load(i) + count := atomic.LoadInt64(v.(*int64)) + // Multiple spans with only small gaps in between are probably + // encoded more efficiently as one larger span with a few empty + // buckets. Needs some research to find the sweet spot. For now, + // we assume that gaps of one ore two buckets should not create + // a new span. + iDelta := int32(i - nextI) + if n == 0 || iDelta > 2 { + // We have to create a new span, either because we are + // at the very beginning, or because we have found a gap + // of more than two buckets. + spans = append(spans, &dto.BucketSpan{ + Offset: proto.Int32(iDelta), + Length: proto.Uint32(0), + }) + } else { + // We have found a small gap (or no gap at all). + // Insert empty buckets as needed. + for j := int32(0); j < iDelta; j++ { + appendDelta(0) + } + } + appendDelta(count) + nextI = i + 1 + } + return spans, deltas +} + +// addToBucket increments the sparse bucket at key by the provided amount. It +// returns true if a new sparse bucket had to be created for that. 
+// addToBucket increments the sparse bucket at key by the provided amount. It +// returns true if a new sparse bucket had to be created for that. +func addToBucket(buckets *sync.Map, key int, increment int64) bool { + if existingBucket, ok := buckets.Load(key); ok { + // Fast path without allocation. + atomic.AddInt64(existingBucket.(*int64), increment) + return false + } + // Bucket doesn't exist yet. Slow path allocating new counter. + newBucket := increment // TODO(beorn7): Check if this is sufficient to not let increment escape. + if actualBucket, loaded := buckets.LoadOrStore(key, &newBucket); loaded { + // The bucket was created concurrently in another goroutine. + // Have to increment after all. + atomic.AddInt64(actualBucket.(*int64), increment) + return false + } + return true +} + +// addAndReset returns a function to be used with sync.Map.Range of sparse +// buckets in coldCounts. It increments the buckets in the provided hotBuckets +// according to the buckets ranged through. It then resets all buckets ranged +// through to 0 (but leaves them in place so that they don't need to get +// recreated on the next scrape). +func addAndReset(hotBuckets *sync.Map, bucketNumber *uint32) func(k, v interface{}) bool { + return func(k, v interface{}) bool { + bucket := v.(*int64) + if addToBucket(hotBuckets, k.(int), atomic.LoadInt64(bucket)) { + atomic.AddUint32(bucketNumber, 1) + } + atomic.StoreInt64(bucket, 0) + return true + } +} + +func deleteSyncMap(m *sync.Map) { + m.Range(func(k, v interface{}) bool { + m.Delete(k) + return true + }) +} + +func findSmallestKey(m *sync.Map) int { + result := math.MaxInt32 + m.Range(func(k, v interface{}) bool { + key := k.(int) + if key < result { + result = key + } + return true + }) + return result +}
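The sync.Map pattern that addToBucket and findSmallestKey rely on can be exercised on its own. A standalone sketch (stdlib only; keys and increments are hypothetical): LoadOrStore gives lock-free bucket creation, atomic.AddInt64 gives lock-free increments on the stored *int64 values, and math.MaxInt32 serves as the "no buckets" sentinel.

```go
package main

import (
	"fmt"
	"math"
	"sync"
	"sync/atomic"
)

// add mirrors addToBucket above: it returns true only when a new bucket
// had to be created for the key.
func add(m *sync.Map, key int, inc int64) bool {
	if v, ok := m.Load(key); ok {
		atomic.AddInt64(v.(*int64), inc)
		return false
	}
	n := inc
	if v, loaded := m.LoadOrStore(key, &n); loaded {
		// Lost a race: another goroutine created the bucket first.
		atomic.AddInt64(v.(*int64), inc)
		return false
	}
	return true
}

func main() {
	var buckets sync.Map
	fmt.Println(add(&buckets, 3, 1))  // true: bucket created
	fmt.Println(add(&buckets, -2, 5)) // true: bucket created
	fmt.Println(add(&buckets, 3, 1))  // false: existing bucket incremented

	smallest := math.MaxInt32 // sentinel, as in findSmallestKey
	buckets.Range(func(k, _ interface{}) bool {
		if key := k.(int); key < smallest {
			smallest = key
		}
		return true
	})
	fmt.Println("smallest key:", smallest) // -2
}
```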
+ +func getLe(key int, schema int32) float64 { + // Here a bit of context about the behavior for the last bucket counting + // regular numbers (called simply "last bucket" below) and the bucket + // counting observations of ±Inf (called "inf bucket" below, with a key + // one higher than that of the "last bucket"): + // + // If we apply the usual formula to the last bucket, its upper bound + // would be calculated as +Inf. The reason is that the max possible + // regular float64 number (math.MaxFloat64) doesn't coincide with one of + // the calculated bucket boundaries. So the calculated boundary has to + // be larger than math.MaxFloat64, and the only float64 larger than + // math.MaxFloat64 is +Inf. However, we want to count actual + // observations of ±Inf in the inf bucket. Therefore, we have to treat + // the upper bound of the last bucket specially and set it to + // math.MaxFloat64. (The upper bound of the inf bucket, with its key + // being one higher than that of the last bucket, naturally comes out as + // +Inf by the usual formula. So that's fine.) + // + // math.MaxFloat64 has a frac of 0.9999999999999999 and an exp of + // 1024. If there were a float64 number following math.MaxFloat64, it + // would have a frac of 1.0 and an exp of 1024, or equivalently a frac + // of 0.5 and an exp of 1025. However, since frac must be smaller than + // 1, and exp must be smaller than 1025, either representation overflows + // a float64. (Which, in turn, is the reason that math.MaxFloat64 is the + // largest possible float64. Q.E.D.) However, the formula for + // calculating the upper bound from the idx and schema of the last + // bucket results in precisely that. It is either frac=1.0 & exp=1024 + // (for schema < 0) or frac=0.5 & exp=1025 (for schema >=0). (This is, + // by the way, a power of two where the exponent itself is a power of + // two, 2¹⁰ in fact, which coincides with a bucket boundary in all + // schemas.) So these are the special cases we have to catch below. + if schema < 0 { + exp := key << -schema + if exp == 1024 { + // This is the last bucket before the overflow bucket + // (for ±Inf observations). Return math.MaxFloat64 as + // explained above. + return math.MaxFloat64 + } + return math.Ldexp(1, exp) + } + + fracIdx := key & ((1 << schema) - 1) + frac := nativeHistogramBounds[schema][fracIdx] + exp := (key >> schema) + 1 + if frac == 0.5 && exp == 1025 { + // This is the last bucket before the overflow bucket (for ±Inf + // observations). Return math.MaxFloat64 as explained above. + return math.MaxFloat64 + } + return math.Ldexp(frac, exp) +} + +// waitForCooldown returns after the count field in the provided histogramCounts +// has reached the provided count value. +func waitForCooldown(count uint64, counts *histogramCounts) { + for count != atomic.LoadUint64(&counts.count) { + runtime.Gosched() // Let observations get work done. + } +} + +// atomicAddFloat adds the provided float atomically to another float +// represented by the bit pattern the bits pointer is pointing to. +func atomicAddFloat(bits *uint64, v float64) { + for { + loadedBits := atomic.LoadUint64(bits) + newBits := math.Float64bits(math.Float64frombits(loadedBits) + v) + if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) { + break + } + } +} + +// atomicDecUint32 atomically decrements the uint32 p points to. See +// https://pkg.go.dev/sync/atomic#AddUint32 to understand how this is done. +func atomicDecUint32(p *uint32) { + atomic.AddUint32(p, ^uint32(0)) +} + +// addAndResetCounts adds certain fields (count, sum, conventional buckets, zero +// bucket) from the cold counts to the corresponding fields in the hot +// counts. Those fields are then reset to 0 in the cold counts. +func addAndResetCounts(hot, cold *histogramCounts) { + atomic.AddUint64(&hot.count, atomic.LoadUint64(&cold.count)) + atomic.StoreUint64(&cold.count, 0) + coldSum := math.Float64frombits(atomic.LoadUint64(&cold.sumBits)) + atomicAddFloat(&hot.sumBits, coldSum) + atomic.StoreUint64(&cold.sumBits, 0) + for i := range hot.buckets { + atomic.AddUint64(&hot.buckets[i], atomic.LoadUint64(&cold.buckets[i])) + atomic.StoreUint64(&cold.buckets[i], 0) + } + atomic.AddUint64(&hot.nativeHistogramZeroBucket, atomic.LoadUint64(&cold.nativeHistogramZeroBucket)) + atomic.StoreUint64(&cold.nativeHistogramZeroBucket, 0) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go new file mode 100644 index 0000000..1ed5abe --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go @@ -0,0 +1,60 @@ +// Copyright (c) 2015 Björn Rabenstein +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software.
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. +// +// The code in this package is copy/paste to avoid a dependency. Hence this file +// carries the copyright of the original repo. +// https://github.com/beorn7/floats +package internal + +import ( + "math" +) + +// minNormalFloat64 is the smallest positive normal value of type float64. +var minNormalFloat64 = math.Float64frombits(0x0010000000000000) + +// AlmostEqualFloat64 returns true if a and b are equal within a relative error +// of epsilon. See http://floating-point-gui.de/errors/comparison/ for the +// details of the applied method. +func AlmostEqualFloat64(a, b, epsilon float64) bool { + if a == b { + return true + } + absA := math.Abs(a) + absB := math.Abs(b) + diff := math.Abs(a - b) + if a == 0 || b == 0 || absA+absB < minNormalFloat64 { + return diff < epsilon*minNormalFloat64 + } + return diff/math.Min(absA+absB, math.MaxFloat64) < epsilon +} + +// AlmostEqualFloat64s is the slice form of AlmostEqualFloat64. +func AlmostEqualFloat64s(a, b []float64, epsilon float64) bool { + if len(a) != len(b) { + return false + } + for i := range a { + if !AlmostEqualFloat64(a[i], b[i], epsilon) { + return false + } + } + return true +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go index fd45cad..fd0750f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go @@ -201,12 +201,15 @@ func (m *SequenceMatcher) isBJunk(s string) bool { // If IsJunk is not defined: // // Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where -// alo <= i <= i+k <= ahi -// blo <= j <= j+k <= bhi +// +// alo <= i <= i+k <= ahi +// blo <= j <= j+k <= bhi +// // and for all (i',j',k') meeting those conditions, -// k >= k' -// i <= i' -// and if i == i', j <= j' +// +// k >= k' +// i <= i' +// and if i == i', j <= j' // // In other words, of all maximal matching blocks, return one that // starts earliest in a, and of all those maximal matching blocks that diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go index 6eee198..c1b8fad 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go @@ -25,7 +25,8 @@ import ( // Labels represents a collection of label name -> value mappings. This type is // commonly used with the With(Labels) and GetMetricWith(Labels) methods of // metric vector Collectors, e.g.: -// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +// +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) // // The other use-case is the specification of constant label pairs in Opts or to // create a Desc. 
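Since the new almost_equal.go added above lives in an internal package, it cannot be imported from user code; the following standalone sketch replicates its semantics to show what the relative-epsilon comparison buys over plain ==. The inputs are hypothetical:

```go
package main

import (
	"fmt"
	"math"
)

// minNormalFloat64 and almostEqual replicate the vendored helper above;
// the real function is unexported to consumers because its package is
// internal to client_golang.
var minNormalFloat64 = math.Float64frombits(0x0010000000000000)

func almostEqual(a, b, epsilon float64) bool {
	if a == b {
		return true
	}
	absA, absB, diff := math.Abs(a), math.Abs(b), math.Abs(a-b)
	if a == 0 || b == 0 || absA+absB < minNormalFloat64 {
		// Near zero, fall back to an absolute comparison scaled to
		// the smallest normal float.
		return diff < epsilon*minNormalFloat64
	}
	// Otherwise compare the error relative to the magnitudes involved.
	return diff/math.Min(absA+absB, math.MaxFloat64) < epsilon
}

func main() {
	fmt.Println(almostEqual(1000.0, 1000.0001, 1e-6)) // true: relative error ~5e-8
	fmt.Println(almostEqual(1000.0, 1000.1, 1e-6))    // false: relative error ~5e-5
	fmt.Println(0.1+0.2 == 0.3)                       // false: classic float pitfall
	fmt.Println(almostEqual(0.1+0.2, 0.3, 1e-9))      // true
}
```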
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go index f0941f6..b5119c5 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -187,7 +187,7 @@ func (m *withExemplarsMetric) Write(pb *dto.Metric) error { } else { // The +Inf bucket should be explicitly added if there is an exemplar for it, similar to non-const histogram logic in https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go#L357-L365. b := &dto.Bucket{ - CumulativeCount: proto.Uint64(pb.Histogram.Bucket[len(pb.Histogram.GetBucket())-1].GetCumulativeCount()), + CumulativeCount: proto.Uint64(pb.Histogram.GetSampleCount()), UpperBound: proto.Float64(math.Inf(1)), Exemplar: e, } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go index 097aff2..2108678 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go @@ -73,12 +73,11 @@ func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.Rou return func(r *http.Request) (*http.Response, error) { resp, err := next.RoundTrip(r) if err == nil { - exemplarAdd( + addWithExemplar( counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)), 1, rtOpts.getExemplarFn(r.Context()), ) - counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)).Inc() } return resp, err } @@ -117,7 +116,7 @@ func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundT start := time.Now() resp, err := next.RoundTrip(r) if err == nil { - exemplarObserve( + observeWithExemplar( obs.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)), time.Since(start).Seconds(), rtOpts.getExemplarFn(r.Context()), diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go index bfe5009..cca67a7 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go @@ -28,7 +28,9 @@ import ( // magicString is used for the hacky label test in checkLabels. Remove once fixed. const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa" -func exemplarObserve(obs prometheus.Observer, val float64, labels map[string]string) { +// observeWithExemplar is a wrapper for [prometheus.ExemplarAdder.ExemplarObserver], +// which falls back to [prometheus.Observer.Observe] if no labels are provided. +func observeWithExemplar(obs prometheus.Observer, val float64, labels map[string]string) { if labels == nil { obs.Observe(val) return @@ -36,7 +38,9 @@ func exemplarObserve(obs prometheus.Observer, val float64, labels map[string]str obs.(prometheus.ExemplarObserver).ObserveWithExemplar(val, labels) } -func exemplarAdd(obs prometheus.Counter, val float64, labels map[string]string) { +// addWithExemplar is a wrapper for [prometheus.ExemplarAdder.AddWithExemplar], +// which falls back to [prometheus.Counter.Add] if no labels are provided. 
+func addWithExemplar(obs prometheus.Counter, val float64, labels map[string]string) { if labels == nil { obs.Add(val) return @@ -91,7 +95,7 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op d := newDelegator(w, nil) next.ServeHTTP(d, r) - exemplarObserve( + observeWithExemplar( obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()), @@ -103,7 +107,7 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op now := time.Now() next.ServeHTTP(w, r) - exemplarObserve( + observeWithExemplar( obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()), @@ -141,7 +145,7 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler, d := newDelegator(w, nil) next.ServeHTTP(d, r) - exemplarAdd( + addWithExemplar( counter.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), 1, hOpts.getExemplarFn(r.Context()), @@ -151,7 +155,7 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler, return func(w http.ResponseWriter, r *http.Request) { next.ServeHTTP(w, r) - exemplarAdd( + addWithExemplar( counter.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)), 1, hOpts.getExemplarFn(r.Context()), @@ -192,7 +196,7 @@ func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Ha return func(w http.ResponseWriter, r *http.Request) { now := time.Now() d := newDelegator(w, func(status int) { - exemplarObserve( + observeWithExemplar( obs.With(labels(code, method, r.Method, status, hOpts.extraMethods...)), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()), @@ -233,7 +237,7 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler, d := newDelegator(w, nil) next.ServeHTTP(d, r) size := computeApproximateRequestSize(r) - exemplarObserve( + observeWithExemplar( obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), float64(size), hOpts.getExemplarFn(r.Context()), @@ -244,7 +248,7 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler, return func(w http.ResponseWriter, r *http.Request) { next.ServeHTTP(w, r) size := computeApproximateRequestSize(r) - exemplarObserve( + observeWithExemplar( obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)), float64(size), hOpts.getExemplarFn(r.Context()), @@ -282,7 +286,7 @@ func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { d := newDelegator(w, nil) next.ServeHTTP(d, r) - exemplarObserve( + observeWithExemplar( obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), float64(d.Written()), hOpts.getExemplarFn(r.Context()), diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go index 325f665..09e34d3 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -252,9 +252,12 @@ func (errs MultiError) MaybeUnwrap() error { } // Registry registers Prometheus collectors, collects their metrics, and gathers -// them into MetricFamilies for exposition. It implements both Registerer and -// Gatherer. The zero value is not usable. Create instances with NewRegistry or -// NewPedanticRegistry. 
+// them into MetricFamilies for exposition. It implements Registerer, Gatherer, +// and Collector. The zero value is not usable. Create instances with +// NewRegistry or NewPedanticRegistry. +// +// Registry implements Collector to allow it to be used for creating groups of +// metrics. See the Grouping example for how this can be done. type Registry struct { mtx sync.RWMutex collectorsByID map[uint64]Collector // ID is a hash of the descIDs. @@ -556,6 +559,31 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) { return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() } +// Describe implements Collector. +func (r *Registry) Describe(ch chan<- *Desc) { + r.mtx.RLock() + defer r.mtx.RUnlock() + + // Only report the checked Collectors; unchecked collectors don't report any + // Desc. + for _, c := range r.collectorsByID { + c.Describe(ch) + } +} + +// Collect implements Collector. +func (r *Registry) Collect(ch chan<- Metric) { + r.mtx.RLock() + defer r.mtx.RUnlock() + + for _, c := range r.collectorsByID { + c.Collect(ch) + } + for _, c := range r.uncheckedCollectors { + c.Collect(ch) + } +} + // WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the // Prometheus text format, and writes it to a temporary file. Upon success, the // temporary file is renamed to the provided filename. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go index c5fa8ed..7bc448a 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -603,7 +603,8 @@ func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) { // WithLabelValues works as GetMetricWithLabelValues, but panics where // GetMetricWithLabelValues would have returned an error. Not returning an // error allows shortcuts like -// myVec.WithLabelValues("404", "GET").Observe(42.21) +// +// myVec.WithLabelValues("404", "GET").Observe(42.21) func (v *SummaryVec) WithLabelValues(lvs ...string) Observer { s, err := v.GetMetricWithLabelValues(lvs...) if err != nil { @@ -614,7 +615,8 @@ func (v *SummaryVec) WithLabelValues(lvs ...string) Observer { // With works as GetMetricWith, but panics where GetMetricWithLabels would have // returned an error. Not returning an error allows shortcuts like -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) +// +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) func (v *SummaryVec) With(labels Labels) Observer { s, err := v.GetMetricWith(labels) if err != nil { @@ -701,7 +703,8 @@ func (s *constSummary) Write(out *dto.Metric) error { // // quantiles maps ranks to quantile values. For example, a median latency of // 0.23s and a 99th percentile latency of 0.56s would be expressed as: -// map[float64]float64{0.5: 0.23, 0.99: 0.56} +// +// map[float64]float64{0.5: 0.23, 0.99: 0.56} // // NewConstSummary returns an error if the length of labelValues is not // consistent with the variable labels in Desc or if Desc is invalid. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go index 8d5f105..f28a76f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/timer.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/timer.go @@ -25,11 +25,12 @@ type Timer struct { // NewTimer creates a new Timer. 
The provided Observer is used to observe a // duration in seconds. Timer is usually used to time a function call in the // following way: -// func TimeMe() { -// timer := NewTimer(myHistogram) -// defer timer.ObserveDuration() -// // Do actual work. -// } +// +// func TimeMe() { +// timer := NewTimer(myHistogram) +// defer timer.ObserveDuration() +// // Do actual work. +// } func NewTimer(o Observer) *Timer { return &Timer{ begin: time.Now(), diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index 7657f84..f4fc884 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -115,32 +115,28 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error { // textDecoder implements the Decoder interface for the text protocol. type textDecoder struct { r io.Reader - p TextParser - fams []*dto.MetricFamily + fams map[string]*dto.MetricFamily + err error } // Decode implements the Decoder interface. func (d *textDecoder) Decode(v *dto.MetricFamily) error { - // TODO(fabxc): Wrap this as a line reader to make streaming safer. - if len(d.fams) == 0 { - // No cached metric families, read everything and parse metrics. - fams, err := d.p.TextToMetricFamilies(d.r) - if err != nil { - return err - } - if len(fams) == 0 { - return io.EOF - } - d.fams = make([]*dto.MetricFamily, 0, len(fams)) - for _, f := range fams { - d.fams = append(d.fams, f) + if d.err == nil { + // Read all metrics in one shot. + var p TextParser + d.fams, d.err = p.TextToMetricFamilies(d.r) + // If we don't get an error, store io.EOF for the end. + if d.err == nil { + d.err = io.EOF } } - - *v = *d.fams[0] - d.fams = d.fams[1:] - - return nil + // Pick off one MetricFamily per Decode until there's nothing left. + for key, fam := range d.fams { + *v = *fam + delete(d.fams, key) + return nil + } + return d.err } // SampleDecoder wraps a Decoder to extract samples from the metric families diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go index f819e4f..dfac962 100644 --- a/vendor/github.com/prometheus/common/expfmt/fuzz.go +++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go @@ -21,8 +21,8 @@ import "bytes" // Fuzz text metric parser with with github.com/dvyukov/go-fuzz: // -// go-fuzz-build github.com/prometheus/common/expfmt -// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz +// go-fuzz-build github.com/prometheus/common/expfmt +// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz // // Further input samples should go in the folder fuzz/corpus. func Fuzz(in []byte) int { diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go index 9d94ae9..21cdddc 100644 --- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -46,20 +46,20 @@ import ( // missing features and peculiarities to avoid complications when switching from // Prometheus to OpenMetrics or vice versa: // -// - Counters are expected to have the `_total` suffix in their metric name. In -// the output, the suffix will be truncated from the `# TYPE` and `# HELP` -// line. A counter with a missing `_total` suffix is not an error. However, -// its type will be set to `unknown` in that case to avoid invalid OpenMetrics -// output. +// - Counters are expected to have the `_total` suffix in their metric name. 
In +// the output, the suffix will be truncated from the `# TYPE` and `# HELP` +// line. A counter with a missing `_total` suffix is not an error. However, +// its type will be set to `unknown` in that case to avoid invalid OpenMetrics +// output. // -// - No support for the following (optional) features: `# UNIT` line, `_created` -// line, info type, stateset type, gaugehistogram type. +// - No support for the following (optional) features: `# UNIT` line, `_created` +// line, info type, stateset type, gaugehistogram type. // -// - The size of exemplar labels is not checked (i.e. it's possible to create -// exemplars that are larger than allowed by the OpenMetrics specification). +// - The size of exemplar labels is not checked (i.e. it's possible to create +// exemplars that are larger than allowed by the OpenMetrics specification). // -// - The value of Counters is not checked. (OpenMetrics doesn't allow counters -// with a `NaN` value.) +// - The value of Counters is not checked. (OpenMetrics doesn't allow counters +// with a `NaN` value.) func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) { name := in.GetName() if name == "" { diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go index 5ba503b..2946b8f 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -17,7 +17,6 @@ import ( "bufio" "fmt" "io" - "io/ioutil" "math" "strconv" "strings" @@ -44,7 +43,7 @@ const ( var ( bufPool = sync.Pool{ New: func() interface{} { - return bufio.NewWriter(ioutil.Discard) + return bufio.NewWriter(io.Discard) }, } numBufPool = sync.Pool{ diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index 84be064..ac24827 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -142,9 +142,13 @@ func (p *TextParser) reset(in io.Reader) { func (p *TextParser) startOfLine() stateFn { p.lineCount++ if p.skipBlankTab(); p.err != nil { - // End of input reached. This is the only case where - // that is not an error but a signal that we are done. - p.err = nil + // This is the only place that we expect to see io.EOF, + // which is not an error but the signal that we are done. + // Any other error that happens to align with the start of + // a line is still an error. + if p.err == io.EOF { + p.err = nil + } return nil } switch p.currentByte { diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go index 26e9228..a21b9d1 100644 --- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go +++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go @@ -11,18 +11,18 @@ Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
- Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT @@ -35,8 +35,6 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - */ package goautoneg diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go index c909b8a..5727452 100644 --- a/vendor/github.com/prometheus/common/model/time.go +++ b/vendor/github.com/prometheus/common/model/time.go @@ -18,7 +18,6 @@ import ( "errors" "fmt" "math" - "regexp" "strconv" "strings" "time" @@ -183,54 +182,78 @@ func (d *Duration) Type() string { return "duration" } -var durationRE = regexp.MustCompile("^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$") +func isdigit(c byte) bool { return c >= '0' && c <= '9' } + +// Units are required to go in order from biggest to smallest. +// This guards against confusion from "1m1d" being 1 minute + 1 day, not 1 month + 1 day. +var unitMap = map[string]struct { + pos int + mult uint64 +}{ + "ms": {7, uint64(time.Millisecond)}, + "s": {6, uint64(time.Second)}, + "m": {5, uint64(time.Minute)}, + "h": {4, uint64(time.Hour)}, + "d": {3, uint64(24 * time.Hour)}, + "w": {2, uint64(7 * 24 * time.Hour)}, + "y": {1, uint64(365 * 24 * time.Hour)}, +} // ParseDuration parses a string into a time.Duration, assuming that a year // always has 365d, a week always has 7d, and a day always has 24h. -func ParseDuration(durationStr string) (Duration, error) { - switch durationStr { +func ParseDuration(s string) (Duration, error) { + switch s { case "0": // Allow 0 without a unit. return 0, nil case "": return 0, errors.New("empty duration string") } - matches := durationRE.FindStringSubmatch(durationStr) - if matches == nil { - return 0, fmt.Errorf("not a valid duration string: %q", durationStr) - } - var dur time.Duration - // Parse the match at pos `pos` in the regex and use `mult` to turn that - // into ms, then add that value to the total parsed duration. 
- var overflowErr error - m := func(pos int, mult time.Duration) { - if matches[pos] == "" { - return + orig := s + var dur uint64 + lastUnitPos := 0 + + for s != "" { + if !isdigit(s[0]) { + return 0, fmt.Errorf("not a valid duration string: %q", orig) } - n, _ := strconv.Atoi(matches[pos]) + // Consume [0-9]* + i := 0 + for ; i < len(s) && isdigit(s[i]); i++ { + } + v, err := strconv.ParseUint(s[:i], 10, 0) + if err != nil { + return 0, fmt.Errorf("not a valid duration string: %q", orig) + } + s = s[i:] + // Consume unit. + for i = 0; i < len(s) && !isdigit(s[i]); i++ { + } + if i == 0 { + return 0, fmt.Errorf("not a valid duration string: %q", orig) + } + u := s[:i] + s = s[i:] + unit, ok := unitMap[u] + if !ok { + return 0, fmt.Errorf("unknown unit %q in duration %q", u, orig) + } + if unit.pos <= lastUnitPos { // Units must go in order from biggest to smallest. + return 0, fmt.Errorf("not a valid duration string: %q", orig) + } + lastUnitPos = unit.pos // Check if the provided duration overflows time.Duration (> ~ 290years). - if n > int((1<<63-1)/mult/time.Millisecond) { - overflowErr = errors.New("duration out of range") + if v > 1<<63/unit.mult { + return 0, errors.New("duration out of range") } - d := time.Duration(n) * time.Millisecond - dur += d * mult - - if dur < 0 { - overflowErr = errors.New("duration out of range") + dur += v * unit.mult + if dur > 1<<63-1 { + return 0, errors.New("duration out of range") } } - - m(2, 1000*60*60*24*365) // y - m(4, 1000*60*60*24*7) // w - m(6, 1000*60*60*24) // d - m(8, 1000*60*60) // h - m(10, 1000*60) // m - m(12, 1000) // s - m(14, 1) // ms - - return Duration(dur), overflowErr + return Duration(dur), nil } func (d Duration) String() string { diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go index c9d8fb1..9eb4404 100644 --- a/vendor/github.com/prometheus/common/model/value.go +++ b/vendor/github.com/prometheus/common/model/value.go @@ -16,20 +16,12 @@ package model import ( "encoding/json" "fmt" - "math" "sort" "strconv" "strings" ) var ( - // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a - // non-existing sample pair. It is a SamplePair with timestamp Earliest and - // value 0.0. Note that the natural zero value of SamplePair has a timestamp - // of 0, which is possible to appear in a real SamplePair and thus not - // suitable to signal a non-existing SamplePair. - ZeroSamplePair = SamplePair{Timestamp: Earliest} - // ZeroSample is the pseudo zero-value of Sample used to signal a // non-existing sample. It is a Sample with timestamp Earliest, value 0.0, // and metric nil. Note that the natural zero value of Sample has a timestamp @@ -38,82 +30,14 @@ var ( ZeroSample = Sample{Timestamp: Earliest} ) -// A SampleValue is a representation of a value for a given sample at a given -// time. -type SampleValue float64 - -// MarshalJSON implements json.Marshaler. -func (v SampleValue) MarshalJSON() ([]byte, error) { - return json.Marshal(v.String()) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (v *SampleValue) UnmarshalJSON(b []byte) error { - if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { - return fmt.Errorf("sample value must be a quoted string") - } - f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) - if err != nil { - return err - } - *v = SampleValue(f) - return nil -} - -// Equal returns true if the value of v and o is equal or if both are NaN. Note -// that v==o is false if both are NaN. 
If you want the conventional float -// behavior, use == to compare two SampleValues. -func (v SampleValue) Equal(o SampleValue) bool { - if v == o { - return true - } - return math.IsNaN(float64(v)) && math.IsNaN(float64(o)) -} - -func (v SampleValue) String() string { - return strconv.FormatFloat(float64(v), 'f', -1, 64) -} - -// SamplePair pairs a SampleValue with a Timestamp. -type SamplePair struct { - Timestamp Time - Value SampleValue -} - -// MarshalJSON implements json.Marshaler. -func (s SamplePair) MarshalJSON() ([]byte, error) { - t, err := json.Marshal(s.Timestamp) - if err != nil { - return nil, err - } - v, err := json.Marshal(s.Value) - if err != nil { - return nil, err - } - return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (s *SamplePair) UnmarshalJSON(b []byte) error { - v := [...]json.Unmarshaler{&s.Timestamp, &s.Value} - return json.Unmarshal(b, &v) -} - -// Equal returns true if this SamplePair and o have equal Values and equal -// Timestamps. The semantics of Value equality is defined by SampleValue.Equal. -func (s *SamplePair) Equal(o *SamplePair) bool { - return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp)) -} - -func (s SamplePair) String() string { - return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp) -} - -// Sample is a sample pair associated with a metric. +// Sample is a sample pair associated with a metric. A single sample must either +// define Value or Histogram but not both. Histogram == nil implies the Value +// field is used, otherwise it should be ignored. type Sample struct { - Metric Metric `json:"metric"` - Value SampleValue `json:"value"` - Timestamp Time `json:"timestamp"` + Metric Metric `json:"metric"` + Value SampleValue `json:"value"` + Timestamp Time `json:"timestamp"` + Histogram *SampleHistogram `json:"histogram"` } // Equal compares first the metrics, then the timestamp, then the value. The @@ -129,11 +53,19 @@ func (s *Sample) Equal(o *Sample) bool { if !s.Timestamp.Equal(o.Timestamp) { return false } - + if s.Histogram != nil { + return s.Histogram.Equal(o.Histogram) + } return s.Value.Equal(o.Value) } func (s Sample) String() string { + if s.Histogram != nil { + return fmt.Sprintf("%s => %s", s.Metric, SampleHistogramPair{ + Timestamp: s.Timestamp, + Histogram: s.Histogram, + }) + } return fmt.Sprintf("%s => %s", s.Metric, SamplePair{ Timestamp: s.Timestamp, Value: s.Value, @@ -142,6 +74,19 @@ func (s Sample) String() string { // MarshalJSON implements json.Marshaler. func (s Sample) MarshalJSON() ([]byte, error) { + if s.Histogram != nil { + v := struct { + Metric Metric `json:"metric"` + Histogram SampleHistogramPair `json:"histogram"` + }{ + Metric: s.Metric, + Histogram: SampleHistogramPair{ + Timestamp: s.Timestamp, + Histogram: s.Histogram, + }, + } + return json.Marshal(&v) + } v := struct { Metric Metric `json:"metric"` Value SamplePair `json:"value"` @@ -152,21 +97,25 @@ func (s Sample) MarshalJSON() ([]byte, error) { Value: s.Value, }, } - return json.Marshal(&v) } // UnmarshalJSON implements json.Unmarshaler. 
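// Illustration only — a minimal sketch (not from the vendored sources) of the dual
// wire format the extended Sample above supports: with Histogram nil the classic
// {"metric":...,"value":[ts,"v"]} shape is kept; setting Histogram switches the
// output to a "histogram" key instead. Label and timestamp values are assumptions.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	s := model.Sample{
		Metric:    model.Metric{"__name__": "up"},
		Value:     1,
		Timestamp: model.Time(1000), // milliseconds since the epoch
	}
	b, err := json.Marshal(s)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // e.g. {"metric":{"__name__":"up"},"value":[1,"1"]}
}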
func (s *Sample) UnmarshalJSON(b []byte) error { v := struct { - Metric Metric `json:"metric"` - Value SamplePair `json:"value"` + Metric Metric `json:"metric"` + Value SamplePair `json:"value"` + Histogram SampleHistogramPair `json:"histogram"` }{ Metric: s.Metric, Value: SamplePair{ Timestamp: s.Timestamp, Value: s.Value, }, + Histogram: SampleHistogramPair{ + Timestamp: s.Timestamp, + Histogram: s.Histogram, + }, } if err := json.Unmarshal(b, &v); err != nil { @@ -174,8 +123,13 @@ func (s *Sample) UnmarshalJSON(b []byte) error { } s.Metric = v.Metric - s.Timestamp = v.Value.Timestamp - s.Value = v.Value.Value + if v.Histogram.Histogram != nil { + s.Timestamp = v.Histogram.Timestamp + s.Histogram = v.Histogram.Histogram + } else { + s.Timestamp = v.Value.Timestamp + s.Value = v.Value.Value + } return nil } @@ -221,80 +175,76 @@ func (s Samples) Equal(o Samples) bool { // SampleStream is a stream of Values belonging to an attached COWMetric. type SampleStream struct { - Metric Metric `json:"metric"` - Values []SamplePair `json:"values"` + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` + Histograms []SampleHistogramPair `json:"histograms"` } func (ss SampleStream) String() string { - vals := make([]string, len(ss.Values)) + valuesLength := len(ss.Values) + vals := make([]string, valuesLength+len(ss.Histograms)) for i, v := range ss.Values { vals[i] = v.String() } + for i, v := range ss.Histograms { + vals[i+valuesLength] = v.String() + } return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n")) } -// Value is a generic interface for values resulting from a query evaluation. -type Value interface { - Type() ValueType - String() string +func (ss SampleStream) MarshalJSON() ([]byte, error) { + if len(ss.Histograms) > 0 && len(ss.Values) > 0 { + v := struct { + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` + Histograms []SampleHistogramPair `json:"histograms"` + }{ + Metric: ss.Metric, + Values: ss.Values, + Histograms: ss.Histograms, + } + return json.Marshal(&v) + } else if len(ss.Histograms) > 0 { + v := struct { + Metric Metric `json:"metric"` + Histograms []SampleHistogramPair `json:"histograms"` + }{ + Metric: ss.Metric, + Histograms: ss.Histograms, + } + return json.Marshal(&v) + } else { + v := struct { + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` + }{ + Metric: ss.Metric, + Values: ss.Values, + } + return json.Marshal(&v) + } } -func (Matrix) Type() ValueType { return ValMatrix } -func (Vector) Type() ValueType { return ValVector } -func (*Scalar) Type() ValueType { return ValScalar } -func (*String) Type() ValueType { return ValString } +func (ss *SampleStream) UnmarshalJSON(b []byte) error { + v := struct { + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` + Histograms []SampleHistogramPair `json:"histograms"` + }{ + Metric: ss.Metric, + Values: ss.Values, + Histograms: ss.Histograms, + } -type ValueType int - -const ( - ValNone ValueType = iota - ValScalar - ValVector - ValMatrix - ValString -) - -// MarshalJSON implements json.Marshaler. 
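// Illustration only — a sketch (not from the vendored sources) of the SampleStream
// marshalling above: "values" and "histograms" are emitted only when non-empty, so
// float-only streams keep the pre-histogram JSON shape. Sample data is assumed.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	ss := model.SampleStream{
		Metric: model.Metric{"__name__": "up"},
		Values: []model.SamplePair{{Timestamp: 1000, Value: 1}},
		// Histograms left empty: the "histograms" key is omitted entirely.
	}
	b, err := json.Marshal(ss)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // e.g. {"metric":{"__name__":"up"},"values":[[1,"1"]]}
}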
-func (et ValueType) MarshalJSON() ([]byte, error) { - return json.Marshal(et.String()) -} - -func (et *ValueType) UnmarshalJSON(b []byte) error { - var s string - if err := json.Unmarshal(b, &s); err != nil { + if err := json.Unmarshal(b, &v); err != nil { return err } - switch s { - case "": - *et = ValNone - case "scalar": - *et = ValScalar - case "vector": - *et = ValVector - case "matrix": - *et = ValMatrix - case "string": - *et = ValString - default: - return fmt.Errorf("unknown value type %q", s) - } - return nil -} -func (e ValueType) String() string { - switch e { - case ValNone: - return "" - case ValScalar: - return "scalar" - case ValVector: - return "vector" - case ValMatrix: - return "matrix" - case ValString: - return "string" - } - panic("ValueType.String: unhandled value type") + ss.Metric = v.Metric + ss.Values = v.Values + ss.Histograms = v.Histograms + + return nil } // Scalar is a scalar value evaluated at the set timestamp. diff --git a/vendor/github.com/prometheus/common/model/value_float.go b/vendor/github.com/prometheus/common/model/value_float.go new file mode 100644 index 0000000..0f615a7 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/value_float.go @@ -0,0 +1,100 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "math" + "strconv" +) + +var ( + // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a + // non-existing sample pair. It is a SamplePair with timestamp Earliest and + // value 0.0. Note that the natural zero value of SamplePair has a timestamp + // of 0, which is possible to appear in a real SamplePair and thus not + // suitable to signal a non-existing SamplePair. + ZeroSamplePair = SamplePair{Timestamp: Earliest} +) + +// A SampleValue is a representation of a value for a given sample at a given +// time. +type SampleValue float64 + +// MarshalJSON implements json.Marshaler. +func (v SampleValue) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (v *SampleValue) UnmarshalJSON(b []byte) error { + if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { + return fmt.Errorf("sample value must be a quoted string") + } + f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) + if err != nil { + return err + } + *v = SampleValue(f) + return nil +} + +// Equal returns true if the value of v and o is equal or if both are NaN. Note +// that v==o is false if both are NaN. If you want the conventional float +// behavior, use == to compare two SampleValues. +func (v SampleValue) Equal(o SampleValue) bool { + if v == o { + return true + } + return math.IsNaN(float64(v)) && math.IsNaN(float64(o)) +} + +func (v SampleValue) String() string { + return strconv.FormatFloat(float64(v), 'f', -1, 64) +} + +// SamplePair pairs a SampleValue with a Timestamp. 
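// Illustration only — a sketch (not from the vendored sources) of the SampleValue
// semantics relocated into value_float.go above: JSON values must be quoted strings,
// and Equal, unlike ==, treats two NaNs as equal.
package main

import (
	"fmt"
	"math"

	"github.com/prometheus/common/model"
)

func main() {
	var v model.SampleValue
	if err := v.UnmarshalJSON([]byte(`"42.5"`)); err != nil { // unquoted input would fail
		panic(err)
	}
	fmt.Println(v) // 42.5

	nan := model.SampleValue(math.NaN())
	fmt.Println(nan == nan)     // false: ordinary IEEE float comparison
	fmt.Println(nan.Equal(nan)) // true: Equal deliberately matches NaN with NaN
}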
+type SamplePair struct { + Timestamp Time + Value SampleValue +} + +func (s SamplePair) MarshalJSON() ([]byte, error) { + t, err := json.Marshal(s.Timestamp) + if err != nil { + return nil, err + } + v, err := json.Marshal(s.Value) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *SamplePair) UnmarshalJSON(b []byte) error { + v := [...]json.Unmarshaler{&s.Timestamp, &s.Value} + return json.Unmarshal(b, &v) +} + +// Equal returns true if this SamplePair and o have equal Values and equal +// Timestamps. The semantics of Value equality is defined by SampleValue.Equal. +func (s *SamplePair) Equal(o *SamplePair) bool { + return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp)) +} + +func (s SamplePair) String() string { + return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp) +} diff --git a/vendor/github.com/prometheus/common/model/value_histogram.go b/vendor/github.com/prometheus/common/model/value_histogram.go new file mode 100644 index 0000000..54bb038 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/value_histogram.go @@ -0,0 +1,178 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package model + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" +) + +type FloatString float64 + +func (v FloatString) String() string { + return strconv.FormatFloat(float64(v), 'f', -1, 64) +} + +func (v FloatString) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +func (v *FloatString) UnmarshalJSON(b []byte) error { + if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { + return fmt.Errorf("float value must be a quoted string") + } + f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) + if err != nil { + return err + } + *v = FloatString(f) + return nil +} + +type HistogramBucket struct { + Boundaries int32 + Lower FloatString + Upper FloatString + Count FloatString +} + +func (s HistogramBucket) MarshalJSON() ([]byte, error) { + b, err := json.Marshal(s.Boundaries) + if err != nil { + return nil, err + } + l, err := json.Marshal(s.Lower) + if err != nil { + return nil, err + } + u, err := json.Marshal(s.Upper) + if err != nil { + return nil, err + } + c, err := json.Marshal(s.Count) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf("[%s,%s,%s,%s]", b, l, u, c)), nil +} + +func (s *HistogramBucket) UnmarshalJSON(buf []byte) error { + tmp := []interface{}{&s.Boundaries, &s.Lower, &s.Upper, &s.Count} + wantLen := len(tmp) + if err := json.Unmarshal(buf, &tmp); err != nil { + return err + } + if gotLen := len(tmp); gotLen != wantLen { + return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen) + } + return nil +} + +func (s *HistogramBucket) Equal(o *HistogramBucket) bool { + return s == o || (s.Boundaries == o.Boundaries && s.Lower == o.Lower && s.Upper == o.Upper && s.Count == o.Count) +} + +func (b HistogramBucket) String() string { + var sb strings.Builder + lowerInclusive := b.Boundaries == 1 || b.Boundaries == 3 + upperInclusive := b.Boundaries == 0 || b.Boundaries == 3 + if lowerInclusive { + sb.WriteRune('[') + } else { + sb.WriteRune('(') + } + fmt.Fprintf(&sb, "%g,%g", b.Lower, b.Upper) + if upperInclusive { + sb.WriteRune(']') + } else { + sb.WriteRune(')') + } + fmt.Fprintf(&sb, ":%v", b.Count) + return sb.String() +} + +type HistogramBuckets []*HistogramBucket + +func (s HistogramBuckets) Equal(o HistogramBuckets) bool { + if len(s) != len(o) { + return false + } + + for i, bucket := range s { + if !bucket.Equal(o[i]) { + return false + } + } + return true +} + +type SampleHistogram struct { + Count FloatString `json:"count"` + Sum FloatString `json:"sum"` + Buckets HistogramBuckets `json:"buckets"` +} + +func (s SampleHistogram) String() string { + return fmt.Sprintf("Count: %f, Sum: %f, Buckets: %v", s.Count, s.Sum, s.Buckets) +} + +func (s *SampleHistogram) Equal(o *SampleHistogram) bool { + return s == o || (s.Count == o.Count && s.Sum == o.Sum && s.Buckets.Equal(o.Buckets)) +} + +type SampleHistogramPair struct { + Timestamp Time + // Histogram should never be nil, it's only stored as pointer for efficiency. 
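// Illustration only — a sketch (not from the vendored sources) of the Boundaries
// encoding used by HistogramBucket above, as rendered by its String method:
// 0 = (lower,upper], 1 = [lower,upper), 2 = (lower,upper), 3 = [lower,upper].
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	for b := int32(0); b <= 3; b++ {
		bucket := model.HistogramBucket{Boundaries: b, Lower: 0, Upper: 1, Count: 5}
		fmt.Println(bucket) // prints (0,1]:5 then [0,1):5 then (0,1):5 then [0,1]:5
	}
}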
+ Histogram *SampleHistogram +} + +func (s SampleHistogramPair) MarshalJSON() ([]byte, error) { + if s.Histogram == nil { + return nil, fmt.Errorf("histogram is nil") + } + t, err := json.Marshal(s.Timestamp) + if err != nil { + return nil, err + } + v, err := json.Marshal(s.Histogram) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil +} + +func (s *SampleHistogramPair) UnmarshalJSON(buf []byte) error { + tmp := []interface{}{&s.Timestamp, &s.Histogram} + wantLen := len(tmp) + if err := json.Unmarshal(buf, &tmp); err != nil { + return err + } + if gotLen := len(tmp); gotLen != wantLen { + return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen) + } + if s.Histogram == nil { + return fmt.Errorf("histogram is null") + } + return nil +} + +func (s SampleHistogramPair) String() string { + return fmt.Sprintf("%s @[%s]", s.Histogram, s.Timestamp) +} + +func (s *SampleHistogramPair) Equal(o *SampleHistogramPair) bool { + return s == o || (s.Histogram.Equal(o.Histogram) && s.Timestamp.Equal(o.Timestamp)) +} diff --git a/vendor/github.com/prometheus/common/model/value_type.go b/vendor/github.com/prometheus/common/model/value_type.go new file mode 100644 index 0000000..726c50e --- /dev/null +++ b/vendor/github.com/prometheus/common/model/value_type.go @@ -0,0 +1,83 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" +) + +// Value is a generic interface for values resulting from a query evaluation. +type Value interface { + Type() ValueType + String() string +} + +func (Matrix) Type() ValueType { return ValMatrix } +func (Vector) Type() ValueType { return ValVector } +func (*Scalar) Type() ValueType { return ValScalar } +func (*String) Type() ValueType { return ValString } + +type ValueType int + +const ( + ValNone ValueType = iota + ValScalar + ValVector + ValMatrix + ValString +) + +// MarshalJSON implements json.Marshaler. 
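// Illustration only — a sketch (not from the vendored sources) of the two-element
// [timestamp, histogram] wire format defined for SampleHistogramPair above; the raw
// JSON below is an assumed example in that shape.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	raw := []byte(`[1435781451.781,{"count":"2","sum":"3","buckets":[[1,"0","1","2"]]}]`)
	var p model.SampleHistogramPair
	if err := json.Unmarshal(raw, &p); err != nil {
		panic(err)
	}
	// Expected roughly: Count: 2.000000, Sum: 3.000000, Buckets: [[0,1):2] @[1435781451.781]
	fmt.Println(p)
}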
+func (et ValueType) MarshalJSON() ([]byte, error) { + return json.Marshal(et.String()) +} + +func (et *ValueType) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + switch s { + case "": + *et = ValNone + case "scalar": + *et = ValScalar + case "vector": + *et = ValVector + case "matrix": + *et = ValMatrix + case "string": + *et = ValString + default: + return fmt.Errorf("unknown value type %q", s) + } + return nil +} + +func (e ValueType) String() string { + switch e { + case ValNone: + return "" + case ValScalar: + return "scalar" + case ValVector: + return "vector" + case ValMatrix: + return "matrix" + case ValString: + return "string" + } + panic("ValueType.String: unhandled value type") +} diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common index 6c8e3e2..e358db6 100644 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -55,19 +55,22 @@ ifneq ($(shell which gotestsum),) endif endif -PROMU_VERSION ?= 0.13.0 +PROMU_VERSION ?= 0.14.0 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz +SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.45.2 +GOLANGCI_LINT_VERSION ?= v1.49.0 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64. # windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386)) # If we're in CI and there is an Actions file, that means the linter # is being run in Actions, so we don't need to run it here. 
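// Illustration only — a sketch (not from the vendored sources) of the ValueType
// round-trip that moved into value_type.go above: types marshal as their string names.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	b, err := json.Marshal(model.ValMatrix)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // "matrix"

	var vt model.ValueType
	if err := json.Unmarshal([]byte(`"vector"`), &vt); err != nil {
		panic(err)
	}
	fmt.Println(vt == model.ValVector) // true
}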
- ifeq (,$(CIRCLE_JOB)) + ifneq (,$(SKIP_GOLANGCI_LINT)) + GOLANGCI_LINT := + else ifeq (,$(CIRCLE_JOB)) GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint else ifeq (,$(wildcard .github/workflows/golangci-lint.yml)) GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go index ff6b927..06968ca 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo.go @@ -380,6 +380,42 @@ func parseCPUInfoMips(info []byte) ([]CPUInfo, error) { return cpuinfo, nil } +func parseCPUInfoLoong(info []byte) ([]CPUInfo, error) { + scanner := bufio.NewScanner(bytes.NewReader(info)) + // find the first "processor" line + firstLine := firstNonEmptyLine(scanner) + if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") { + return nil, errors.New("invalid cpuinfo file: " + firstLine) + } + field := strings.SplitN(firstLine, ": ", 2) + cpuinfo := []CPUInfo{} + systemType := field[1] + i := 0 + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "processor": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + i = int(v) + cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor + cpuinfo[i].Processor = uint(v) + cpuinfo[i].VendorID = systemType + case "CPU Family": + cpuinfo[i].CPUFamily = field[1] + case "Model Name": + cpuinfo[i].ModelName = field[1] + } + } + return cpuinfo, nil +} + func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) { scanner := bufio.NewScanner(bytes.NewReader(info)) diff --git a/vendor/cloud.google.com/go/compute/metadata/retry_linux.go b/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go similarity index 59% rename from vendor/cloud.google.com/go/compute/metadata/retry_linux.go rename to vendor/github.com/prometheus/procfs/cpuinfo_loong64.go index bb412f8..d88442f 100644 --- a/vendor/cloud.google.com/go/compute/metadata/retry_linux.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go @@ -1,10 +1,9 @@ -// Copyright 2021 Google LLC -// +// Copyright 2022 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -15,12 +14,6 @@ //go:build linux // +build linux -package metadata +package procfs -import "syscall" - -func init() { - // Initialize syscallRetryable to return true on transient socket-level - // errors. These errors are specific to Linux. - syscallRetryable = func(err error) bool { return err == syscall.ECONNRESET || err == syscall.ECONNREFUSED } -} +var parseCPUInfo = parseCPUInfoLoong diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_others.go b/vendor/github.com/prometheus/procfs/cpuinfo_others.go index ea41bf2..a6b2b31 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_others.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_others.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
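// Illustration only — a sketch (not from the vendored sources) of the public path
// that reaches the new parseCPUInfoLoong above: the per-architecture build tags bind
// the package-level parseCPUInfo, so a loong64 build of FS.CPUInfo uses it.
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	cpus, err := fs.CPUInfo() // dispatches to parseCPUInfoLoong when GOARCH=loong64
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range cpus {
		fmt.Printf("cpu%d: %s %s\n", c.Processor, c.VendorID, c.ModelName)
	}
}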
-//go:build linux && !386 && !amd64 && !arm && !arm64 && !mips && !mips64 && !mips64le && !mipsle && !ppc64 && !ppc64le && !riscv64 && !s390x -// +build linux,!386,!amd64,!arm,!arm64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x +//go:build linux && !386 && !amd64 && !arm && !arm64 && !loong64 && !mips && !mips64 && !mips64le && !mipsle && !ppc64 && !ppc64le && !riscv64 && !s390x +// +build linux,!386,!amd64,!arm,!arm64,!loong64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x package procfs diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go index d31a826..f9d961e 100644 --- a/vendor/github.com/prometheus/procfs/doc.go +++ b/vendor/github.com/prometheus/procfs/doc.go @@ -16,30 +16,29 @@ // // Example: // -// package main +// package main // -// import ( -// "fmt" -// "log" +// import ( +// "fmt" +// "log" // -// "github.com/prometheus/procfs" -// ) +// "github.com/prometheus/procfs" +// ) // -// func main() { -// p, err := procfs.Self() -// if err != nil { -// log.Fatalf("could not get process: %s", err) -// } +// func main() { +// p, err := procfs.Self() +// if err != nil { +// log.Fatalf("could not get process: %s", err) +// } // -// stat, err := p.Stat() -// if err != nil { -// log.Fatalf("could not get process stat: %s", err) -// } -// -// fmt.Printf("command: %s\n", stat.Comm) -// fmt.Printf("cpu time: %fs\n", stat.CPUTime()) -// fmt.Printf("vsize: %dB\n", stat.VirtualMemory()) -// fmt.Printf("rss: %dB\n", stat.ResidentMemory()) -// } +// stat, err := p.Stat() +// if err != nil { +// log.Fatalf("could not get process stat: %s", err) +// } // +// fmt.Printf("command: %s\n", stat.Comm) +// fmt.Printf("cpu time: %fs\n", stat.CPUTime()) +// fmt.Printf("vsize: %dB\n", stat.VirtualMemory()) +// fmt.Printf("rss: %dB\n", stat.ResidentMemory()) +// } package procfs diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go index f7a828b..0c482c1 100644 --- a/vendor/github.com/prometheus/procfs/mountstats.go +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -284,7 +284,8 @@ func parseMountStats(r io.Reader) ([]*Mount, error) { } // parseMount parses an entry in /proc/[pid]/mountstats in the format: -// device [device] mounted on [mount] with fstype [type] +// +// device [device] mounted on [mount] with fstype [type] func parseMount(ss []string) (*Mount, error) { if len(ss) < deviceEntryLen { return nil, fmt.Errorf("invalid device entry: %v", ss) diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go index a94f86d..06b7b8f 100644 --- a/vendor/github.com/prometheus/procfs/net_softnet.go +++ b/vendor/github.com/prometheus/procfs/net_softnet.go @@ -27,8 +27,9 @@ import ( // For the proc file format details, // See: // * Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2343 -// * Linux 4.17 https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162 -// and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810. +// * Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086 +// * Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162 +// * Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169 // SoftnetStat contains a single row of data from /proc/net/softnet_stat. 
type SoftnetStat struct { @@ -38,6 +39,18 @@ type SoftnetStat struct { Dropped uint32 // Number of times processing packets ran out of quota. TimeSqueezed uint32 + // Number of collision occur while obtaining device lock while transmitting. + CPUCollision uint32 + // Number of times cpu woken up received_rps. + ReceivedRps uint32 + // number of times flow limit has been reached. + FlowLimitCount uint32 + // Softnet backlog status. + SoftnetBacklogLen uint32 + // CPU id owning this softnet_data. + Index uint32 + // softnet_data's Width. + Width int } var softNetProcFile = "net/softnet_stat" @@ -66,22 +79,57 @@ func parseSoftnet(r io.Reader) ([]SoftnetStat, error) { for s.Scan() { columns := strings.Fields(s.Text()) width := len(columns) + softnetStat := SoftnetStat{} if width < minColumns { return nil, fmt.Errorf("%d columns were detected, but at least %d were expected", width, minColumns) } - // We only parse the first three columns at the moment. - us, err := parseHexUint32s(columns[0:3]) - if err != nil { - return nil, err + // Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2347 + if width >= minColumns { + us, err := parseHexUint32s(columns[0:9]) + if err != nil { + return nil, err + } + + softnetStat.Processed = us[0] + softnetStat.Dropped = us[1] + softnetStat.TimeSqueezed = us[2] + softnetStat.CPUCollision = us[8] } - stats = append(stats, SoftnetStat{ - Processed: us[0], - Dropped: us[1], - TimeSqueezed: us[2], - }) + // Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086 + if width >= 10 { + us, err := parseHexUint32s(columns[9:10]) + if err != nil { + return nil, err + } + + softnetStat.ReceivedRps = us[0] + } + + // Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162 + if width >= 11 { + us, err := parseHexUint32s(columns[10:11]) + if err != nil { + return nil, err + } + + softnetStat.FlowLimitCount = us[0] + } + + // Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169 + if width >= 13 { + us, err := parseHexUint32s(columns[11:13]) + if err != nil { + return nil, err + } + + softnetStat.SoftnetBacklogLen = us[0] + softnetStat.Index = us[1] + } + softnetStat.Width = width + stats = append(stats, softnetStat) } return stats, nil diff --git a/vendor/github.com/prometheus/procfs/netstat.go b/vendor/github.com/prometheus/procfs/netstat.go index dcea9c5..5cc40ae 100644 --- a/vendor/github.com/prometheus/procfs/netstat.go +++ b/vendor/github.com/prometheus/procfs/netstat.go @@ -15,6 +15,7 @@ package procfs import ( "bufio" + "io" "os" "path/filepath" "strconv" @@ -42,27 +43,43 @@ func (fs FS) NetStat() ([]NetStat, error) { return nil, err } - netStatFile := NetStat{ - Filename: filepath.Base(filePath), - Stats: make(map[string][]uint64), + procNetstat, err := parseNetstat(file) + if err != nil { + return nil, err } - scanner := bufio.NewScanner(file) - scanner.Scan() - // First string is always a header for stats - var headers []string - headers = append(headers, strings.Fields(scanner.Text())...) 
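// Illustration only — a sketch (not from the vendored sources) of consuming the
// widened SoftnetStat above: the new columns exist only on newer kernels, so Width
// is checked before the backlog and CPU-index fields are trusted.
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	stats, err := fs.NetSoftnetStat()
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range stats {
		fmt.Printf("processed=%d dropped=%d", s.Processed, s.Dropped)
		if s.Width >= 13 { // kernels >= 5.14 expose backlog length and CPU index
			fmt.Printf(" backlog=%d cpu=%d", s.SoftnetBacklogLen, s.Index)
		}
		fmt.Println()
	}
}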
+ procNetstat.Filename = filepath.Base(filePath) - // Other strings represent per-CPU counters - for scanner.Scan() { - for num, counter := range strings.Fields(scanner.Text()) { - value, err := strconv.ParseUint(counter, 16, 64) - if err != nil { - return nil, err - } - netStatFile.Stats[headers[num]] = append(netStatFile.Stats[headers[num]], value) - } - } - netStatsTotal = append(netStatsTotal, netStatFile) + netStatsTotal = append(netStatsTotal, procNetstat) } return netStatsTotal, nil } + +// parseNetstat parses the metrics from `/proc/net/stat/` file +// and returns a NetStat structure. +func parseNetstat(r io.Reader) (NetStat, error) { + var ( + scanner = bufio.NewScanner(r) + netStat = NetStat{ + Stats: make(map[string][]uint64), + } + ) + + scanner.Scan() + + // First string is always a header for stats + var headers []string + headers = append(headers, strings.Fields(scanner.Text())...) + + // Other strings represent per-CPU counters + for scanner.Scan() { + for num, counter := range strings.Fields(scanner.Text()) { + value, err := strconv.ParseUint(counter, 16, 64) + if err != nil { + return NetStat{}, err + } + netStat.Stats[headers[num]] = append(netStat.Stats[headers[num]], value) + } + } + + return netStat, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go index cca0332..ea83a75 100644 --- a/vendor/github.com/prometheus/procfs/proc_cgroup.go +++ b/vendor/github.com/prometheus/procfs/proc_cgroup.go @@ -23,7 +23,7 @@ import ( "github.com/prometheus/procfs/internal/util" ) -// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the the placement of a PID inside a +// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a // specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource // controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies // contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in diff --git a/vendor/github.com/prometheus/procfs/proc_interrupts.go b/vendor/github.com/prometheus/procfs/proc_interrupts.go new file mode 100644 index 0000000..9df79c2 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_interrupts.go @@ -0,0 +1,98 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Interrupt represents a single interrupt line. +type Interrupt struct { + // Info is the type of interrupt. + Info string + // Devices is the name of the device that is located at that IRQ + Devices string + // Values is the number of interrupts per CPU. + Values []string +} + +// Interrupts models the content of /proc/interrupts. Key is the IRQ number. 
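// Illustration only — a sketch (not from the vendored sources) of the Proc.Interrupts
// API added in the new proc_interrupts.go in this hunk: keys are IRQ numbers or
// symbolic names such as "NMI", and Values carries one counter per CPU column.
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	interrupts, err := p.Interrupts()
	if err != nil {
		log.Fatal(err)
	}
	for irq, i := range interrupts {
		fmt.Printf("%s (%s, %s): %v\n", irq, i.Info, i.Devices, i.Values)
	}
}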
+// - https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/deployment_guide/s2-proc-interrupts +// - https://raspberrypi.stackexchange.com/questions/105802/explanation-of-proc-interrupts-output +type Interrupts map[string]Interrupt + +// Interrupts creates a new instance from a given Proc instance. +func (p Proc) Interrupts() (Interrupts, error) { + data, err := util.ReadFileNoStat(p.path("interrupts")) + if err != nil { + return nil, err + } + return parseInterrupts(bytes.NewReader(data)) +} + +func parseInterrupts(r io.Reader) (Interrupts, error) { + var ( + interrupts = Interrupts{} + scanner = bufio.NewScanner(r) + ) + + if !scanner.Scan() { + return nil, errors.New("interrupts empty") + } + cpuNum := len(strings.Fields(scanner.Text())) // one header per cpu + + for scanner.Scan() { + parts := strings.Fields(scanner.Text()) + if len(parts) == 0 { // skip empty lines + continue + } + if len(parts) < 2 { + return nil, fmt.Errorf("not enough fields in interrupts (expected at least 2 fields but got %d): %s", len(parts), parts) + } + intName := parts[0][:len(parts[0])-1] // remove trailing : + + if len(parts) == 2 { + interrupts[intName] = Interrupt{ + Info: "", + Devices: "", + Values: []string{ + parts[1], + }, + } + continue + } + + intr := Interrupt{ + Values: parts[1 : cpuNum+1], + } + + if _, err := strconv.Atoi(intName); err == nil { // numeral interrupt + intr.Info = parts[cpuNum+1] + intr.Devices = strings.Join(parts[cpuNum+2:], " ") + } else { + intr.Info = strings.Join(parts[cpuNum+1:], " ") + } + interrupts[intName] = intr + } + + return interrupts, scanner.Err() +} diff --git a/vendor/github.com/prometheus/procfs/proc_netstat.go b/vendor/github.com/prometheus/procfs/proc_netstat.go index 48b5238..6a43bb2 100644 --- a/vendor/github.com/prometheus/procfs/proc_netstat.go +++ b/vendor/github.com/prometheus/procfs/proc_netstat.go @@ -33,139 +33,140 @@ type ProcNetstat struct { } type TcpExt struct { // nolint:revive - SyncookiesSent float64 - SyncookiesRecv float64 - SyncookiesFailed float64 - EmbryonicRsts float64 - PruneCalled float64 - RcvPruned float64 - OfoPruned float64 - OutOfWindowIcmps float64 - LockDroppedIcmps float64 - ArpFilter float64 - TW float64 - TWRecycled float64 - TWKilled float64 - PAWSActive float64 - PAWSEstab float64 - DelayedACKs float64 - DelayedACKLocked float64 - DelayedACKLost float64 - ListenOverflows float64 - ListenDrops float64 - TCPHPHits float64 - TCPPureAcks float64 - TCPHPAcks float64 - TCPRenoRecovery float64 - TCPSackRecovery float64 - TCPSACKReneging float64 - TCPSACKReorder float64 - TCPRenoReorder float64 - TCPTSReorder float64 - TCPFullUndo float64 - TCPPartialUndo float64 - TCPDSACKUndo float64 - TCPLossUndo float64 - TCPLostRetransmit float64 - TCPRenoFailures float64 - TCPSackFailures float64 - TCPLossFailures float64 - TCPFastRetrans float64 - TCPSlowStartRetrans float64 - TCPTimeouts float64 - TCPLossProbes float64 - TCPLossProbeRecovery float64 - TCPRenoRecoveryFail float64 - TCPSackRecoveryFail float64 - TCPRcvCollapsed float64 - TCPDSACKOldSent float64 - TCPDSACKOfoSent float64 - TCPDSACKRecv float64 - TCPDSACKOfoRecv float64 - TCPAbortOnData float64 - TCPAbortOnClose float64 - TCPAbortOnMemory float64 - TCPAbortOnTimeout float64 - TCPAbortOnLinger float64 - TCPAbortFailed float64 - TCPMemoryPressures float64 - TCPMemoryPressuresChrono float64 - TCPSACKDiscard float64 - TCPDSACKIgnoredOld float64 - TCPDSACKIgnoredNoUndo float64 - TCPSpuriousRTOs float64 - TCPMD5NotFound float64 - TCPMD5Unexpected float64 - 
TCPMD5Failure float64 - TCPSackShifted float64 - TCPSackMerged float64 - TCPSackShiftFallback float64 - TCPBacklogDrop float64 - PFMemallocDrop float64 - TCPMinTTLDrop float64 - TCPDeferAcceptDrop float64 - IPReversePathFilter float64 - TCPTimeWaitOverflow float64 - TCPReqQFullDoCookies float64 - TCPReqQFullDrop float64 - TCPRetransFail float64 - TCPRcvCoalesce float64 - TCPOFOQueue float64 - TCPOFODrop float64 - TCPOFOMerge float64 - TCPChallengeACK float64 - TCPSYNChallenge float64 - TCPFastOpenActive float64 - TCPFastOpenActiveFail float64 - TCPFastOpenPassive float64 - TCPFastOpenPassiveFail float64 - TCPFastOpenListenOverflow float64 - TCPFastOpenCookieReqd float64 - TCPFastOpenBlackhole float64 - TCPSpuriousRtxHostQueues float64 - BusyPollRxPackets float64 - TCPAutoCorking float64 - TCPFromZeroWindowAdv float64 - TCPToZeroWindowAdv float64 - TCPWantZeroWindowAdv float64 - TCPSynRetrans float64 - TCPOrigDataSent float64 - TCPHystartTrainDetect float64 - TCPHystartTrainCwnd float64 - TCPHystartDelayDetect float64 - TCPHystartDelayCwnd float64 - TCPACKSkippedSynRecv float64 - TCPACKSkippedPAWS float64 - TCPACKSkippedSeq float64 - TCPACKSkippedFinWait2 float64 - TCPACKSkippedTimeWait float64 - TCPACKSkippedChallenge float64 - TCPWinProbe float64 - TCPKeepAlive float64 - TCPMTUPFail float64 - TCPMTUPSuccess float64 - TCPWqueueTooBig float64 + SyncookiesSent *float64 + SyncookiesRecv *float64 + SyncookiesFailed *float64 + EmbryonicRsts *float64 + PruneCalled *float64 + RcvPruned *float64 + OfoPruned *float64 + OutOfWindowIcmps *float64 + LockDroppedIcmps *float64 + ArpFilter *float64 + TW *float64 + TWRecycled *float64 + TWKilled *float64 + PAWSActive *float64 + PAWSEstab *float64 + DelayedACKs *float64 + DelayedACKLocked *float64 + DelayedACKLost *float64 + ListenOverflows *float64 + ListenDrops *float64 + TCPHPHits *float64 + TCPPureAcks *float64 + TCPHPAcks *float64 + TCPRenoRecovery *float64 + TCPSackRecovery *float64 + TCPSACKReneging *float64 + TCPSACKReorder *float64 + TCPRenoReorder *float64 + TCPTSReorder *float64 + TCPFullUndo *float64 + TCPPartialUndo *float64 + TCPDSACKUndo *float64 + TCPLossUndo *float64 + TCPLostRetransmit *float64 + TCPRenoFailures *float64 + TCPSackFailures *float64 + TCPLossFailures *float64 + TCPFastRetrans *float64 + TCPSlowStartRetrans *float64 + TCPTimeouts *float64 + TCPLossProbes *float64 + TCPLossProbeRecovery *float64 + TCPRenoRecoveryFail *float64 + TCPSackRecoveryFail *float64 + TCPRcvCollapsed *float64 + TCPDSACKOldSent *float64 + TCPDSACKOfoSent *float64 + TCPDSACKRecv *float64 + TCPDSACKOfoRecv *float64 + TCPAbortOnData *float64 + TCPAbortOnClose *float64 + TCPAbortOnMemory *float64 + TCPAbortOnTimeout *float64 + TCPAbortOnLinger *float64 + TCPAbortFailed *float64 + TCPMemoryPressures *float64 + TCPMemoryPressuresChrono *float64 + TCPSACKDiscard *float64 + TCPDSACKIgnoredOld *float64 + TCPDSACKIgnoredNoUndo *float64 + TCPSpuriousRTOs *float64 + TCPMD5NotFound *float64 + TCPMD5Unexpected *float64 + TCPMD5Failure *float64 + TCPSackShifted *float64 + TCPSackMerged *float64 + TCPSackShiftFallback *float64 + TCPBacklogDrop *float64 + PFMemallocDrop *float64 + TCPMinTTLDrop *float64 + TCPDeferAcceptDrop *float64 + IPReversePathFilter *float64 + TCPTimeWaitOverflow *float64 + TCPReqQFullDoCookies *float64 + TCPReqQFullDrop *float64 + TCPRetransFail *float64 + TCPRcvCoalesce *float64 + TCPRcvQDrop *float64 + TCPOFOQueue *float64 + TCPOFODrop *float64 + TCPOFOMerge *float64 + TCPChallengeACK *float64 + TCPSYNChallenge *float64 + TCPFastOpenActive 
*float64 + TCPFastOpenActiveFail *float64 + TCPFastOpenPassive *float64 + TCPFastOpenPassiveFail *float64 + TCPFastOpenListenOverflow *float64 + TCPFastOpenCookieReqd *float64 + TCPFastOpenBlackhole *float64 + TCPSpuriousRtxHostQueues *float64 + BusyPollRxPackets *float64 + TCPAutoCorking *float64 + TCPFromZeroWindowAdv *float64 + TCPToZeroWindowAdv *float64 + TCPWantZeroWindowAdv *float64 + TCPSynRetrans *float64 + TCPOrigDataSent *float64 + TCPHystartTrainDetect *float64 + TCPHystartTrainCwnd *float64 + TCPHystartDelayDetect *float64 + TCPHystartDelayCwnd *float64 + TCPACKSkippedSynRecv *float64 + TCPACKSkippedPAWS *float64 + TCPACKSkippedSeq *float64 + TCPACKSkippedFinWait2 *float64 + TCPACKSkippedTimeWait *float64 + TCPACKSkippedChallenge *float64 + TCPWinProbe *float64 + TCPKeepAlive *float64 + TCPMTUPFail *float64 + TCPMTUPSuccess *float64 + TCPWqueueTooBig *float64 } type IpExt struct { // nolint:revive - InNoRoutes float64 - InTruncatedPkts float64 - InMcastPkts float64 - OutMcastPkts float64 - InBcastPkts float64 - OutBcastPkts float64 - InOctets float64 - OutOctets float64 - InMcastOctets float64 - OutMcastOctets float64 - InBcastOctets float64 - OutBcastOctets float64 - InCsumErrors float64 - InNoECTPkts float64 - InECT1Pkts float64 - InECT0Pkts float64 - InCEPkts float64 - ReasmOverlaps float64 + InNoRoutes *float64 + InTruncatedPkts *float64 + InMcastPkts *float64 + OutMcastPkts *float64 + InBcastPkts *float64 + OutBcastPkts *float64 + InOctets *float64 + OutOctets *float64 + InMcastOctets *float64 + OutMcastOctets *float64 + InBcastOctets *float64 + OutBcastOctets *float64 + InCsumErrors *float64 + InNoECTPkts *float64 + InECT1Pkts *float64 + InECT0Pkts *float64 + InCEPkts *float64 + ReasmOverlaps *float64 } func (p Proc) Netstat() (ProcNetstat, error) { @@ -174,14 +175,14 @@ func (p Proc) Netstat() (ProcNetstat, error) { if err != nil { return ProcNetstat{PID: p.PID}, err } - procNetstat, err := parseNetstat(bytes.NewReader(data), filename) + procNetstat, err := parseProcNetstat(bytes.NewReader(data), filename) procNetstat.PID = p.PID return procNetstat, err } -// parseNetstat parses the metrics from proc//net/netstat file +// parseProcNetstat parses the metrics from proc//net/netstat file // and returns a ProcNetstat structure. 
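// Illustration only — a sketch (not from the vendored sources) of why the TcpExt and
// IpExt fields above changed from float64 to *float64: a nil pointer now means the
// kernel did not export that counter, which a zero value could never express.
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	stat, err := p.Netstat()
	if err != nil {
		log.Fatal(err)
	}
	if stat.TcpExt.TCPTimeouts != nil { // nil when absent from /proc/<pid>/net/netstat
		fmt.Println("TCPTimeouts:", *stat.TcpExt.TCPTimeouts)
	}
}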
-func parseNetstat(r io.Reader, fileName string) (ProcNetstat, error) { +func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) { var ( scanner = bufio.NewScanner(r) procNetstat = ProcNetstat{} @@ -208,230 +209,232 @@ func parseNetstat(r io.Reader, fileName string) (ProcNetstat, error) { case "TcpExt": switch key { case "SyncookiesSent": - procNetstat.TcpExt.SyncookiesSent = value + procNetstat.TcpExt.SyncookiesSent = &value case "SyncookiesRecv": - procNetstat.TcpExt.SyncookiesRecv = value + procNetstat.TcpExt.SyncookiesRecv = &value case "SyncookiesFailed": - procNetstat.TcpExt.SyncookiesFailed = value + procNetstat.TcpExt.SyncookiesFailed = &value case "EmbryonicRsts": - procNetstat.TcpExt.EmbryonicRsts = value + procNetstat.TcpExt.EmbryonicRsts = &value case "PruneCalled": - procNetstat.TcpExt.PruneCalled = value + procNetstat.TcpExt.PruneCalled = &value case "RcvPruned": - procNetstat.TcpExt.RcvPruned = value + procNetstat.TcpExt.RcvPruned = &value case "OfoPruned": - procNetstat.TcpExt.OfoPruned = value + procNetstat.TcpExt.OfoPruned = &value case "OutOfWindowIcmps": - procNetstat.TcpExt.OutOfWindowIcmps = value + procNetstat.TcpExt.OutOfWindowIcmps = &value case "LockDroppedIcmps": - procNetstat.TcpExt.LockDroppedIcmps = value + procNetstat.TcpExt.LockDroppedIcmps = &value case "ArpFilter": - procNetstat.TcpExt.ArpFilter = value + procNetstat.TcpExt.ArpFilter = &value case "TW": - procNetstat.TcpExt.TW = value + procNetstat.TcpExt.TW = &value case "TWRecycled": - procNetstat.TcpExt.TWRecycled = value + procNetstat.TcpExt.TWRecycled = &value case "TWKilled": - procNetstat.TcpExt.TWKilled = value + procNetstat.TcpExt.TWKilled = &value case "PAWSActive": - procNetstat.TcpExt.PAWSActive = value + procNetstat.TcpExt.PAWSActive = &value case "PAWSEstab": - procNetstat.TcpExt.PAWSEstab = value + procNetstat.TcpExt.PAWSEstab = &value case "DelayedACKs": - procNetstat.TcpExt.DelayedACKs = value + procNetstat.TcpExt.DelayedACKs = &value case "DelayedACKLocked": - procNetstat.TcpExt.DelayedACKLocked = value + procNetstat.TcpExt.DelayedACKLocked = &value case "DelayedACKLost": - procNetstat.TcpExt.DelayedACKLost = value + procNetstat.TcpExt.DelayedACKLost = &value case "ListenOverflows": - procNetstat.TcpExt.ListenOverflows = value + procNetstat.TcpExt.ListenOverflows = &value case "ListenDrops": - procNetstat.TcpExt.ListenDrops = value + procNetstat.TcpExt.ListenDrops = &value case "TCPHPHits": - procNetstat.TcpExt.TCPHPHits = value + procNetstat.TcpExt.TCPHPHits = &value case "TCPPureAcks": - procNetstat.TcpExt.TCPPureAcks = value + procNetstat.TcpExt.TCPPureAcks = &value case "TCPHPAcks": - procNetstat.TcpExt.TCPHPAcks = value + procNetstat.TcpExt.TCPHPAcks = &value case "TCPRenoRecovery": - procNetstat.TcpExt.TCPRenoRecovery = value + procNetstat.TcpExt.TCPRenoRecovery = &value case "TCPSackRecovery": - procNetstat.TcpExt.TCPSackRecovery = value + procNetstat.TcpExt.TCPSackRecovery = &value case "TCPSACKReneging": - procNetstat.TcpExt.TCPSACKReneging = value + procNetstat.TcpExt.TCPSACKReneging = &value case "TCPSACKReorder": - procNetstat.TcpExt.TCPSACKReorder = value + procNetstat.TcpExt.TCPSACKReorder = &value case "TCPRenoReorder": - procNetstat.TcpExt.TCPRenoReorder = value + procNetstat.TcpExt.TCPRenoReorder = &value case "TCPTSReorder": - procNetstat.TcpExt.TCPTSReorder = value + procNetstat.TcpExt.TCPTSReorder = &value case "TCPFullUndo": - procNetstat.TcpExt.TCPFullUndo = value + procNetstat.TcpExt.TCPFullUndo = &value case "TCPPartialUndo": - 
procNetstat.TcpExt.TCPPartialUndo = value + procNetstat.TcpExt.TCPPartialUndo = &value case "TCPDSACKUndo": - procNetstat.TcpExt.TCPDSACKUndo = value + procNetstat.TcpExt.TCPDSACKUndo = &value case "TCPLossUndo": - procNetstat.TcpExt.TCPLossUndo = value + procNetstat.TcpExt.TCPLossUndo = &value case "TCPLostRetransmit": - procNetstat.TcpExt.TCPLostRetransmit = value + procNetstat.TcpExt.TCPLostRetransmit = &value case "TCPRenoFailures": - procNetstat.TcpExt.TCPRenoFailures = value + procNetstat.TcpExt.TCPRenoFailures = &value case "TCPSackFailures": - procNetstat.TcpExt.TCPSackFailures = value + procNetstat.TcpExt.TCPSackFailures = &value case "TCPLossFailures": - procNetstat.TcpExt.TCPLossFailures = value + procNetstat.TcpExt.TCPLossFailures = &value case "TCPFastRetrans": - procNetstat.TcpExt.TCPFastRetrans = value + procNetstat.TcpExt.TCPFastRetrans = &value case "TCPSlowStartRetrans": - procNetstat.TcpExt.TCPSlowStartRetrans = value + procNetstat.TcpExt.TCPSlowStartRetrans = &value case "TCPTimeouts": - procNetstat.TcpExt.TCPTimeouts = value + procNetstat.TcpExt.TCPTimeouts = &value case "TCPLossProbes": - procNetstat.TcpExt.TCPLossProbes = value + procNetstat.TcpExt.TCPLossProbes = &value case "TCPLossProbeRecovery": - procNetstat.TcpExt.TCPLossProbeRecovery = value + procNetstat.TcpExt.TCPLossProbeRecovery = &value case "TCPRenoRecoveryFail": - procNetstat.TcpExt.TCPRenoRecoveryFail = value + procNetstat.TcpExt.TCPRenoRecoveryFail = &value case "TCPSackRecoveryFail": - procNetstat.TcpExt.TCPSackRecoveryFail = value + procNetstat.TcpExt.TCPSackRecoveryFail = &value case "TCPRcvCollapsed": - procNetstat.TcpExt.TCPRcvCollapsed = value + procNetstat.TcpExt.TCPRcvCollapsed = &value case "TCPDSACKOldSent": - procNetstat.TcpExt.TCPDSACKOldSent = value + procNetstat.TcpExt.TCPDSACKOldSent = &value case "TCPDSACKOfoSent": - procNetstat.TcpExt.TCPDSACKOfoSent = value + procNetstat.TcpExt.TCPDSACKOfoSent = &value case "TCPDSACKRecv": - procNetstat.TcpExt.TCPDSACKRecv = value + procNetstat.TcpExt.TCPDSACKRecv = &value case "TCPDSACKOfoRecv": - procNetstat.TcpExt.TCPDSACKOfoRecv = value + procNetstat.TcpExt.TCPDSACKOfoRecv = &value case "TCPAbortOnData": - procNetstat.TcpExt.TCPAbortOnData = value + procNetstat.TcpExt.TCPAbortOnData = &value case "TCPAbortOnClose": - procNetstat.TcpExt.TCPAbortOnClose = value + procNetstat.TcpExt.TCPAbortOnClose = &value case "TCPDeferAcceptDrop": - procNetstat.TcpExt.TCPDeferAcceptDrop = value + procNetstat.TcpExt.TCPDeferAcceptDrop = &value case "IPReversePathFilter": - procNetstat.TcpExt.IPReversePathFilter = value + procNetstat.TcpExt.IPReversePathFilter = &value case "TCPTimeWaitOverflow": - procNetstat.TcpExt.TCPTimeWaitOverflow = value + procNetstat.TcpExt.TCPTimeWaitOverflow = &value case "TCPReqQFullDoCookies": - procNetstat.TcpExt.TCPReqQFullDoCookies = value + procNetstat.TcpExt.TCPReqQFullDoCookies = &value case "TCPReqQFullDrop": - procNetstat.TcpExt.TCPReqQFullDrop = value + procNetstat.TcpExt.TCPReqQFullDrop = &value case "TCPRetransFail": - procNetstat.TcpExt.TCPRetransFail = value + procNetstat.TcpExt.TCPRetransFail = &value case "TCPRcvCoalesce": - procNetstat.TcpExt.TCPRcvCoalesce = value + procNetstat.TcpExt.TCPRcvCoalesce = &value + case "TCPRcvQDrop": + procNetstat.TcpExt.TCPRcvQDrop = &value case "TCPOFOQueue": - procNetstat.TcpExt.TCPOFOQueue = value + procNetstat.TcpExt.TCPOFOQueue = &value case "TCPOFODrop": - procNetstat.TcpExt.TCPOFODrop = value + procNetstat.TcpExt.TCPOFODrop = &value case "TCPOFOMerge": - 
procNetstat.TcpExt.TCPOFOMerge = value + procNetstat.TcpExt.TCPOFOMerge = &value case "TCPChallengeACK": - procNetstat.TcpExt.TCPChallengeACK = value + procNetstat.TcpExt.TCPChallengeACK = &value case "TCPSYNChallenge": - procNetstat.TcpExt.TCPSYNChallenge = value + procNetstat.TcpExt.TCPSYNChallenge = &value case "TCPFastOpenActive": - procNetstat.TcpExt.TCPFastOpenActive = value + procNetstat.TcpExt.TCPFastOpenActive = &value case "TCPFastOpenActiveFail": - procNetstat.TcpExt.TCPFastOpenActiveFail = value + procNetstat.TcpExt.TCPFastOpenActiveFail = &value case "TCPFastOpenPassive": - procNetstat.TcpExt.TCPFastOpenPassive = value + procNetstat.TcpExt.TCPFastOpenPassive = &value case "TCPFastOpenPassiveFail": - procNetstat.TcpExt.TCPFastOpenPassiveFail = value + procNetstat.TcpExt.TCPFastOpenPassiveFail = &value case "TCPFastOpenListenOverflow": - procNetstat.TcpExt.TCPFastOpenListenOverflow = value + procNetstat.TcpExt.TCPFastOpenListenOverflow = &value case "TCPFastOpenCookieReqd": - procNetstat.TcpExt.TCPFastOpenCookieReqd = value + procNetstat.TcpExt.TCPFastOpenCookieReqd = &value case "TCPFastOpenBlackhole": - procNetstat.TcpExt.TCPFastOpenBlackhole = value + procNetstat.TcpExt.TCPFastOpenBlackhole = &value case "TCPSpuriousRtxHostQueues": - procNetstat.TcpExt.TCPSpuriousRtxHostQueues = value + procNetstat.TcpExt.TCPSpuriousRtxHostQueues = &value case "BusyPollRxPackets": - procNetstat.TcpExt.BusyPollRxPackets = value + procNetstat.TcpExt.BusyPollRxPackets = &value case "TCPAutoCorking": - procNetstat.TcpExt.TCPAutoCorking = value + procNetstat.TcpExt.TCPAutoCorking = &value case "TCPFromZeroWindowAdv": - procNetstat.TcpExt.TCPFromZeroWindowAdv = value + procNetstat.TcpExt.TCPFromZeroWindowAdv = &value case "TCPToZeroWindowAdv": - procNetstat.TcpExt.TCPToZeroWindowAdv = value + procNetstat.TcpExt.TCPToZeroWindowAdv = &value case "TCPWantZeroWindowAdv": - procNetstat.TcpExt.TCPWantZeroWindowAdv = value + procNetstat.TcpExt.TCPWantZeroWindowAdv = &value case "TCPSynRetrans": - procNetstat.TcpExt.TCPSynRetrans = value + procNetstat.TcpExt.TCPSynRetrans = &value case "TCPOrigDataSent": - procNetstat.TcpExt.TCPOrigDataSent = value + procNetstat.TcpExt.TCPOrigDataSent = &value case "TCPHystartTrainDetect": - procNetstat.TcpExt.TCPHystartTrainDetect = value + procNetstat.TcpExt.TCPHystartTrainDetect = &value case "TCPHystartTrainCwnd": - procNetstat.TcpExt.TCPHystartTrainCwnd = value + procNetstat.TcpExt.TCPHystartTrainCwnd = &value case "TCPHystartDelayDetect": - procNetstat.TcpExt.TCPHystartDelayDetect = value + procNetstat.TcpExt.TCPHystartDelayDetect = &value case "TCPHystartDelayCwnd": - procNetstat.TcpExt.TCPHystartDelayCwnd = value + procNetstat.TcpExt.TCPHystartDelayCwnd = &value case "TCPACKSkippedSynRecv": - procNetstat.TcpExt.TCPACKSkippedSynRecv = value + procNetstat.TcpExt.TCPACKSkippedSynRecv = &value case "TCPACKSkippedPAWS": - procNetstat.TcpExt.TCPACKSkippedPAWS = value + procNetstat.TcpExt.TCPACKSkippedPAWS = &value case "TCPACKSkippedSeq": - procNetstat.TcpExt.TCPACKSkippedSeq = value + procNetstat.TcpExt.TCPACKSkippedSeq = &value case "TCPACKSkippedFinWait2": - procNetstat.TcpExt.TCPACKSkippedFinWait2 = value + procNetstat.TcpExt.TCPACKSkippedFinWait2 = &value case "TCPACKSkippedTimeWait": - procNetstat.TcpExt.TCPACKSkippedTimeWait = value + procNetstat.TcpExt.TCPACKSkippedTimeWait = &value case "TCPACKSkippedChallenge": - procNetstat.TcpExt.TCPACKSkippedChallenge = value + procNetstat.TcpExt.TCPACKSkippedChallenge = &value case "TCPWinProbe": - 
procNetstat.TcpExt.TCPWinProbe = value + procNetstat.TcpExt.TCPWinProbe = &value case "TCPKeepAlive": - procNetstat.TcpExt.TCPKeepAlive = value + procNetstat.TcpExt.TCPKeepAlive = &value case "TCPMTUPFail": - procNetstat.TcpExt.TCPMTUPFail = value + procNetstat.TcpExt.TCPMTUPFail = &value case "TCPMTUPSuccess": - procNetstat.TcpExt.TCPMTUPSuccess = value + procNetstat.TcpExt.TCPMTUPSuccess = &value case "TCPWqueueTooBig": - procNetstat.TcpExt.TCPWqueueTooBig = value + procNetstat.TcpExt.TCPWqueueTooBig = &value } case "IpExt": switch key { case "InNoRoutes": - procNetstat.IpExt.InNoRoutes = value + procNetstat.IpExt.InNoRoutes = &value case "InTruncatedPkts": - procNetstat.IpExt.InTruncatedPkts = value + procNetstat.IpExt.InTruncatedPkts = &value case "InMcastPkts": - procNetstat.IpExt.InMcastPkts = value + procNetstat.IpExt.InMcastPkts = &value case "OutMcastPkts": - procNetstat.IpExt.OutMcastPkts = value + procNetstat.IpExt.OutMcastPkts = &value case "InBcastPkts": - procNetstat.IpExt.InBcastPkts = value + procNetstat.IpExt.InBcastPkts = &value case "OutBcastPkts": - procNetstat.IpExt.OutBcastPkts = value + procNetstat.IpExt.OutBcastPkts = &value case "InOctets": - procNetstat.IpExt.InOctets = value + procNetstat.IpExt.InOctets = &value case "OutOctets": - procNetstat.IpExt.OutOctets = value + procNetstat.IpExt.OutOctets = &value case "InMcastOctets": - procNetstat.IpExt.InMcastOctets = value + procNetstat.IpExt.InMcastOctets = &value case "OutMcastOctets": - procNetstat.IpExt.OutMcastOctets = value + procNetstat.IpExt.OutMcastOctets = &value case "InBcastOctets": - procNetstat.IpExt.InBcastOctets = value + procNetstat.IpExt.InBcastOctets = &value case "OutBcastOctets": - procNetstat.IpExt.OutBcastOctets = value + procNetstat.IpExt.OutBcastOctets = &value case "InCsumErrors": - procNetstat.IpExt.InCsumErrors = value + procNetstat.IpExt.InCsumErrors = &value case "InNoECTPkts": - procNetstat.IpExt.InNoECTPkts = value + procNetstat.IpExt.InNoECTPkts = &value case "InECT1Pkts": - procNetstat.IpExt.InECT1Pkts = value + procNetstat.IpExt.InECT1Pkts = &value case "InECT0Pkts": - procNetstat.IpExt.InECT0Pkts = value + procNetstat.IpExt.InECT0Pkts = &value case "InCEPkts": - procNetstat.IpExt.InCEPkts = value + procNetstat.IpExt.InCEPkts = &value case "ReasmOverlaps": - procNetstat.IpExt.ReasmOverlaps = value + procNetstat.IpExt.ReasmOverlaps = &value } } } diff --git a/vendor/github.com/prometheus/procfs/proc_snmp.go b/vendor/github.com/prometheus/procfs/proc_snmp.go index ae19189..6c46b71 100644 --- a/vendor/github.com/prometheus/procfs/proc_snmp.go +++ b/vendor/github.com/prometheus/procfs/proc_snmp.go @@ -37,100 +37,100 @@ type ProcSnmp struct { } type Ip struct { // nolint:revive - Forwarding float64 - DefaultTTL float64 - InReceives float64 - InHdrErrors float64 - InAddrErrors float64 - ForwDatagrams float64 - InUnknownProtos float64 - InDiscards float64 - InDelivers float64 - OutRequests float64 - OutDiscards float64 - OutNoRoutes float64 - ReasmTimeout float64 - ReasmReqds float64 - ReasmOKs float64 - ReasmFails float64 - FragOKs float64 - FragFails float64 - FragCreates float64 + Forwarding *float64 + DefaultTTL *float64 + InReceives *float64 + InHdrErrors *float64 + InAddrErrors *float64 + ForwDatagrams *float64 + InUnknownProtos *float64 + InDiscards *float64 + InDelivers *float64 + OutRequests *float64 + OutDiscards *float64 + OutNoRoutes *float64 + ReasmTimeout *float64 + ReasmReqds *float64 + ReasmOKs *float64 + ReasmFails *float64 + FragOKs *float64 + FragFails *float64 + 
FragCreates *float64 } -type Icmp struct { - InMsgs float64 - InErrors float64 - InCsumErrors float64 - InDestUnreachs float64 - InTimeExcds float64 - InParmProbs float64 - InSrcQuenchs float64 - InRedirects float64 - InEchos float64 - InEchoReps float64 - InTimestamps float64 - InTimestampReps float64 - InAddrMasks float64 - InAddrMaskReps float64 - OutMsgs float64 - OutErrors float64 - OutDestUnreachs float64 - OutTimeExcds float64 - OutParmProbs float64 - OutSrcQuenchs float64 - OutRedirects float64 - OutEchos float64 - OutEchoReps float64 - OutTimestamps float64 - OutTimestampReps float64 - OutAddrMasks float64 - OutAddrMaskReps float64 +type Icmp struct { // nolint:revive + InMsgs *float64 + InErrors *float64 + InCsumErrors *float64 + InDestUnreachs *float64 + InTimeExcds *float64 + InParmProbs *float64 + InSrcQuenchs *float64 + InRedirects *float64 + InEchos *float64 + InEchoReps *float64 + InTimestamps *float64 + InTimestampReps *float64 + InAddrMasks *float64 + InAddrMaskReps *float64 + OutMsgs *float64 + OutErrors *float64 + OutDestUnreachs *float64 + OutTimeExcds *float64 + OutParmProbs *float64 + OutSrcQuenchs *float64 + OutRedirects *float64 + OutEchos *float64 + OutEchoReps *float64 + OutTimestamps *float64 + OutTimestampReps *float64 + OutAddrMasks *float64 + OutAddrMaskReps *float64 } type IcmpMsg struct { - InType3 float64 - OutType3 float64 + InType3 *float64 + OutType3 *float64 } type Tcp struct { // nolint:revive - RtoAlgorithm float64 - RtoMin float64 - RtoMax float64 - MaxConn float64 - ActiveOpens float64 - PassiveOpens float64 - AttemptFails float64 - EstabResets float64 - CurrEstab float64 - InSegs float64 - OutSegs float64 - RetransSegs float64 - InErrs float64 - OutRsts float64 - InCsumErrors float64 + RtoAlgorithm *float64 + RtoMin *float64 + RtoMax *float64 + MaxConn *float64 + ActiveOpens *float64 + PassiveOpens *float64 + AttemptFails *float64 + EstabResets *float64 + CurrEstab *float64 + InSegs *float64 + OutSegs *float64 + RetransSegs *float64 + InErrs *float64 + OutRsts *float64 + InCsumErrors *float64 } type Udp struct { // nolint:revive - InDatagrams float64 - NoPorts float64 - InErrors float64 - OutDatagrams float64 - RcvbufErrors float64 - SndbufErrors float64 - InCsumErrors float64 - IgnoredMulti float64 + InDatagrams *float64 + NoPorts *float64 + InErrors *float64 + OutDatagrams *float64 + RcvbufErrors *float64 + SndbufErrors *float64 + InCsumErrors *float64 + IgnoredMulti *float64 } type UdpLite struct { // nolint:revive - InDatagrams float64 - NoPorts float64 - InErrors float64 - OutDatagrams float64 - RcvbufErrors float64 - SndbufErrors float64 - InCsumErrors float64 - IgnoredMulti float64 + InDatagrams *float64 + NoPorts *float64 + InErrors *float64 + OutDatagrams *float64 + RcvbufErrors *float64 + SndbufErrors *float64 + InCsumErrors *float64 + IgnoredMulti *float64 } func (p Proc) Snmp() (ProcSnmp, error) { @@ -173,178 +173,178 @@ func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) { case "Ip": switch key { case "Forwarding": - procSnmp.Ip.Forwarding = value + procSnmp.Ip.Forwarding = &value case "DefaultTTL": - procSnmp.Ip.DefaultTTL = value + procSnmp.Ip.DefaultTTL = &value case "InReceives": - procSnmp.Ip.InReceives = value + procSnmp.Ip.InReceives = &value case "InHdrErrors": - procSnmp.Ip.InHdrErrors = value + procSnmp.Ip.InHdrErrors = &value case "InAddrErrors": - procSnmp.Ip.InAddrErrors = value + procSnmp.Ip.InAddrErrors = &value case "ForwDatagrams": - procSnmp.Ip.ForwDatagrams = value + procSnmp.Ip.ForwDatagrams = &value 
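// Illustration only — a sketch (not from the vendored sources) of the same nil-able
// pattern applied to the ProcSnmp structs above: counters are dereferenced only after
// a nil check.
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	snmp, err := p.Snmp()
	if err != nil {
		log.Fatal(err)
	}
	if snmp.Tcp.CurrEstab != nil { // nil if the kernel's Tcp: line lacks this column
		fmt.Println("CurrEstab:", *snmp.Tcp.CurrEstab)
	}
}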
case "InUnknownProtos": - procSnmp.Ip.InUnknownProtos = value + procSnmp.Ip.InUnknownProtos = &value case "InDiscards": - procSnmp.Ip.InDiscards = value + procSnmp.Ip.InDiscards = &value case "InDelivers": - procSnmp.Ip.InDelivers = value + procSnmp.Ip.InDelivers = &value case "OutRequests": - procSnmp.Ip.OutRequests = value + procSnmp.Ip.OutRequests = &value case "OutDiscards": - procSnmp.Ip.OutDiscards = value + procSnmp.Ip.OutDiscards = &value case "OutNoRoutes": - procSnmp.Ip.OutNoRoutes = value + procSnmp.Ip.OutNoRoutes = &value case "ReasmTimeout": - procSnmp.Ip.ReasmTimeout = value + procSnmp.Ip.ReasmTimeout = &value case "ReasmReqds": - procSnmp.Ip.ReasmReqds = value + procSnmp.Ip.ReasmReqds = &value case "ReasmOKs": - procSnmp.Ip.ReasmOKs = value + procSnmp.Ip.ReasmOKs = &value case "ReasmFails": - procSnmp.Ip.ReasmFails = value + procSnmp.Ip.ReasmFails = &value case "FragOKs": - procSnmp.Ip.FragOKs = value + procSnmp.Ip.FragOKs = &value case "FragFails": - procSnmp.Ip.FragFails = value + procSnmp.Ip.FragFails = &value case "FragCreates": - procSnmp.Ip.FragCreates = value + procSnmp.Ip.FragCreates = &value } case "Icmp": switch key { case "InMsgs": - procSnmp.Icmp.InMsgs = value + procSnmp.Icmp.InMsgs = &value case "InErrors": - procSnmp.Icmp.InErrors = value + procSnmp.Icmp.InErrors = &value case "InCsumErrors": - procSnmp.Icmp.InCsumErrors = value + procSnmp.Icmp.InCsumErrors = &value case "InDestUnreachs": - procSnmp.Icmp.InDestUnreachs = value + procSnmp.Icmp.InDestUnreachs = &value case "InTimeExcds": - procSnmp.Icmp.InTimeExcds = value + procSnmp.Icmp.InTimeExcds = &value case "InParmProbs": - procSnmp.Icmp.InParmProbs = value + procSnmp.Icmp.InParmProbs = &value case "InSrcQuenchs": - procSnmp.Icmp.InSrcQuenchs = value + procSnmp.Icmp.InSrcQuenchs = &value case "InRedirects": - procSnmp.Icmp.InRedirects = value + procSnmp.Icmp.InRedirects = &value case "InEchos": - procSnmp.Icmp.InEchos = value + procSnmp.Icmp.InEchos = &value case "InEchoReps": - procSnmp.Icmp.InEchoReps = value + procSnmp.Icmp.InEchoReps = &value case "InTimestamps": - procSnmp.Icmp.InTimestamps = value + procSnmp.Icmp.InTimestamps = &value case "InTimestampReps": - procSnmp.Icmp.InTimestampReps = value + procSnmp.Icmp.InTimestampReps = &value case "InAddrMasks": - procSnmp.Icmp.InAddrMasks = value + procSnmp.Icmp.InAddrMasks = &value case "InAddrMaskReps": - procSnmp.Icmp.InAddrMaskReps = value + procSnmp.Icmp.InAddrMaskReps = &value case "OutMsgs": - procSnmp.Icmp.OutMsgs = value + procSnmp.Icmp.OutMsgs = &value case "OutErrors": - procSnmp.Icmp.OutErrors = value + procSnmp.Icmp.OutErrors = &value case "OutDestUnreachs": - procSnmp.Icmp.OutDestUnreachs = value + procSnmp.Icmp.OutDestUnreachs = &value case "OutTimeExcds": - procSnmp.Icmp.OutTimeExcds = value + procSnmp.Icmp.OutTimeExcds = &value case "OutParmProbs": - procSnmp.Icmp.OutParmProbs = value + procSnmp.Icmp.OutParmProbs = &value case "OutSrcQuenchs": - procSnmp.Icmp.OutSrcQuenchs = value + procSnmp.Icmp.OutSrcQuenchs = &value case "OutRedirects": - procSnmp.Icmp.OutRedirects = value + procSnmp.Icmp.OutRedirects = &value case "OutEchos": - procSnmp.Icmp.OutEchos = value + procSnmp.Icmp.OutEchos = &value case "OutEchoReps": - procSnmp.Icmp.OutEchoReps = value + procSnmp.Icmp.OutEchoReps = &value case "OutTimestamps": - procSnmp.Icmp.OutTimestamps = value + procSnmp.Icmp.OutTimestamps = &value case "OutTimestampReps": - procSnmp.Icmp.OutTimestampReps = value + procSnmp.Icmp.OutTimestampReps = &value case "OutAddrMasks": - 
procSnmp.Icmp.OutAddrMasks = value + procSnmp.Icmp.OutAddrMasks = &value case "OutAddrMaskReps": - procSnmp.Icmp.OutAddrMaskReps = value + procSnmp.Icmp.OutAddrMaskReps = &value } case "IcmpMsg": switch key { case "InType3": - procSnmp.IcmpMsg.InType3 = value + procSnmp.IcmpMsg.InType3 = &value case "OutType3": - procSnmp.IcmpMsg.OutType3 = value + procSnmp.IcmpMsg.OutType3 = &value } case "Tcp": switch key { case "RtoAlgorithm": - procSnmp.Tcp.RtoAlgorithm = value + procSnmp.Tcp.RtoAlgorithm = &value case "RtoMin": - procSnmp.Tcp.RtoMin = value + procSnmp.Tcp.RtoMin = &value case "RtoMax": - procSnmp.Tcp.RtoMax = value + procSnmp.Tcp.RtoMax = &value case "MaxConn": - procSnmp.Tcp.MaxConn = value + procSnmp.Tcp.MaxConn = &value case "ActiveOpens": - procSnmp.Tcp.ActiveOpens = value + procSnmp.Tcp.ActiveOpens = &value case "PassiveOpens": - procSnmp.Tcp.PassiveOpens = value + procSnmp.Tcp.PassiveOpens = &value case "AttemptFails": - procSnmp.Tcp.AttemptFails = value + procSnmp.Tcp.AttemptFails = &value case "EstabResets": - procSnmp.Tcp.EstabResets = value + procSnmp.Tcp.EstabResets = &value case "CurrEstab": - procSnmp.Tcp.CurrEstab = value + procSnmp.Tcp.CurrEstab = &value case "InSegs": - procSnmp.Tcp.InSegs = value + procSnmp.Tcp.InSegs = &value case "OutSegs": - procSnmp.Tcp.OutSegs = value + procSnmp.Tcp.OutSegs = &value case "RetransSegs": - procSnmp.Tcp.RetransSegs = value + procSnmp.Tcp.RetransSegs = &value case "InErrs": - procSnmp.Tcp.InErrs = value + procSnmp.Tcp.InErrs = &value case "OutRsts": - procSnmp.Tcp.OutRsts = value + procSnmp.Tcp.OutRsts = &value case "InCsumErrors": - procSnmp.Tcp.InCsumErrors = value + procSnmp.Tcp.InCsumErrors = &value } case "Udp": switch key { case "InDatagrams": - procSnmp.Udp.InDatagrams = value + procSnmp.Udp.InDatagrams = &value case "NoPorts": - procSnmp.Udp.NoPorts = value + procSnmp.Udp.NoPorts = &value case "InErrors": - procSnmp.Udp.InErrors = value + procSnmp.Udp.InErrors = &value case "OutDatagrams": - procSnmp.Udp.OutDatagrams = value + procSnmp.Udp.OutDatagrams = &value case "RcvbufErrors": - procSnmp.Udp.RcvbufErrors = value + procSnmp.Udp.RcvbufErrors = &value case "SndbufErrors": - procSnmp.Udp.SndbufErrors = value + procSnmp.Udp.SndbufErrors = &value case "InCsumErrors": - procSnmp.Udp.InCsumErrors = value + procSnmp.Udp.InCsumErrors = &value case "IgnoredMulti": - procSnmp.Udp.IgnoredMulti = value + procSnmp.Udp.IgnoredMulti = &value } case "UdpLite": switch key { case "InDatagrams": - procSnmp.UdpLite.InDatagrams = value + procSnmp.UdpLite.InDatagrams = &value case "NoPorts": - procSnmp.UdpLite.NoPorts = value + procSnmp.UdpLite.NoPorts = &value case "InErrors": - procSnmp.UdpLite.InErrors = value + procSnmp.UdpLite.InErrors = &value case "OutDatagrams": - procSnmp.UdpLite.OutDatagrams = value + procSnmp.UdpLite.OutDatagrams = &value case "RcvbufErrors": - procSnmp.UdpLite.RcvbufErrors = value + procSnmp.UdpLite.RcvbufErrors = &value case "SndbufErrors": - procSnmp.UdpLite.SndbufErrors = value + procSnmp.UdpLite.SndbufErrors = &value case "InCsumErrors": - procSnmp.UdpLite.InCsumErrors = value + procSnmp.UdpLite.InCsumErrors = &value case "IgnoredMulti": - procSnmp.UdpLite.IgnoredMulti = value + procSnmp.UdpLite.IgnoredMulti = &value } } } diff --git a/vendor/github.com/prometheus/procfs/proc_snmp6.go b/vendor/github.com/prometheus/procfs/proc_snmp6.go index f611992..3059cc6 100644 --- a/vendor/github.com/prometheus/procfs/proc_snmp6.go +++ b/vendor/github.com/prometheus/procfs/proc_snmp6.go @@ -36,106 +36,106 @@ type ProcSnmp6 
struct { } type Ip6 struct { // nolint:revive - InReceives float64 - InHdrErrors float64 - InTooBigErrors float64 - InNoRoutes float64 - InAddrErrors float64 - InUnknownProtos float64 - InTruncatedPkts float64 - InDiscards float64 - InDelivers float64 - OutForwDatagrams float64 - OutRequests float64 - OutDiscards float64 - OutNoRoutes float64 - ReasmTimeout float64 - ReasmReqds float64 - ReasmOKs float64 - ReasmFails float64 - FragOKs float64 - FragFails float64 - FragCreates float64 - InMcastPkts float64 - OutMcastPkts float64 - InOctets float64 - OutOctets float64 - InMcastOctets float64 - OutMcastOctets float64 - InBcastOctets float64 - OutBcastOctets float64 - InNoECTPkts float64 - InECT1Pkts float64 - InECT0Pkts float64 - InCEPkts float64 + InReceives *float64 + InHdrErrors *float64 + InTooBigErrors *float64 + InNoRoutes *float64 + InAddrErrors *float64 + InUnknownProtos *float64 + InTruncatedPkts *float64 + InDiscards *float64 + InDelivers *float64 + OutForwDatagrams *float64 + OutRequests *float64 + OutDiscards *float64 + OutNoRoutes *float64 + ReasmTimeout *float64 + ReasmReqds *float64 + ReasmOKs *float64 + ReasmFails *float64 + FragOKs *float64 + FragFails *float64 + FragCreates *float64 + InMcastPkts *float64 + OutMcastPkts *float64 + InOctets *float64 + OutOctets *float64 + InMcastOctets *float64 + OutMcastOctets *float64 + InBcastOctets *float64 + OutBcastOctets *float64 + InNoECTPkts *float64 + InECT1Pkts *float64 + InECT0Pkts *float64 + InCEPkts *float64 } type Icmp6 struct { - InMsgs float64 - InErrors float64 - OutMsgs float64 - OutErrors float64 - InCsumErrors float64 - InDestUnreachs float64 - InPktTooBigs float64 - InTimeExcds float64 - InParmProblems float64 - InEchos float64 - InEchoReplies float64 - InGroupMembQueries float64 - InGroupMembResponses float64 - InGroupMembReductions float64 - InRouterSolicits float64 - InRouterAdvertisements float64 - InNeighborSolicits float64 - InNeighborAdvertisements float64 - InRedirects float64 - InMLDv2Reports float64 - OutDestUnreachs float64 - OutPktTooBigs float64 - OutTimeExcds float64 - OutParmProblems float64 - OutEchos float64 - OutEchoReplies float64 - OutGroupMembQueries float64 - OutGroupMembResponses float64 - OutGroupMembReductions float64 - OutRouterSolicits float64 - OutRouterAdvertisements float64 - OutNeighborSolicits float64 - OutNeighborAdvertisements float64 - OutRedirects float64 - OutMLDv2Reports float64 - InType1 float64 - InType134 float64 - InType135 float64 - InType136 float64 - InType143 float64 - OutType133 float64 - OutType135 float64 - OutType136 float64 - OutType143 float64 + InMsgs *float64 + InErrors *float64 + OutMsgs *float64 + OutErrors *float64 + InCsumErrors *float64 + InDestUnreachs *float64 + InPktTooBigs *float64 + InTimeExcds *float64 + InParmProblems *float64 + InEchos *float64 + InEchoReplies *float64 + InGroupMembQueries *float64 + InGroupMembResponses *float64 + InGroupMembReductions *float64 + InRouterSolicits *float64 + InRouterAdvertisements *float64 + InNeighborSolicits *float64 + InNeighborAdvertisements *float64 + InRedirects *float64 + InMLDv2Reports *float64 + OutDestUnreachs *float64 + OutPktTooBigs *float64 + OutTimeExcds *float64 + OutParmProblems *float64 + OutEchos *float64 + OutEchoReplies *float64 + OutGroupMembQueries *float64 + OutGroupMembResponses *float64 + OutGroupMembReductions *float64 + OutRouterSolicits *float64 + OutRouterAdvertisements *float64 + OutNeighborSolicits *float64 + OutNeighborAdvertisements *float64 + OutRedirects *float64 + OutMLDv2Reports 
*float64 + InType1 *float64 + InType134 *float64 + InType135 *float64 + InType136 *float64 + InType143 *float64 + OutType133 *float64 + OutType135 *float64 + OutType136 *float64 + OutType143 *float64 } type Udp6 struct { // nolint:revive - InDatagrams float64 - NoPorts float64 - InErrors float64 - OutDatagrams float64 - RcvbufErrors float64 - SndbufErrors float64 - InCsumErrors float64 - IgnoredMulti float64 + InDatagrams *float64 + NoPorts *float64 + InErrors *float64 + OutDatagrams *float64 + RcvbufErrors *float64 + SndbufErrors *float64 + InCsumErrors *float64 + IgnoredMulti *float64 } type UdpLite6 struct { // nolint:revive - InDatagrams float64 - NoPorts float64 - InErrors float64 - OutDatagrams float64 - RcvbufErrors float64 - SndbufErrors float64 - InCsumErrors float64 + InDatagrams *float64 + NoPorts *float64 + InErrors *float64 + OutDatagrams *float64 + RcvbufErrors *float64 + SndbufErrors *float64 + InCsumErrors *float64 } func (p Proc) Snmp6() (ProcSnmp6, error) { @@ -182,197 +182,197 @@ func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) { case "Ip6": switch key { case "InReceives": - procSnmp6.Ip6.InReceives = value + procSnmp6.Ip6.InReceives = &value case "InHdrErrors": - procSnmp6.Ip6.InHdrErrors = value + procSnmp6.Ip6.InHdrErrors = &value case "InTooBigErrors": - procSnmp6.Ip6.InTooBigErrors = value + procSnmp6.Ip6.InTooBigErrors = &value case "InNoRoutes": - procSnmp6.Ip6.InNoRoutes = value + procSnmp6.Ip6.InNoRoutes = &value case "InAddrErrors": - procSnmp6.Ip6.InAddrErrors = value + procSnmp6.Ip6.InAddrErrors = &value case "InUnknownProtos": - procSnmp6.Ip6.InUnknownProtos = value + procSnmp6.Ip6.InUnknownProtos = &value case "InTruncatedPkts": - procSnmp6.Ip6.InTruncatedPkts = value + procSnmp6.Ip6.InTruncatedPkts = &value case "InDiscards": - procSnmp6.Ip6.InDiscards = value + procSnmp6.Ip6.InDiscards = &value case "InDelivers": - procSnmp6.Ip6.InDelivers = value + procSnmp6.Ip6.InDelivers = &value case "OutForwDatagrams": - procSnmp6.Ip6.OutForwDatagrams = value + procSnmp6.Ip6.OutForwDatagrams = &value case "OutRequests": - procSnmp6.Ip6.OutRequests = value + procSnmp6.Ip6.OutRequests = &value case "OutDiscards": - procSnmp6.Ip6.OutDiscards = value + procSnmp6.Ip6.OutDiscards = &value case "OutNoRoutes": - procSnmp6.Ip6.OutNoRoutes = value + procSnmp6.Ip6.OutNoRoutes = &value case "ReasmTimeout": - procSnmp6.Ip6.ReasmTimeout = value + procSnmp6.Ip6.ReasmTimeout = &value case "ReasmReqds": - procSnmp6.Ip6.ReasmReqds = value + procSnmp6.Ip6.ReasmReqds = &value case "ReasmOKs": - procSnmp6.Ip6.ReasmOKs = value + procSnmp6.Ip6.ReasmOKs = &value case "ReasmFails": - procSnmp6.Ip6.ReasmFails = value + procSnmp6.Ip6.ReasmFails = &value case "FragOKs": - procSnmp6.Ip6.FragOKs = value + procSnmp6.Ip6.FragOKs = &value case "FragFails": - procSnmp6.Ip6.FragFails = value + procSnmp6.Ip6.FragFails = &value case "FragCreates": - procSnmp6.Ip6.FragCreates = value + procSnmp6.Ip6.FragCreates = &value case "InMcastPkts": - procSnmp6.Ip6.InMcastPkts = value + procSnmp6.Ip6.InMcastPkts = &value case "OutMcastPkts": - procSnmp6.Ip6.OutMcastPkts = value + procSnmp6.Ip6.OutMcastPkts = &value case "InOctets": - procSnmp6.Ip6.InOctets = value + procSnmp6.Ip6.InOctets = &value case "OutOctets": - procSnmp6.Ip6.OutOctets = value + procSnmp6.Ip6.OutOctets = &value case "InMcastOctets": - procSnmp6.Ip6.InMcastOctets = value + procSnmp6.Ip6.InMcastOctets = &value case "OutMcastOctets": - procSnmp6.Ip6.OutMcastOctets = value + procSnmp6.Ip6.OutMcastOctets = &value case "InBcastOctets": - 
procSnmp6.Ip6.InBcastOctets = value + procSnmp6.Ip6.InBcastOctets = &value case "OutBcastOctets": - procSnmp6.Ip6.OutBcastOctets = value + procSnmp6.Ip6.OutBcastOctets = &value case "InNoECTPkts": - procSnmp6.Ip6.InNoECTPkts = value + procSnmp6.Ip6.InNoECTPkts = &value case "InECT1Pkts": - procSnmp6.Ip6.InECT1Pkts = value + procSnmp6.Ip6.InECT1Pkts = &value case "InECT0Pkts": - procSnmp6.Ip6.InECT0Pkts = value + procSnmp6.Ip6.InECT0Pkts = &value case "InCEPkts": - procSnmp6.Ip6.InCEPkts = value + procSnmp6.Ip6.InCEPkts = &value } case "Icmp6": switch key { case "InMsgs": - procSnmp6.Icmp6.InMsgs = value + procSnmp6.Icmp6.InMsgs = &value case "InErrors": - procSnmp6.Icmp6.InErrors = value + procSnmp6.Icmp6.InErrors = &value case "OutMsgs": - procSnmp6.Icmp6.OutMsgs = value + procSnmp6.Icmp6.OutMsgs = &value case "OutErrors": - procSnmp6.Icmp6.OutErrors = value + procSnmp6.Icmp6.OutErrors = &value case "InCsumErrors": - procSnmp6.Icmp6.InCsumErrors = value + procSnmp6.Icmp6.InCsumErrors = &value case "InDestUnreachs": - procSnmp6.Icmp6.InDestUnreachs = value + procSnmp6.Icmp6.InDestUnreachs = &value case "InPktTooBigs": - procSnmp6.Icmp6.InPktTooBigs = value + procSnmp6.Icmp6.InPktTooBigs = &value case "InTimeExcds": - procSnmp6.Icmp6.InTimeExcds = value + procSnmp6.Icmp6.InTimeExcds = &value case "InParmProblems": - procSnmp6.Icmp6.InParmProblems = value + procSnmp6.Icmp6.InParmProblems = &value case "InEchos": - procSnmp6.Icmp6.InEchos = value + procSnmp6.Icmp6.InEchos = &value case "InEchoReplies": - procSnmp6.Icmp6.InEchoReplies = value + procSnmp6.Icmp6.InEchoReplies = &value case "InGroupMembQueries": - procSnmp6.Icmp6.InGroupMembQueries = value + procSnmp6.Icmp6.InGroupMembQueries = &value case "InGroupMembResponses": - procSnmp6.Icmp6.InGroupMembResponses = value + procSnmp6.Icmp6.InGroupMembResponses = &value case "InGroupMembReductions": - procSnmp6.Icmp6.InGroupMembReductions = value + procSnmp6.Icmp6.InGroupMembReductions = &value case "InRouterSolicits": - procSnmp6.Icmp6.InRouterSolicits = value + procSnmp6.Icmp6.InRouterSolicits = &value case "InRouterAdvertisements": - procSnmp6.Icmp6.InRouterAdvertisements = value + procSnmp6.Icmp6.InRouterAdvertisements = &value case "InNeighborSolicits": - procSnmp6.Icmp6.InNeighborSolicits = value + procSnmp6.Icmp6.InNeighborSolicits = &value case "InNeighborAdvertisements": - procSnmp6.Icmp6.InNeighborAdvertisements = value + procSnmp6.Icmp6.InNeighborAdvertisements = &value case "InRedirects": - procSnmp6.Icmp6.InRedirects = value + procSnmp6.Icmp6.InRedirects = &value case "InMLDv2Reports": - procSnmp6.Icmp6.InMLDv2Reports = value + procSnmp6.Icmp6.InMLDv2Reports = &value case "OutDestUnreachs": - procSnmp6.Icmp6.OutDestUnreachs = value + procSnmp6.Icmp6.OutDestUnreachs = &value case "OutPktTooBigs": - procSnmp6.Icmp6.OutPktTooBigs = value + procSnmp6.Icmp6.OutPktTooBigs = &value case "OutTimeExcds": - procSnmp6.Icmp6.OutTimeExcds = value + procSnmp6.Icmp6.OutTimeExcds = &value case "OutParmProblems": - procSnmp6.Icmp6.OutParmProblems = value + procSnmp6.Icmp6.OutParmProblems = &value case "OutEchos": - procSnmp6.Icmp6.OutEchos = value + procSnmp6.Icmp6.OutEchos = &value case "OutEchoReplies": - procSnmp6.Icmp6.OutEchoReplies = value + procSnmp6.Icmp6.OutEchoReplies = &value case "OutGroupMembQueries": - procSnmp6.Icmp6.OutGroupMembQueries = value + procSnmp6.Icmp6.OutGroupMembQueries = &value case "OutGroupMembResponses": - procSnmp6.Icmp6.OutGroupMembResponses = value + procSnmp6.Icmp6.OutGroupMembResponses = &value case 
"OutGroupMembReductions": - procSnmp6.Icmp6.OutGroupMembReductions = value + procSnmp6.Icmp6.OutGroupMembReductions = &value case "OutRouterSolicits": - procSnmp6.Icmp6.OutRouterSolicits = value + procSnmp6.Icmp6.OutRouterSolicits = &value case "OutRouterAdvertisements": - procSnmp6.Icmp6.OutRouterAdvertisements = value + procSnmp6.Icmp6.OutRouterAdvertisements = &value case "OutNeighborSolicits": - procSnmp6.Icmp6.OutNeighborSolicits = value + procSnmp6.Icmp6.OutNeighborSolicits = &value case "OutNeighborAdvertisements": - procSnmp6.Icmp6.OutNeighborAdvertisements = value + procSnmp6.Icmp6.OutNeighborAdvertisements = &value case "OutRedirects": - procSnmp6.Icmp6.OutRedirects = value + procSnmp6.Icmp6.OutRedirects = &value case "OutMLDv2Reports": - procSnmp6.Icmp6.OutMLDv2Reports = value + procSnmp6.Icmp6.OutMLDv2Reports = &value case "InType1": - procSnmp6.Icmp6.InType1 = value + procSnmp6.Icmp6.InType1 = &value case "InType134": - procSnmp6.Icmp6.InType134 = value + procSnmp6.Icmp6.InType134 = &value case "InType135": - procSnmp6.Icmp6.InType135 = value + procSnmp6.Icmp6.InType135 = &value case "InType136": - procSnmp6.Icmp6.InType136 = value + procSnmp6.Icmp6.InType136 = &value case "InType143": - procSnmp6.Icmp6.InType143 = value + procSnmp6.Icmp6.InType143 = &value case "OutType133": - procSnmp6.Icmp6.OutType133 = value + procSnmp6.Icmp6.OutType133 = &value case "OutType135": - procSnmp6.Icmp6.OutType135 = value + procSnmp6.Icmp6.OutType135 = &value case "OutType136": - procSnmp6.Icmp6.OutType136 = value + procSnmp6.Icmp6.OutType136 = &value case "OutType143": - procSnmp6.Icmp6.OutType143 = value + procSnmp6.Icmp6.OutType143 = &value } case "Udp6": switch key { case "InDatagrams": - procSnmp6.Udp6.InDatagrams = value + procSnmp6.Udp6.InDatagrams = &value case "NoPorts": - procSnmp6.Udp6.NoPorts = value + procSnmp6.Udp6.NoPorts = &value case "InErrors": - procSnmp6.Udp6.InErrors = value + procSnmp6.Udp6.InErrors = &value case "OutDatagrams": - procSnmp6.Udp6.OutDatagrams = value + procSnmp6.Udp6.OutDatagrams = &value case "RcvbufErrors": - procSnmp6.Udp6.RcvbufErrors = value + procSnmp6.Udp6.RcvbufErrors = &value case "SndbufErrors": - procSnmp6.Udp6.SndbufErrors = value + procSnmp6.Udp6.SndbufErrors = &value case "InCsumErrors": - procSnmp6.Udp6.InCsumErrors = value + procSnmp6.Udp6.InCsumErrors = &value case "IgnoredMulti": - procSnmp6.Udp6.IgnoredMulti = value + procSnmp6.Udp6.IgnoredMulti = &value } case "UdpLite6": switch key { case "InDatagrams": - procSnmp6.UdpLite6.InDatagrams = value + procSnmp6.UdpLite6.InDatagrams = &value case "NoPorts": - procSnmp6.UdpLite6.NoPorts = value + procSnmp6.UdpLite6.NoPorts = &value case "InErrors": - procSnmp6.UdpLite6.InErrors = value + procSnmp6.UdpLite6.InErrors = &value case "OutDatagrams": - procSnmp6.UdpLite6.OutDatagrams = value + procSnmp6.UdpLite6.OutDatagrams = &value case "RcvbufErrors": - procSnmp6.UdpLite6.RcvbufErrors = value + procSnmp6.UdpLite6.RcvbufErrors = &value case "SndbufErrors": - procSnmp6.UdpLite6.SndbufErrors = value + procSnmp6.UdpLite6.SndbufErrors = &value case "InCsumErrors": - procSnmp6.UdpLite6.InCsumErrors = value + procSnmp6.UdpLite6.InCsumErrors = &value } } } diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go index 06c556e..b278eb2 100644 --- a/vendor/github.com/prometheus/procfs/proc_stat.go +++ b/vendor/github.com/prometheus/procfs/proc_stat.go @@ -102,6 +102,8 @@ type ProcStat struct { RSS int // Soft limit in bytes on the rss of the process. 
RSSLimit uint64 + // CPU number last executed on. + Processor uint // Real-time scheduling priority, a number in the range 1 to 99 for processes // scheduled under a real-time policy, or 0, for non-real-time processes. RTPriority uint @@ -184,7 +186,7 @@ func (p Proc) Stat() (ProcStat, error) { &ignoreUint64, &ignoreUint64, &ignoreInt64, - &ignoreInt64, + &s.Processor, &s.RTPriority, &s.Policy, &s.DelayAcctBlkIOTicks, diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go index 594022d..3d8c064 100644 --- a/vendor/github.com/prometheus/procfs/proc_status.go +++ b/vendor/github.com/prometheus/procfs/proc_status.go @@ -96,10 +96,10 @@ func (p Proc) NewStatus() (ProcStatus, error) { kv := strings.SplitN(line, ":", 2) // removes spaces - k := string(strings.TrimSpace(kv[0])) - v := string(strings.TrimSpace(kv[1])) + k := strings.TrimSpace(kv[0]) + v := strings.TrimSpace(kv[1]) // removes "kB" - v = string(bytes.Trim([]byte(v), " kB")) + v = strings.TrimSuffix(v, " kB") // value to int when possible // we can skip error check here, 'cause vKBytes is not used when value is a string diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go index 33f97ca..586af48 100644 --- a/vendor/github.com/prometheus/procfs/stat.go +++ b/vendor/github.com/prometheus/procfs/stat.go @@ -62,7 +62,7 @@ type Stat struct { // Summed up cpu statistics. CPUTotal CPUStat // Per-CPU statistics. - CPU []CPUStat + CPU map[int64]CPUStat // Number of times interrupts were handled, which contains numbered and unnumbered IRQs. IRQTotal uint64 // Number of times a numbered IRQ was triggered. @@ -170,10 +170,23 @@ func (fs FS) Stat() (Stat, error) { if err != nil { return Stat{}, err } + procStat, err := parseStat(bytes.NewReader(data), fileName) + if err != nil { + return Stat{}, err + } + return procStat, nil +} - stat := Stat{} +// parseStat parses the metrics from /proc/[pid]/stat. +func parseStat(r io.Reader, fileName string) (Stat, error) { + var ( + scanner = bufio.NewScanner(r) + stat = Stat{ + CPU: make(map[int64]CPUStat), + } + err error + ) - scanner := bufio.NewScanner(bytes.NewReader(data)) for scanner.Scan() { line := scanner.Text() parts := strings.Fields(scanner.Text()) @@ -228,9 +241,6 @@ func (fs FS) Stat() (Stat, error) { if cpuID == -1 { stat.CPUTotal = cpuStat } else { - for int64(len(stat.CPU)) <= cpuID { - stat.CPU = append(stat.CPU, CPUStat{}) - } stat.CPU[cpuID] = cpuStat } } diff --git a/vendor/github.com/prometheus/procfs/thread.go b/vendor/github.com/prometheus/procfs/thread.go new file mode 100644 index 0000000..f08bfc7 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/thread.go @@ -0,0 +1,79 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "os" + "strconv" + + fsi "github.com/prometheus/procfs/internal/fs" +) + +// Provide access to /proc/PID/task/TID files, for thread specific values. 
Since +// such files have the same structure as /proc/PID/ ones, the data structures +// and the parsers for the latter may be reused. + +// AllThreads returns a list of all currently available threads under /proc/PID. +func AllThreads(pid int) (Procs, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Procs{}, err + } + return fs.AllThreads(pid) +} + +// AllThreads returns a list of all currently available threads for PID. +func (fs FS) AllThreads(pid int) (Procs, error) { + taskPath := fs.proc.Path(strconv.Itoa(pid), "task") + d, err := os.Open(taskPath) + if err != nil { + return Procs{}, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return Procs{}, fmt.Errorf("could not read %q: %w", d.Name(), err) + } + + t := Procs{} + for _, n := range names { + tid, err := strconv.ParseInt(n, 10, 64) + if err != nil { + continue + } + t = append(t, Proc{PID: int(tid), fs: fsi.FS(taskPath)}) + } + + return t, nil +} + +// Thread returns a process for a given PID, TID. +func (fs FS) Thread(pid, tid int) (Proc, error) { + taskPath := fs.proc.Path(strconv.Itoa(pid), "task") + if _, err := os.Stat(taskPath); err != nil { + return Proc{}, err + } + return Proc{PID: tid, fs: fsi.FS(taskPath)}, nil +} + +// Thread returns a process for a given TID of Proc. +func (proc Proc) Thread(tid int) (Proc, error) { + tfs := fsi.FS(proc.path("task")) + if _, err := os.Stat(tfs.Path(strconv.Itoa(tid))); err != nil { + return Proc{}, err + } + return Proc{PID: tid, fs: tfs}, nil +} diff --git a/vendor/github.com/prometheus/procfs/vm.go b/vendor/github.com/prometheus/procfs/vm.go index 20ceb77..cdedcae 100644 --- a/vendor/github.com/prometheus/procfs/vm.go +++ b/vendor/github.com/prometheus/procfs/vm.go @@ -26,7 +26,9 @@ import ( ) // The VM interface is described at -// https://www.kernel.org/doc/Documentation/sysctl/vm.txt +// +// https://www.kernel.org/doc/Documentation/sysctl/vm.txt +// // Each setting is exposed as a single file. // Each file contains one line with a single numerical value, except lowmem_reserve_ratio which holds an array // and numa_zonelist_order (deprecated) which is a string. 
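Reviewer note (not part of the diff): the procfs hunks above make three consumer-visible changes — SNMP/netstat counters move from float64 to *float64 so a counter the kernel does not export (nil) can be told apart from a true zero, Stat.CPU becomes a map keyed by CPU ID instead of a slice, and the new thread.go exposes per-thread stats under /proc/PID/task. A minimal sketch of code against the updated API, assuming a Linux host with /proc mounted; the PID and field choices are illustrative:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		panic(err)
	}

	// Stat.CPU is now map[int64]CPUStat, so sparse or offline CPU IDs no
	// longer force placeholder slice entries.
	stat, err := fs.Stat()
	if err != nil {
		panic(err)
	}
	for id, cpu := range stat.CPU {
		fmt.Printf("cpu%d: user=%.2f system=%.2f\n", id, cpu.User, cpu.System)
	}

	proc, err := fs.Proc(1) // PID 1 chosen only for illustration
	if err != nil {
		panic(err)
	}

	// SNMP counters are now pointers: nil means the kernel did not export
	// the counter, which is different from an exported value of 0.
	snmp, err := proc.Snmp()
	if err != nil {
		panic(err)
	}
	if v := snmp.Tcp.CurrEstab; v != nil {
		fmt.Printf("CurrEstab=%.0f\n", *v)
	} else {
		fmt.Println("CurrEstab not exported by this kernel")
	}

	// ProcStat gains Processor, the CPU the process last executed on.
	ps, err := proc.Stat()
	if err != nil {
		panic(err)
	}
	fmt.Printf("pid %d last ran on cpu %d\n", ps.PID, ps.Processor)

	// thread.go adds per-thread access via /proc/PID/task/TID.
	threads, err := procfs.AllThreads(1)
	if err != nil {
		panic(err)
	}
	fmt.Printf("pid 1 has %d threads\n", len(threads))
}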
diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index fa1245b..2924cf3 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -8,7 +8,6 @@ import ( "fmt" "math" "os" - "path/filepath" "reflect" "regexp" "runtime" @@ -141,12 +140,11 @@ func CallerInfo() []string { } parts := strings.Split(file, "/") - file = parts[len(parts)-1] if len(parts) > 1 { + filename := parts[len(parts)-1] dir := parts[len(parts)-2] - if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" { - path, _ := filepath.Abs(file) - callers = append(callers, fmt.Sprintf("%s:%d", path, line)) + if (dir != "assert" && dir != "mock" && dir != "require") || filename == "mock_test.go" { + callers = append(callers, fmt.Sprintf("%s:%d", file, line)) } } @@ -530,7 +528,7 @@ func isNil(object interface{}) bool { []reflect.Kind{ reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, - reflect.Ptr, reflect.Slice}, + reflect.Ptr, reflect.Slice, reflect.UnsafePointer}, kind) if isNilableKind && value.IsNil() { @@ -818,49 +816,44 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok return true // we consider nil to be equal to the nil set } - defer func() { - if e := recover(); e != nil { - ok = false - } - }() - listKind := reflect.TypeOf(list).Kind() - subsetKind := reflect.TypeOf(subset).Kind() - if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) } + subsetKind := reflect.TypeOf(subset).Kind() if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) } - subsetValue := reflect.ValueOf(subset) if subsetKind == reflect.Map && listKind == reflect.Map { - listValue := reflect.ValueOf(list) - subsetKeys := subsetValue.MapKeys() + subsetMap := reflect.ValueOf(subset) + actualMap := reflect.ValueOf(list) - for i := 0; i < len(subsetKeys); i++ { - subsetKey := subsetKeys[i] - subsetElement := subsetValue.MapIndex(subsetKey).Interface() - listElement := listValue.MapIndex(subsetKey).Interface() + for _, k := range subsetMap.MapKeys() { + ev := subsetMap.MapIndex(k) + av := actualMap.MapIndex(k) - if !ObjectsAreEqual(subsetElement, listElement) { - return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, subsetElement), msgAndArgs...) + if !av.IsValid() { + return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, subset), msgAndArgs...) + } + if !ObjectsAreEqual(ev.Interface(), av.Interface()) { + return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, subset), msgAndArgs...) } } return true } - for i := 0; i < subsetValue.Len(); i++ { - element := subsetValue.Index(i).Interface() + subsetList := reflect.ValueOf(subset) + for i := 0; i < subsetList.Len(); i++ { + element := subsetList.Index(i).Interface() ok, found := containsElement(list, element) if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) + return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", list), msgAndArgs...) } if !found { - return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, element), msgAndArgs...) + return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, element), msgAndArgs...) 
} } @@ -879,34 +872,28 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) return Fail(t, "nil is the empty set which is a subset of every set", msgAndArgs...) } - defer func() { - if e := recover(); e != nil { - ok = false - } - }() - listKind := reflect.TypeOf(list).Kind() - subsetKind := reflect.TypeOf(subset).Kind() - if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) } + subsetKind := reflect.TypeOf(subset).Kind() if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) } - subsetValue := reflect.ValueOf(subset) if subsetKind == reflect.Map && listKind == reflect.Map { - listValue := reflect.ValueOf(list) - subsetKeys := subsetValue.MapKeys() + subsetMap := reflect.ValueOf(subset) + actualMap := reflect.ValueOf(list) - for i := 0; i < len(subsetKeys); i++ { - subsetKey := subsetKeys[i] - subsetElement := subsetValue.MapIndex(subsetKey).Interface() - listElement := listValue.MapIndex(subsetKey).Interface() + for _, k := range subsetMap.MapKeys() { + ev := subsetMap.MapIndex(k) + av := actualMap.MapIndex(k) - if !ObjectsAreEqual(subsetElement, listElement) { + if !av.IsValid() { + return true + } + if !ObjectsAreEqual(ev.Interface(), av.Interface()) { return true } } @@ -914,8 +901,9 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...) } - for i := 0; i < subsetValue.Len(); i++ { - element := subsetValue.Index(i).Interface() + subsetList := reflect.ValueOf(subset) + for i := 0; i < subsetList.Len(); i++ { + element := subsetList.Index(i).Interface() ok, found := containsElement(list, element) if !ok { return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) diff --git a/vendor/go.uber.org/multierr/CHANGELOG.md b/vendor/go.uber.org/multierr/CHANGELOG.md index 3ba0527..cfd2e6a 100644 --- a/vendor/go.uber.org/multierr/CHANGELOG.md +++ b/vendor/go.uber.org/multierr/CHANGELOG.md @@ -1,6 +1,22 @@ Releases ======== +v1.10.0 (2023-03-08) +==================== + +- Comply with Go 1.20's multiple-error interface. +- Drop Go 1.18 support. + Per the support policy, only Go 1.19 and 1.20 are supported now. +- Drop all non-test external dependencies. + +v1.9.0 (2022-12-12) +=================== + +- Add `AppendFunc` that allow passsing functions to similar to + `AppendInvoke`. + +- Bump up yaml.v3 dependency to 3.0.1. + v1.8.0 (2022-02-28) =================== diff --git a/vendor/go.uber.org/multierr/README.md b/vendor/go.uber.org/multierr/README.md index 70aacec..5ab6ac4 100644 --- a/vendor/go.uber.org/multierr/README.md +++ b/vendor/go.uber.org/multierr/README.md @@ -2,9 +2,29 @@ `multierr` allows combining one or more Go `error`s together. +## Features + +- **Idiomatic**: + multierr follows best practices in Go, and keeps your code idiomatic. + - It keeps the underlying error type hidden, + allowing you to deal in `error` values exclusively. + - It provides APIs to safely append into an error from a `defer` statement. +- **Performant**: + multierr is optimized for performance: + - It avoids allocations where possible. + - It utilizes slice resizing semantics to optimize common cases + like appending into the same error object from a loop. 
+- **Interoperable**: + multierr interoperates with the Go standard library's error APIs seamlessly: + - The `errors.Is` and `errors.As` functions *just work*. +- **Lightweight**: + multierr comes with virtually no dependencies. + ## Installation - go get -u go.uber.org/multierr +```bash +go get -u go.uber.org/multierr@latest +``` ## Status diff --git a/vendor/go.uber.org/multierr/error.go b/vendor/go.uber.org/multierr/error.go index f45af14..4ee4b9f 100644 --- a/vendor/go.uber.org/multierr/error.go +++ b/vendor/go.uber.org/multierr/error.go @@ -1,4 +1,4 @@ -// Copyright (c) 2017-2021 Uber Technologies, Inc. +// Copyright (c) 2017-2023 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -20,106 +20,109 @@ // Package multierr allows combining one or more errors together. // -// Overview +// # Overview // // Errors can be combined with the use of the Combine function. // -// multierr.Combine( -// reader.Close(), -// writer.Close(), -// conn.Close(), -// ) +// multierr.Combine( +// reader.Close(), +// writer.Close(), +// conn.Close(), +// ) // // If only two errors are being combined, the Append function may be used // instead. // -// err = multierr.Append(reader.Close(), writer.Close()) +// err = multierr.Append(reader.Close(), writer.Close()) // // The underlying list of errors for a returned error object may be retrieved // with the Errors function. // -// errors := multierr.Errors(err) -// if len(errors) > 0 { -// fmt.Println("The following errors occurred:", errors) -// } +// errors := multierr.Errors(err) +// if len(errors) > 0 { +// fmt.Println("The following errors occurred:", errors) +// } // -// Appending from a loop +// # Appending from a loop // // You sometimes need to append into an error from a loop. // -// var err error -// for _, item := range items { -// err = multierr.Append(err, process(item)) -// } +// var err error +// for _, item := range items { +// err = multierr.Append(err, process(item)) +// } // // Cases like this may require knowledge of whether an individual instance // failed. This usually requires introduction of a new variable. // -// var err error -// for _, item := range items { -// if perr := process(item); perr != nil { -// log.Warn("skipping item", item) -// err = multierr.Append(err, perr) -// } -// } +// var err error +// for _, item := range items { +// if perr := process(item); perr != nil { +// log.Warn("skipping item", item) +// err = multierr.Append(err, perr) +// } +// } // // multierr includes AppendInto to simplify cases like this. // -// var err error -// for _, item := range items { -// if multierr.AppendInto(&err, process(item)) { -// log.Warn("skipping item", item) -// } -// } +// var err error +// for _, item := range items { +// if multierr.AppendInto(&err, process(item)) { +// log.Warn("skipping item", item) +// } +// } // // This will append the error into the err variable, and return true if that // individual error was non-nil. // -// See AppendInto for more information. +// See [AppendInto] for more information. // -// Deferred Functions +// # Deferred Functions // // Go makes it possible to modify the return value of a function in a defer // block if the function was using named returns. This makes it possible to // record resource cleanup failures from deferred blocks. 
// -// func sendRequest(req Request) (err error) { -// conn, err := openConnection() -// if err != nil { -// return err -// } -// defer func() { -// err = multierr.Append(err, conn.Close()) -// }() -// // ... -// } +// func sendRequest(req Request) (err error) { +// conn, err := openConnection() +// if err != nil { +// return err +// } +// defer func() { +// err = multierr.Append(err, conn.Close()) +// }() +// // ... +// } // // multierr provides the Invoker type and AppendInvoke function to make cases // like the above simpler and obviate the need for a closure. The following is // roughly equivalent to the example above. // -// func sendRequest(req Request) (err error) { -// conn, err := openConnection() -// if err != nil { -// return err -// } -// defer multierr.AppendInvoke(&err, multierr.Close(conn)) -// // ... -// } +// func sendRequest(req Request) (err error) { +// conn, err := openConnection() +// if err != nil { +// return err +// } +// defer multierr.AppendInvoke(&err, multierr.Close(conn)) +// // ... +// } // -// See AppendInvoke and Invoker for more information. +// See [AppendInvoke] and [Invoker] for more information. // -// Advanced Usage +// NOTE: If you're modifying an error from inside a defer, you MUST use a named +// return value for that function. +// +// # Advanced Usage // // Errors returned by Combine and Append MAY implement the following // interface. // -// type errorGroup interface { -// // Returns a slice containing the underlying list of errors. -// // -// // This slice MUST NOT be modified by the caller. -// Errors() []error -// } +// type errorGroup interface { +// // Returns a slice containing the underlying list of errors. +// // +// // This slice MUST NOT be modified by the caller. +// Errors() []error +// } // // Note that if you need access to list of errors behind a multierr error, you // should prefer using the Errors function. That said, if you need cheap @@ -128,24 +131,22 @@ // because errors returned by Combine and Append are not guaranteed to // implement this interface. // -// var errors []error -// group, ok := err.(errorGroup) -// if ok { -// errors = group.Errors() -// } else { -// errors = []error{err} -// } +// var errors []error +// group, ok := err.(errorGroup) +// if ok { +// errors = group.Errors() +// } else { +// errors = []error{err} +// } package multierr // import "go.uber.org/multierr" import ( "bytes" - "errors" "fmt" "io" "strings" "sync" - - "go.uber.org/atomic" + "sync/atomic" ) var ( @@ -185,8 +186,8 @@ type errorGroup interface { // Errors returns a slice containing zero or more errors that the supplied // error is composed of. If the error is nil, a nil slice is returned. // -// err := multierr.Append(r.Close(), w.Close()) -// errors := multierr.Errors(err) +// err := multierr.Append(r.Close(), w.Close()) +// errors := multierr.Errors(err) // // If the error is not composed of other errors, the returned slice contains // just the error that was passed in. @@ -209,10 +210,7 @@ func Errors(err error) []error { return []error{err} } - errors := eg.Errors() - result := make([]error, len(errors)) - copy(result, errors) - return result + return append(([]error)(nil), eg.Errors()...) } // multiError is an error that holds one or more errors. @@ -239,33 +237,6 @@ func (merr *multiError) Errors() []error { return merr.errors } -// As attempts to find the first error in the error list that matches the type -// of the value that target points to. 
-// -// This function allows errors.As to traverse the values stored on the -// multierr error. -func (merr *multiError) As(target interface{}) bool { - for _, err := range merr.Errors() { - if errors.As(err, target) { - return true - } - } - return false -} - -// Is attempts to match the provided error against errors in the error list. -// -// This function allows errors.Is to traverse the values stored on the -// multierr error. -func (merr *multiError) Is(target error) bool { - for _, err := range merr.Errors() { - if errors.Is(err, target) { - return true - } - } - return false -} - func (merr *multiError) Error() string { if merr == nil { return "" @@ -393,8 +364,7 @@ func fromSlice(errors []error) error { // Otherwise "errors" escapes to the heap // unconditionally for all other cases. // This lets us optimize for the "no errors" case. - out := make([]error, len(errors)) - copy(out, errors) + out := append(([]error)(nil), errors...) return &multiError{errors: out} } } @@ -420,32 +390,32 @@ func fromSlice(errors []error) error { // If zero arguments were passed or if all items are nil, a nil error is // returned. // -// Combine(nil, nil) // == nil +// Combine(nil, nil) // == nil // // If only a single error was passed, it is returned as-is. // -// Combine(err) // == err +// Combine(err) // == err // // Combine skips over nil arguments so this function may be used to combine // together errors from operations that fail independently of each other. // -// multierr.Combine( -// reader.Close(), -// writer.Close(), -// pipe.Close(), -// ) +// multierr.Combine( +// reader.Close(), +// writer.Close(), +// pipe.Close(), +// ) // // If any of the passed errors is a multierr error, it will be flattened along // with the other errors. // -// multierr.Combine(multierr.Combine(err1, err2), err3) -// // is the same as -// multierr.Combine(err1, err2, err3) +// multierr.Combine(multierr.Combine(err1, err2), err3) +// // is the same as +// multierr.Combine(err1, err2, err3) // // The returned error formats into a readable multi-line error message if // formatted with %+v. // -// fmt.Sprintf("%+v", multierr.Combine(err1, err2)) +// fmt.Sprintf("%+v", multierr.Combine(err1, err2)) func Combine(errors ...error) error { return fromSlice(errors) } @@ -455,16 +425,19 @@ func Combine(errors ...error) error { // This function is a specialization of Combine for the common case where // there are only two errors. // -// err = multierr.Append(reader.Close(), writer.Close()) +// err = multierr.Append(reader.Close(), writer.Close()) // // The following pattern may also be used to record failure of deferred // operations without losing information about the original error. // -// func doSomething(..) (err error) { -// f := acquireResource() -// defer func() { -// err = multierr.Append(err, f.Close()) -// }() +// func doSomething(..) (err error) { +// f := acquireResource() +// defer func() { +// err = multierr.Append(err, f.Close()) +// }() +// +// Note that the variable MUST be a named return to append an error to it from +// the defer statement. See also [AppendInvoke]. func Append(left error, right error) error { switch { case left == nil: @@ -494,37 +467,37 @@ func Append(left error, right error) error { // AppendInto appends an error into the destination of an error pointer and // returns whether the error being appended was non-nil. 
// -// var err error -// multierr.AppendInto(&err, r.Close()) -// multierr.AppendInto(&err, w.Close()) +// var err error +// multierr.AppendInto(&err, r.Close()) +// multierr.AppendInto(&err, w.Close()) // // The above is equivalent to, // -// err := multierr.Append(r.Close(), w.Close()) +// err := multierr.Append(r.Close(), w.Close()) // // As AppendInto reports whether the provided error was non-nil, it may be // used to build a multierr error in a loop more ergonomically. For example: // -// var err error -// for line := range lines { -// var item Item -// if multierr.AppendInto(&err, parse(line, &item)) { -// continue -// } -// items = append(items, item) -// } +// var err error +// for line := range lines { +// var item Item +// if multierr.AppendInto(&err, parse(line, &item)) { +// continue +// } +// items = append(items, item) +// } // // Compare this with a version that relies solely on Append: // -// var err error -// for line := range lines { -// var item Item -// if parseErr := parse(line, &item); parseErr != nil { -// err = multierr.Append(err, parseErr) -// continue -// } -// items = append(items, item) -// } +// var err error +// for line := range lines { +// var item Item +// if parseErr := parse(line, &item); parseErr != nil { +// err = multierr.Append(err, parseErr) +// continue +// } +// items = append(items, item) +// } func AppendInto(into *error, err error) (errored bool) { if into == nil { // We panic if 'into' is nil. This is not documented above @@ -545,7 +518,7 @@ func AppendInto(into *error, err error) (errored bool) { // AppendInvoke to append the result of calling the function into an error. // This allows you to conveniently defer capture of failing operations. // -// See also, Close and Invoke. +// See also, [Close] and [Invoke]. type Invoker interface { Invoke() error } @@ -556,19 +529,22 @@ type Invoker interface { // // For example, // -// func processReader(r io.Reader) (err error) { -// scanner := bufio.NewScanner(r) -// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err)) -// for scanner.Scan() { -// // ... -// } -// // ... -// } +// func processReader(r io.Reader) (err error) { +// scanner := bufio.NewScanner(r) +// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err)) +// for scanner.Scan() { +// // ... +// } +// // ... +// } // // In this example, the following line will construct the Invoker right away, // but defer the invocation of scanner.Err() until the function returns. // -// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err)) +// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err)) +// +// Note that the error you're appending to from the defer statement MUST be a +// named return. type Invoke func() error // Invoke calls the supplied function and returns its result. @@ -579,19 +555,22 @@ func (i Invoke) Invoke() error { return i() } // // For example, // -// func processFile(path string) (err error) { -// f, err := os.Open(path) -// if err != nil { -// return err -// } -// defer multierr.AppendInvoke(&err, multierr.Close(f)) -// return processReader(f) -// } +// func processFile(path string) (err error) { +// f, err := os.Open(path) +// if err != nil { +// return err +// } +// defer multierr.AppendInvoke(&err, multierr.Close(f)) +// return processReader(f) +// } // // In this example, multierr.Close will construct the Invoker right away, but // defer the invocation of f.Close until the function returns. 
// -// defer multierr.AppendInvoke(&err, multierr.Close(f)) +// defer multierr.AppendInvoke(&err, multierr.Close(f)) +// +// Note that the error you're appending to from the defer statement MUST be a +// named return. func Close(closer io.Closer) Invoker { return Invoke(closer.Close) } @@ -601,52 +580,73 @@ func Close(closer io.Closer) Invoker { // invocation of fallible operations until a function returns, and capture the // resulting errors. // -// func doSomething(...) (err error) { -// // ... -// f, err := openFile(..) -// if err != nil { -// return err -// } +// func doSomething(...) (err error) { +// // ... +// f, err := openFile(..) +// if err != nil { +// return err +// } // -// // multierr will call f.Close() when this function returns and -// // if the operation fails, its append its error into the -// // returned error. -// defer multierr.AppendInvoke(&err, multierr.Close(f)) +// // multierr will call f.Close() when this function returns and +// // if the operation fails, its append its error into the +// // returned error. +// defer multierr.AppendInvoke(&err, multierr.Close(f)) // -// scanner := bufio.NewScanner(f) -// // Similarly, this scheduled scanner.Err to be called and -// // inspected when the function returns and append its error -// // into the returned error. -// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err)) +// scanner := bufio.NewScanner(f) +// // Similarly, this scheduled scanner.Err to be called and +// // inspected when the function returns and append its error +// // into the returned error. +// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err)) // -// // ... -// } +// // ... +// } +// +// NOTE: If used with a defer, the error variable MUST be a named return. // // Without defer, AppendInvoke behaves exactly like AppendInto. // -// err := // ... -// multierr.AppendInvoke(&err, mutltierr.Invoke(foo)) +// err := // ... +// multierr.AppendInvoke(&err, mutltierr.Invoke(foo)) // -// // ...is roughly equivalent to... +// // ...is roughly equivalent to... // -// err := // ... -// multierr.AppendInto(&err, foo()) +// err := // ... +// multierr.AppendInto(&err, foo()) // // The advantage of the indirection introduced by Invoker is to make it easy // to defer the invocation of a function. Without this indirection, the // invoked function will be evaluated at the time of the defer block rather // than when the function returns. // -// // BAD: This is likely not what the caller intended. This will evaluate -// // foo() right away and append its result into the error when the -// // function returns. -// defer multierr.AppendInto(&err, foo()) +// // BAD: This is likely not what the caller intended. This will evaluate +// // foo() right away and append its result into the error when the +// // function returns. +// defer multierr.AppendInto(&err, foo()) // -// // GOOD: This will defer invocation of foo unutil the function returns. -// defer multierr.AppendInvoke(&err, multierr.Invoke(foo)) +// // GOOD: This will defer invocation of foo unutil the function returns. +// defer multierr.AppendInvoke(&err, multierr.Invoke(foo)) // // multierr provides a few Invoker implementations out of the box for -// convenience. See Invoker for more information. +// convenience. See [Invoker] for more information. func AppendInvoke(into *error, invoker Invoker) { AppendInto(into, invoker.Invoke()) } + +// AppendFunc is a shorthand for [AppendInvoke]. +// It allows using function or method value directly +// without having to wrap it into an [Invoker] interface. 
+// +// func doSomething(...) (err error) { +// w, err := startWorker(...) +// if err != nil { +// return err +// } +// +// // multierr will call w.Stop() when this function returns and +// // if the operation fails, it appends its error into the +// // returned error. +// defer multierr.AppendFunc(&err, w.Stop) +// } +func AppendFunc(into *error, fn func() error) { + AppendInvoke(into, Invoke(fn)) +} diff --git a/vendor/go.uber.org/multierr/error_post_go120.go b/vendor/go.uber.org/multierr/error_post_go120.go new file mode 100644 index 0000000..0b00bec --- /dev/null +++ b/vendor/go.uber.org/multierr/error_post_go120.go @@ -0,0 +1,29 @@ +// Copyright (c) 2017-2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build go1.20 +// +build go1.20 + +package multierr + +// Unwrap returns a list of errors wrapped by this multierr. +func (merr *multiError) Unwrap() []error { + return merr.Errors() +} diff --git a/vendor/go.uber.org/multierr/error_pre_go120.go b/vendor/go.uber.org/multierr/error_pre_go120.go new file mode 100644 index 0000000..8da10f1 --- /dev/null +++ b/vendor/go.uber.org/multierr/error_pre_go120.go @@ -0,0 +1,59 @@ +// Copyright (c) 2017-2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build !go1.20 +// +build !go1.20 + +package multierr + +import "errors" + +// Versions of Go before 1.20 did not support the Unwrap() []error method. 
+// This provides a similar behavior by implementing the Is(..) and As(..) +// methods. +// See the errors.Join proposal for details: +// https://github.com/golang/go/issues/53435 + +// As attempts to find the first error in the error list that matches the type +// of the value that target points to. +// +// This function allows errors.As to traverse the values stored on the +// multierr error. +func (merr *multiError) As(target interface{}) bool { + for _, err := range merr.Errors() { + if errors.As(err, target) { + return true + } + } + return false +} + +// Is attempts to match the provided error against errors in the error list. +// +// This function allows errors.Is to traverse the values stored on the +// multierr error. +func (merr *multiError) Is(target error) bool { + for _, err := range merr.Errors() { + if errors.Is(err, target) { + return true + } + } + return false +} diff --git a/vendor/go.uber.org/multierr/glide.yaml b/vendor/go.uber.org/multierr/glide.yaml deleted file mode 100644 index 6ef084e..0000000 --- a/vendor/go.uber.org/multierr/glide.yaml +++ /dev/null @@ -1,8 +0,0 @@ -package: go.uber.org/multierr -import: -- package: go.uber.org/atomic - version: ^1 -testImport: -- package: github.com/stretchr/testify - subpackages: - - assert diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md index da36c37..0db1f9f 100644 --- a/vendor/go.uber.org/zap/CHANGELOG.md +++ b/vendor/go.uber.org/zap/CHANGELOG.md @@ -3,6 +3,19 @@ All notable changes to this project will be documented in this file. This project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). +## 1.24.0 (30 Nov 2022) + +Enhancements: +* [#1148][]: Add `Level` to both `Logger` and `SugaredLogger` that reports the + current minimum enabled log level. +* [#1185][]: `SugaredLogger` turns errors to zap.Error automatically. + +Thanks to @Abirdcfly, @craigpastro, @nnnkkk7, and @sashamelentyev for their +contributions to this release. + +[#1148]: https://github.coml/uber-go/zap/pull/1148 +[#1185]: https://github.coml/uber-go/zap/pull/1185 + ## 1.23.0 (24 Aug 2022) Enhancements: diff --git a/vendor/go.uber.org/zap/array_go118.go b/vendor/go.uber.org/zap/array_go118.go index ea94f9d..d0d2c49 100644 --- a/vendor/go.uber.org/zap/array_go118.go +++ b/vendor/go.uber.org/zap/array_go118.go @@ -76,9 +76,9 @@ func (os objects[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error { return nil } -// objectMarshalerPtr is a constraint that specifies that the given type +// ObjectMarshalerPtr is a constraint that specifies that the given type // implements zapcore.ObjectMarshaler on a pointer receiver. -type objectMarshalerPtr[T any] interface { +type ObjectMarshalerPtr[T any] interface { *T zapcore.ObjectMarshaler } @@ -105,11 +105,11 @@ type objectMarshalerPtr[T any] interface { // // var requests []*Request = ... 
// logger.Info("sending requests", zap.Objects("requests", requests)) -func ObjectValues[T any, P objectMarshalerPtr[T]](key string, values []T) Field { +func ObjectValues[T any, P ObjectMarshalerPtr[T]](key string, values []T) Field { return Array(key, objectValues[T, P](values)) } -type objectValues[T any, P objectMarshalerPtr[T]] []T +type objectValues[T any, P ObjectMarshalerPtr[T]] []T func (os objectValues[T, P]) MarshalLogArray(arr zapcore.ArrayEncoder) error { for i := range os { diff --git a/vendor/go.uber.org/zap/logger.go b/vendor/go.uber.org/zap/logger.go index b5f9a99..cd44030 100644 --- a/vendor/go.uber.org/zap/logger.go +++ b/vendor/go.uber.org/zap/logger.go @@ -183,6 +183,13 @@ func (log *Logger) With(fields ...Field) *Logger { return l } +// Level reports the minimum enabled level for this logger. +// +// For NopLoggers, this is [zapcore.InvalidLevel]. +func (log *Logger) Level() zapcore.Level { + return zapcore.LevelOf(log.core) +} + // Check returns a CheckedEntry if logging a message at the specified level // is enabled. It's a completely optional optimization; in high-performance // applications, Check can help avoid allocating a slice to hold fields. diff --git a/vendor/go.uber.org/zap/options.go b/vendor/go.uber.org/zap/options.go index 1511166..c4f3bca 100644 --- a/vendor/go.uber.org/zap/options.go +++ b/vendor/go.uber.org/zap/options.go @@ -133,7 +133,8 @@ func IncreaseLevel(lvl zapcore.LevelEnabler) Option { } // OnFatal sets the action to take on fatal logs. -// Deprecated: Use WithFatalHook instead. +// +// Deprecated: Use [WithFatalHook] instead. func OnFatal(action zapcore.CheckWriteAction) Option { return WithFatalHook(action) } diff --git a/vendor/go.uber.org/zap/sink.go b/vendor/go.uber.org/zap/sink.go index df46fa8..478c9a1 100644 --- a/vendor/go.uber.org/zap/sink.go +++ b/vendor/go.uber.org/zap/sink.go @@ -1,4 +1,4 @@ -// Copyright (c) 2016 Uber Technologies, Inc. +// Copyright (c) 2016-2022 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -26,6 +26,7 @@ import ( "io" "net/url" "os" + "path/filepath" "strings" "sync" @@ -34,23 +35,7 @@ import ( const schemeFile = "file" -var ( - _sinkMutex sync.RWMutex - _sinkFactories map[string]func(*url.URL) (Sink, error) // keyed by scheme -) - -func init() { - resetSinkRegistry() -} - -func resetSinkRegistry() { - _sinkMutex.Lock() - defer _sinkMutex.Unlock() - - _sinkFactories = map[string]func(*url.URL) (Sink, error){ - schemeFile: newFileSink, - } -} +var _sinkRegistry = newSinkRegistry() // Sink defines the interface to write to and close logger destinations. type Sink interface { @@ -58,10 +43,6 @@ type Sink interface { io.Closer } -type nopCloserSink struct{ zapcore.WriteSyncer } - -func (nopCloserSink) Close() error { return nil } - type errSinkNotFound struct { scheme string } @@ -70,16 +51,29 @@ func (e *errSinkNotFound) Error() string { return fmt.Sprintf("no sink found for scheme %q", e.scheme) } -// RegisterSink registers a user-supplied factory for all sinks with a -// particular scheme. -// -// All schemes must be ASCII, valid under section 3.1 of RFC 3986 -// (https://tools.ietf.org/html/rfc3986#section-3.1), and must not already -// have a factory registered. Zap automatically registers a factory for the -// "file" scheme. 
-func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
-	_sinkMutex.Lock()
-	defer _sinkMutex.Unlock()
+type nopCloserSink struct{ zapcore.WriteSyncer }
+
+func (nopCloserSink) Close() error { return nil }
+
+type sinkRegistry struct {
+	mu        sync.Mutex
+	factories map[string]func(*url.URL) (Sink, error)          // keyed by scheme
+	openFile  func(string, int, os.FileMode) (*os.File, error) // type matches os.OpenFile
+}
+
+func newSinkRegistry() *sinkRegistry {
+	sr := &sinkRegistry{
+		factories: make(map[string]func(*url.URL) (Sink, error)),
+		openFile:  os.OpenFile,
+	}
+	sr.RegisterSink(schemeFile, sr.newFileSinkFromURL)
+	return sr
+}
+
+// RegisterSink registers the given factory for the specific scheme.
+func (sr *sinkRegistry) RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
+	sr.mu.Lock()
+	defer sr.mu.Unlock()
 
 	if scheme == "" {
 		return errors.New("can't register a sink factory for empty string")
@@ -88,14 +82,22 @@ func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
 	if err != nil {
 		return fmt.Errorf("%q is not a valid scheme: %v", scheme, err)
 	}
-	if _, ok := _sinkFactories[normalized]; ok {
+	if _, ok := sr.factories[normalized]; ok {
 		return fmt.Errorf("sink factory already registered for scheme %q", normalized)
 	}
-	_sinkFactories[normalized] = factory
+	sr.factories[normalized] = factory
 	return nil
 }
 
-func newSink(rawURL string) (Sink, error) {
+func (sr *sinkRegistry) newSink(rawURL string) (Sink, error) {
+	// URL parsing doesn't work well for Windows paths such as `c:\log.txt`, as scheme is set to
+	// the drive, and path is unset unless `c:/log.txt` is used.
+	// To avoid Windows-specific URL handling, we instead check IsAbs to open as a file.
+	// filepath.IsAbs is OS-specific, so IsAbs('c:/log.txt') is false outside of Windows.
+	if filepath.IsAbs(rawURL) {
+		return sr.newFileSinkFromPath(rawURL)
+	}
+
 	u, err := url.Parse(rawURL)
 	if err != nil {
 		return nil, fmt.Errorf("can't parse %q as a URL: %v", rawURL, err)
@@ -104,16 +106,27 @@ func newSink(rawURL string) (Sink, error) {
 		u.Scheme = schemeFile
 	}
 
-	_sinkMutex.RLock()
-	factory, ok := _sinkFactories[u.Scheme]
-	_sinkMutex.RUnlock()
+	sr.mu.Lock()
+	factory, ok := sr.factories[u.Scheme]
+	sr.mu.Unlock()
 	if !ok {
 		return nil, &errSinkNotFound{u.Scheme}
 	}
 	return factory(u)
 }
 
-func newFileSink(u *url.URL) (Sink, error) {
+// RegisterSink registers a user-supplied factory for all sinks with a
+// particular scheme.
+//
+// All schemes must be ASCII, valid under section 3.1 of RFC 3986
+// (https://tools.ietf.org/html/rfc3986#section-3.1), and must not already
+// have a factory registered. Zap automatically registers a factory for the
+// "file" scheme.
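As a usage sketch of the RegisterSink API documented above and wrapped just below; the "memory" scheme and discardSink are illustrative, not part of zap:

```go
package main

import (
	"net/url"

	"go.uber.org/zap"
)

// discardSink is an illustrative zap.Sink: it accepts all writes and
// treats Sync and Close as no-ops.
type discardSink struct{}

func (discardSink) Write(p []byte) (int, error) { return len(p), nil }
func (discardSink) Sync() error                 { return nil }
func (discardSink) Close() error                { return nil }

func main() {
	// Register a factory for the "memory" scheme once, at startup.
	if err := zap.RegisterSink("memory", func(*url.URL) (zap.Sink, error) {
		return discardSink{}, nil
	}); err != nil {
		panic(err)
	}

	cfg := zap.NewProductionConfig()
	cfg.OutputPaths = []string{"memory://"}
	logger, err := cfg.Build()
	if err != nil {
		panic(err)
	}
	logger.Info("routed through the custom sink")
}
```
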
+func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error { + return _sinkRegistry.RegisterSink(scheme, factory) +} + +func (sr *sinkRegistry) newFileSinkFromURL(u *url.URL) (Sink, error) { if u.User != nil { return nil, fmt.Errorf("user and password not allowed with file URLs: got %v", u) } @@ -130,13 +143,18 @@ func newFileSink(u *url.URL) (Sink, error) { if hn := u.Hostname(); hn != "" && hn != "localhost" { return nil, fmt.Errorf("file URLs must leave host empty or use localhost: got %v", u) } - switch u.Path { + + return sr.newFileSinkFromPath(u.Path) +} + +func (sr *sinkRegistry) newFileSinkFromPath(path string) (Sink, error) { + switch path { case "stdout": return nopCloserSink{os.Stdout}, nil case "stderr": return nopCloserSink{os.Stderr}, nil } - return os.OpenFile(u.Path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666) + return sr.openFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666) } func normalizeScheme(s string) (string, error) { diff --git a/vendor/go.uber.org/zap/stacktrace.go b/vendor/go.uber.org/zap/stacktrace.go index 3d187fa..817a3bd 100644 --- a/vendor/go.uber.org/zap/stacktrace.go +++ b/vendor/go.uber.org/zap/stacktrace.go @@ -154,7 +154,7 @@ func newStackFormatter(b *buffer.Buffer) stackFormatter { // the final runtime.main/runtime.goexit frame. func (sf *stackFormatter) FormatStack(stack *stacktrace) { // Note: On the last iteration, frames.Next() returns false, with a valid - // frame, but we ignore this frame. The last frame is a a runtime frame which + // frame, but we ignore this frame. The last frame is a runtime frame which // adds noise, since it's only either runtime.main or runtime.goexit. for frame, more := stack.Next(); more; frame, more = stack.Next() { sf.FormatFrame(frame) diff --git a/vendor/go.uber.org/zap/sugar.go b/vendor/go.uber.org/zap/sugar.go index c450b2d..ac387b3 100644 --- a/vendor/go.uber.org/zap/sugar.go +++ b/vendor/go.uber.org/zap/sugar.go @@ -31,6 +31,7 @@ import ( const ( _oddNumberErrMsg = "Ignored key without a value." _nonStringKeyErrMsg = "Ignored key-value pairs with non-string keys." + _multipleErrMsg = "Multiple errors without a key." ) // A SugaredLogger wraps the base Logger functionality in a slower, but less @@ -114,6 +115,13 @@ func (s *SugaredLogger) With(args ...interface{}) *SugaredLogger { return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)} } +// Level reports the minimum enabled level for this logger. +// +// For NopLoggers, this is [zapcore.InvalidLevel]. +func (s *SugaredLogger) Level() zapcore.Level { + return zapcore.LevelOf(s.base.core) +} + // Debug uses fmt.Sprint to construct and log a message. func (s *SugaredLogger) Debug(args ...interface{}) { s.log(DebugLevel, "", args, nil) @@ -329,10 +337,13 @@ func (s *SugaredLogger) sweetenFields(args []interface{}) []Field { return nil } - // Allocate enough space for the worst case; if users pass only structured - // fields, we shouldn't penalize them with extra allocations. - fields := make([]Field, 0, len(args)) - var invalid invalidPairs + var ( + // Allocate enough space for the worst case; if users pass only structured + // fields, we shouldn't penalize them with extra allocations. + fields = make([]Field, 0, len(args)) + invalid invalidPairs + seenError bool + ) for i := 0; i < len(args); { // This is a strongly-typed field. Consume it and move on. @@ -342,6 +353,18 @@ func (s *SugaredLogger) sweetenFields(args []interface{}) []Field { continue } + // If it is an error, consume it and move on. 
+ if err, ok := args[i].(error); ok { + if !seenError { + seenError = true + fields = append(fields, Error(err)) + } else { + s.base.Error(_multipleErrMsg, Error(err)) + } + i++ + continue + } + // Make sure this element isn't a dangling key. if i == len(args)-1 { s.base.Error(_oddNumberErrMsg, Any("ignored", args[i])) diff --git a/vendor/go.uber.org/zap/writer.go b/vendor/go.uber.org/zap/writer.go index 2c3dbd4..f08728e 100644 --- a/vendor/go.uber.org/zap/writer.go +++ b/vendor/go.uber.org/zap/writer.go @@ -68,7 +68,7 @@ func open(paths []string) ([]zapcore.WriteSyncer, func(), error) { var openErr error for _, path := range paths { - sink, err := newSink(path) + sink, err := _sinkRegistry.newSink(path) if err != nil { openErr = multierr.Append(openErr, fmt.Errorf("open sink %q: %w", path, err)) continue diff --git a/vendor/go.uber.org/zap/zapcore/entry.go b/vendor/go.uber.org/zap/zapcore/entry.go index ea0431e..9d326e9 100644 --- a/vendor/go.uber.org/zap/zapcore/entry.go +++ b/vendor/go.uber.org/zap/zapcore/entry.go @@ -281,7 +281,8 @@ func (ce *CheckedEntry) AddCore(ent Entry, core Core) *CheckedEntry { // Should sets this CheckedEntry's CheckWriteAction, which controls whether a // Core will panic or fatal after writing this log entry. Like AddCore, it's // safe to call on nil CheckedEntry references. -// Deprecated: Use After(ent Entry, after CheckWriteHook) instead. +// +// Deprecated: Use [CheckedEntry.After] instead. func (ce *CheckedEntry) Should(ent Entry, should CheckWriteAction) *CheckedEntry { return ce.After(ent, should) } diff --git a/vendor/golang.org/x/crypto/pkcs12/bmp-string.go b/vendor/golang.org/x/crypto/pkcs12/bmp-string.go deleted file mode 100644 index 233b8b6..0000000 --- a/vendor/golang.org/x/crypto/pkcs12/bmp-string.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pkcs12 - -import ( - "errors" - "unicode/utf16" -) - -// bmpString returns s encoded in UCS-2 with a zero terminator. -func bmpString(s string) ([]byte, error) { - // References: - // https://tools.ietf.org/html/rfc7292#appendix-B.1 - // https://en.wikipedia.org/wiki/Plane_(Unicode)#Basic_Multilingual_Plane - // - non-BMP characters are encoded in UTF 16 by using a surrogate pair of 16-bit codes - // EncodeRune returns 0xfffd if the rune does not need special encoding - // - the above RFC provides the info that BMPStrings are NULL terminated. 
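Returning to the sugar.go hunk above: a short sketch of what the new error consumption (together with the Level accessors added in logger.go and sugar.go) looks like from the caller's side. The logger preset, message, and error are illustrative:

```go
package main

import (
	"errors"
	"fmt"

	"go.uber.org/zap"
)

func main() {
	logger, _ := zap.NewDevelopment()
	defer logger.Sync()
	sugar := logger.Sugar()

	// New in zap 1.24: Level reports the minimum enabled level.
	fmt.Println(sugar.Level()) // debug

	// Also new in 1.24: a bare error among the loosely-typed key-value
	// pairs is consumed as if the caller had written zap.Error(err).
	err := errors.New("connection refused")
	sugar.Infow("dial failed", err, "retry", true)
}
```
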
- - ret := make([]byte, 0, 2*len(s)+2) - - for _, r := range s { - if t, _ := utf16.EncodeRune(r); t != 0xfffd { - return nil, errors.New("pkcs12: string contains characters that cannot be encoded in UCS-2") - } - ret = append(ret, byte(r/256), byte(r%256)) - } - - return append(ret, 0, 0), nil -} - -func decodeBMPString(bmpString []byte) (string, error) { - if len(bmpString)%2 != 0 { - return "", errors.New("pkcs12: odd-length BMP string") - } - - // strip terminator if present - if l := len(bmpString); l >= 2 && bmpString[l-1] == 0 && bmpString[l-2] == 0 { - bmpString = bmpString[:l-2] - } - - s := make([]uint16, 0, len(bmpString)/2) - for len(bmpString) > 0 { - s = append(s, uint16(bmpString[0])<<8+uint16(bmpString[1])) - bmpString = bmpString[2:] - } - - return string(utf16.Decode(s)), nil -} diff --git a/vendor/golang.org/x/crypto/pkcs12/crypto.go b/vendor/golang.org/x/crypto/pkcs12/crypto.go deleted file mode 100644 index 96f4a1a..0000000 --- a/vendor/golang.org/x/crypto/pkcs12/crypto.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pkcs12 - -import ( - "bytes" - "crypto/cipher" - "crypto/des" - "crypto/x509/pkix" - "encoding/asn1" - "errors" - - "golang.org/x/crypto/pkcs12/internal/rc2" -) - -var ( - oidPBEWithSHAAnd3KeyTripleDESCBC = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 3}) - oidPBEWithSHAAnd40BitRC2CBC = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 6}) -) - -// pbeCipher is an abstraction of a PKCS#12 cipher. -type pbeCipher interface { - // create returns a cipher.Block given a key. - create(key []byte) (cipher.Block, error) - // deriveKey returns a key derived from the given password and salt. - deriveKey(salt, password []byte, iterations int) []byte - // deriveKey returns an IV derived from the given password and salt. 
- deriveIV(salt, password []byte, iterations int) []byte -} - -type shaWithTripleDESCBC struct{} - -func (shaWithTripleDESCBC) create(key []byte) (cipher.Block, error) { - return des.NewTripleDESCipher(key) -} - -func (shaWithTripleDESCBC) deriveKey(salt, password []byte, iterations int) []byte { - return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 1, 24) -} - -func (shaWithTripleDESCBC) deriveIV(salt, password []byte, iterations int) []byte { - return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 2, 8) -} - -type shaWith40BitRC2CBC struct{} - -func (shaWith40BitRC2CBC) create(key []byte) (cipher.Block, error) { - return rc2.New(key, len(key)*8) -} - -func (shaWith40BitRC2CBC) deriveKey(salt, password []byte, iterations int) []byte { - return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 1, 5) -} - -func (shaWith40BitRC2CBC) deriveIV(salt, password []byte, iterations int) []byte { - return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 2, 8) -} - -type pbeParams struct { - Salt []byte - Iterations int -} - -func pbDecrypterFor(algorithm pkix.AlgorithmIdentifier, password []byte) (cipher.BlockMode, int, error) { - var cipherType pbeCipher - - switch { - case algorithm.Algorithm.Equal(oidPBEWithSHAAnd3KeyTripleDESCBC): - cipherType = shaWithTripleDESCBC{} - case algorithm.Algorithm.Equal(oidPBEWithSHAAnd40BitRC2CBC): - cipherType = shaWith40BitRC2CBC{} - default: - return nil, 0, NotImplementedError("algorithm " + algorithm.Algorithm.String() + " is not supported") - } - - var params pbeParams - if err := unmarshal(algorithm.Parameters.FullBytes, ¶ms); err != nil { - return nil, 0, err - } - - key := cipherType.deriveKey(params.Salt, password, params.Iterations) - iv := cipherType.deriveIV(params.Salt, password, params.Iterations) - - block, err := cipherType.create(key) - if err != nil { - return nil, 0, err - } - - return cipher.NewCBCDecrypter(block, iv), block.BlockSize(), nil -} - -func pbDecrypt(info decryptable, password []byte) (decrypted []byte, err error) { - cbc, blockSize, err := pbDecrypterFor(info.Algorithm(), password) - if err != nil { - return nil, err - } - - encrypted := info.Data() - if len(encrypted) == 0 { - return nil, errors.New("pkcs12: empty encrypted data") - } - if len(encrypted)%blockSize != 0 { - return nil, errors.New("pkcs12: input is not a multiple of the block size") - } - decrypted = make([]byte, len(encrypted)) - cbc.CryptBlocks(decrypted, encrypted) - - psLen := int(decrypted[len(decrypted)-1]) - if psLen == 0 || psLen > blockSize { - return nil, ErrDecryption - } - - if len(decrypted) < psLen { - return nil, ErrDecryption - } - ps := decrypted[len(decrypted)-psLen:] - decrypted = decrypted[:len(decrypted)-psLen] - if !bytes.Equal(ps, bytes.Repeat([]byte{byte(psLen)}, psLen)) { - return nil, ErrDecryption - } - - return -} - -// decryptable abstracts an object that contains ciphertext. -type decryptable interface { - Algorithm() pkix.AlgorithmIdentifier - Data() []byte -} diff --git a/vendor/golang.org/x/crypto/pkcs12/errors.go b/vendor/golang.org/x/crypto/pkcs12/errors.go deleted file mode 100644 index 7377ce6..0000000 --- a/vendor/golang.org/x/crypto/pkcs12/errors.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pkcs12 - -import "errors" - -var ( - // ErrDecryption represents a failure to decrypt the input. 
- ErrDecryption = errors.New("pkcs12: decryption error, incorrect padding") - - // ErrIncorrectPassword is returned when an incorrect password is detected. - // Usually, P12/PFX data is signed to be able to verify the password. - ErrIncorrectPassword = errors.New("pkcs12: decryption password incorrect") -) - -// NotImplementedError indicates that the input is not currently supported. -type NotImplementedError string - -func (e NotImplementedError) Error() string { - return "pkcs12: " + string(e) -} diff --git a/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go b/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go deleted file mode 100644 index 7499e3f..0000000 --- a/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go +++ /dev/null @@ -1,271 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package rc2 implements the RC2 cipher -/* -https://www.ietf.org/rfc/rfc2268.txt -http://people.csail.mit.edu/rivest/pubs/KRRR98.pdf - -This code is licensed under the MIT license. -*/ -package rc2 - -import ( - "crypto/cipher" - "encoding/binary" -) - -// The rc2 block size in bytes -const BlockSize = 8 - -type rc2Cipher struct { - k [64]uint16 -} - -// New returns a new rc2 cipher with the given key and effective key length t1 -func New(key []byte, t1 int) (cipher.Block, error) { - // TODO(dgryski): error checking for key length - return &rc2Cipher{ - k: expandKey(key, t1), - }, nil -} - -func (*rc2Cipher) BlockSize() int { return BlockSize } - -var piTable = [256]byte{ - 0xd9, 0x78, 0xf9, 0xc4, 0x19, 0xdd, 0xb5, 0xed, 0x28, 0xe9, 0xfd, 0x79, 0x4a, 0xa0, 0xd8, 0x9d, - 0xc6, 0x7e, 0x37, 0x83, 0x2b, 0x76, 0x53, 0x8e, 0x62, 0x4c, 0x64, 0x88, 0x44, 0x8b, 0xfb, 0xa2, - 0x17, 0x9a, 0x59, 0xf5, 0x87, 0xb3, 0x4f, 0x13, 0x61, 0x45, 0x6d, 0x8d, 0x09, 0x81, 0x7d, 0x32, - 0xbd, 0x8f, 0x40, 0xeb, 0x86, 0xb7, 0x7b, 0x0b, 0xf0, 0x95, 0x21, 0x22, 0x5c, 0x6b, 0x4e, 0x82, - 0x54, 0xd6, 0x65, 0x93, 0xce, 0x60, 0xb2, 0x1c, 0x73, 0x56, 0xc0, 0x14, 0xa7, 0x8c, 0xf1, 0xdc, - 0x12, 0x75, 0xca, 0x1f, 0x3b, 0xbe, 0xe4, 0xd1, 0x42, 0x3d, 0xd4, 0x30, 0xa3, 0x3c, 0xb6, 0x26, - 0x6f, 0xbf, 0x0e, 0xda, 0x46, 0x69, 0x07, 0x57, 0x27, 0xf2, 0x1d, 0x9b, 0xbc, 0x94, 0x43, 0x03, - 0xf8, 0x11, 0xc7, 0xf6, 0x90, 0xef, 0x3e, 0xe7, 0x06, 0xc3, 0xd5, 0x2f, 0xc8, 0x66, 0x1e, 0xd7, - 0x08, 0xe8, 0xea, 0xde, 0x80, 0x52, 0xee, 0xf7, 0x84, 0xaa, 0x72, 0xac, 0x35, 0x4d, 0x6a, 0x2a, - 0x96, 0x1a, 0xd2, 0x71, 0x5a, 0x15, 0x49, 0x74, 0x4b, 0x9f, 0xd0, 0x5e, 0x04, 0x18, 0xa4, 0xec, - 0xc2, 0xe0, 0x41, 0x6e, 0x0f, 0x51, 0xcb, 0xcc, 0x24, 0x91, 0xaf, 0x50, 0xa1, 0xf4, 0x70, 0x39, - 0x99, 0x7c, 0x3a, 0x85, 0x23, 0xb8, 0xb4, 0x7a, 0xfc, 0x02, 0x36, 0x5b, 0x25, 0x55, 0x97, 0x31, - 0x2d, 0x5d, 0xfa, 0x98, 0xe3, 0x8a, 0x92, 0xae, 0x05, 0xdf, 0x29, 0x10, 0x67, 0x6c, 0xba, 0xc9, - 0xd3, 0x00, 0xe6, 0xcf, 0xe1, 0x9e, 0xa8, 0x2c, 0x63, 0x16, 0x01, 0x3f, 0x58, 0xe2, 0x89, 0xa9, - 0x0d, 0x38, 0x34, 0x1b, 0xab, 0x33, 0xff, 0xb0, 0xbb, 0x48, 0x0c, 0x5f, 0xb9, 0xb1, 0xcd, 0x2e, - 0xc5, 0xf3, 0xdb, 0x47, 0xe5, 0xa5, 0x9c, 0x77, 0x0a, 0xa6, 0x20, 0x68, 0xfe, 0x7f, 0xc1, 0xad, -} - -func expandKey(key []byte, t1 int) [64]uint16 { - - l := make([]byte, 128) - copy(l, key) - - var t = len(key) - var t8 = (t1 + 7) / 8 - var tm = byte(255 % uint(1<<(8+uint(t1)-8*uint(t8)))) - - for i := len(key); i < 128; i++ { - l[i] = piTable[l[i-1]+l[uint8(i-t)]] - } - - l[128-t8] = piTable[l[128-t8]&tm] - - for i := 127 - t8; i >= 0; i-- { - l[i] = 
piTable[l[i+1]^l[i+t8]] - } - - var k [64]uint16 - - for i := range k { - k[i] = uint16(l[2*i]) + uint16(l[2*i+1])*256 - } - - return k -} - -func rotl16(x uint16, b uint) uint16 { - return (x >> (16 - b)) | (x << b) -} - -func (c *rc2Cipher) Encrypt(dst, src []byte) { - - r0 := binary.LittleEndian.Uint16(src[0:]) - r1 := binary.LittleEndian.Uint16(src[2:]) - r2 := binary.LittleEndian.Uint16(src[4:]) - r3 := binary.LittleEndian.Uint16(src[6:]) - - var j int - - for j <= 16 { - // mix r0 - r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1) - r0 = rotl16(r0, 1) - j++ - - // mix r1 - r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2) - r1 = rotl16(r1, 2) - j++ - - // mix r2 - r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3) - r2 = rotl16(r2, 3) - j++ - - // mix r3 - r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0) - r3 = rotl16(r3, 5) - j++ - - } - - r0 = r0 + c.k[r3&63] - r1 = r1 + c.k[r0&63] - r2 = r2 + c.k[r1&63] - r3 = r3 + c.k[r2&63] - - for j <= 40 { - // mix r0 - r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1) - r0 = rotl16(r0, 1) - j++ - - // mix r1 - r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2) - r1 = rotl16(r1, 2) - j++ - - // mix r2 - r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3) - r2 = rotl16(r2, 3) - j++ - - // mix r3 - r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0) - r3 = rotl16(r3, 5) - j++ - - } - - r0 = r0 + c.k[r3&63] - r1 = r1 + c.k[r0&63] - r2 = r2 + c.k[r1&63] - r3 = r3 + c.k[r2&63] - - for j <= 60 { - // mix r0 - r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1) - r0 = rotl16(r0, 1) - j++ - - // mix r1 - r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2) - r1 = rotl16(r1, 2) - j++ - - // mix r2 - r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3) - r2 = rotl16(r2, 3) - j++ - - // mix r3 - r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0) - r3 = rotl16(r3, 5) - j++ - } - - binary.LittleEndian.PutUint16(dst[0:], r0) - binary.LittleEndian.PutUint16(dst[2:], r1) - binary.LittleEndian.PutUint16(dst[4:], r2) - binary.LittleEndian.PutUint16(dst[6:], r3) -} - -func (c *rc2Cipher) Decrypt(dst, src []byte) { - - r0 := binary.LittleEndian.Uint16(src[0:]) - r1 := binary.LittleEndian.Uint16(src[2:]) - r2 := binary.LittleEndian.Uint16(src[4:]) - r3 := binary.LittleEndian.Uint16(src[6:]) - - j := 63 - - for j >= 44 { - // unmix r3 - r3 = rotl16(r3, 16-5) - r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0) - j-- - - // unmix r2 - r2 = rotl16(r2, 16-3) - r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3) - j-- - - // unmix r1 - r1 = rotl16(r1, 16-2) - r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2) - j-- - - // unmix r0 - r0 = rotl16(r0, 16-1) - r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1) - j-- - } - - r3 = r3 - c.k[r2&63] - r2 = r2 - c.k[r1&63] - r1 = r1 - c.k[r0&63] - r0 = r0 - c.k[r3&63] - - for j >= 20 { - // unmix r3 - r3 = rotl16(r3, 16-5) - r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0) - j-- - - // unmix r2 - r2 = rotl16(r2, 16-3) - r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3) - j-- - - // unmix r1 - r1 = rotl16(r1, 16-2) - r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2) - j-- - - // unmix r0 - r0 = rotl16(r0, 16-1) - r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1) - j-- - - } - - r3 = r3 - c.k[r2&63] - r2 = r2 - c.k[r1&63] - r1 = r1 - c.k[r0&63] - r0 = r0 - c.k[r3&63] - - for j >= 0 { - // unmix r3 - r3 = rotl16(r3, 16-5) - r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0) - j-- - - // unmix r2 - r2 = rotl16(r2, 16-3) - r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3) - j-- - - // unmix r1 - r1 = rotl16(r1, 16-2) - r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2) - j-- - - // unmix r0 - r0 = rotl16(r0, 16-1) - r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1) - j-- - - } - - 
binary.LittleEndian.PutUint16(dst[0:], r0) - binary.LittleEndian.PutUint16(dst[2:], r1) - binary.LittleEndian.PutUint16(dst[4:], r2) - binary.LittleEndian.PutUint16(dst[6:], r3) -} diff --git a/vendor/golang.org/x/crypto/pkcs12/mac.go b/vendor/golang.org/x/crypto/pkcs12/mac.go deleted file mode 100644 index 5f38aa7..0000000 --- a/vendor/golang.org/x/crypto/pkcs12/mac.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pkcs12 - -import ( - "crypto/hmac" - "crypto/sha1" - "crypto/x509/pkix" - "encoding/asn1" -) - -type macData struct { - Mac digestInfo - MacSalt []byte - Iterations int `asn1:"optional,default:1"` -} - -// from PKCS#7: -type digestInfo struct { - Algorithm pkix.AlgorithmIdentifier - Digest []byte -} - -var ( - oidSHA1 = asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26}) -) - -func verifyMac(macData *macData, message, password []byte) error { - if !macData.Mac.Algorithm.Algorithm.Equal(oidSHA1) { - return NotImplementedError("unknown digest algorithm: " + macData.Mac.Algorithm.Algorithm.String()) - } - - key := pbkdf(sha1Sum, 20, 64, macData.MacSalt, password, macData.Iterations, 3, 20) - - mac := hmac.New(sha1.New, key) - mac.Write(message) - expectedMAC := mac.Sum(nil) - - if !hmac.Equal(macData.Mac.Digest, expectedMAC) { - return ErrIncorrectPassword - } - return nil -} diff --git a/vendor/golang.org/x/crypto/pkcs12/pbkdf.go b/vendor/golang.org/x/crypto/pkcs12/pbkdf.go deleted file mode 100644 index 5c419d4..0000000 --- a/vendor/golang.org/x/crypto/pkcs12/pbkdf.go +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pkcs12 - -import ( - "bytes" - "crypto/sha1" - "math/big" -) - -var ( - one = big.NewInt(1) -) - -// sha1Sum returns the SHA-1 hash of in. -func sha1Sum(in []byte) []byte { - sum := sha1.Sum(in) - return sum[:] -} - -// fillWithRepeats returns v*ceiling(len(pattern) / v) bytes consisting of -// repeats of pattern. -func fillWithRepeats(pattern []byte, v int) []byte { - if len(pattern) == 0 { - return nil - } - outputLen := v * ((len(pattern) + v - 1) / v) - return bytes.Repeat(pattern, (outputLen+len(pattern)-1)/len(pattern))[:outputLen] -} - -func pbkdf(hash func([]byte) []byte, u, v int, salt, password []byte, r int, ID byte, size int) (key []byte) { - // implementation of https://tools.ietf.org/html/rfc7292#appendix-B.2 , RFC text verbatim in comments - - // Let H be a hash function built around a compression function f: - - // Z_2^u x Z_2^v -> Z_2^u - - // (that is, H has a chaining variable and output of length u bits, and - // the message input to the compression function of H is v bits). The - // values for u and v are as follows: - - // HASH FUNCTION VALUE u VALUE v - // MD2, MD5 128 512 - // SHA-1 160 512 - // SHA-224 224 512 - // SHA-256 256 512 - // SHA-384 384 1024 - // SHA-512 512 1024 - // SHA-512/224 224 1024 - // SHA-512/256 256 1024 - - // Furthermore, let r be the iteration count. - - // We assume here that u and v are both multiples of 8, as are the - // lengths of the password and salt strings (which we denote by p and s, - // respectively) and the number n of pseudorandom bits required. In - // addition, u and v are of course non-zero. 
- - // For information on security considerations for MD5 [19], see [25] and - // [1], and on those for MD2, see [18]. - - // The following procedure can be used to produce pseudorandom bits for - // a particular "purpose" that is identified by a byte called "ID". - // This standard specifies 3 different values for the ID byte: - - // 1. If ID=1, then the pseudorandom bits being produced are to be used - // as key material for performing encryption or decryption. - - // 2. If ID=2, then the pseudorandom bits being produced are to be used - // as an IV (Initial Value) for encryption or decryption. - - // 3. If ID=3, then the pseudorandom bits being produced are to be used - // as an integrity key for MACing. - - // 1. Construct a string, D (the "diversifier"), by concatenating v/8 - // copies of ID. - var D []byte - for i := 0; i < v; i++ { - D = append(D, ID) - } - - // 2. Concatenate copies of the salt together to create a string S of - // length v(ceiling(s/v)) bits (the final copy of the salt may be - // truncated to create S). Note that if the salt is the empty - // string, then so is S. - - S := fillWithRepeats(salt, v) - - // 3. Concatenate copies of the password together to create a string P - // of length v(ceiling(p/v)) bits (the final copy of the password - // may be truncated to create P). Note that if the password is the - // empty string, then so is P. - - P := fillWithRepeats(password, v) - - // 4. Set I=S||P to be the concatenation of S and P. - I := append(S, P...) - - // 5. Set c=ceiling(n/u). - c := (size + u - 1) / u - - // 6. For i=1, 2, ..., c, do the following: - A := make([]byte, c*20) - var IjBuf []byte - for i := 0; i < c; i++ { - // A. Set A2=H^r(D||I). (i.e., the r-th hash of D||1, - // H(H(H(... H(D||I)))) - Ai := hash(append(D, I...)) - for j := 1; j < r; j++ { - Ai = hash(Ai) - } - copy(A[i*20:], Ai[:]) - - if i < c-1 { // skip on last iteration - // B. Concatenate copies of Ai to create a string B of length v - // bits (the final copy of Ai may be truncated to create B). - var B []byte - for len(B) < v { - B = append(B, Ai[:]...) - } - B = B[:v] - - // C. Treating I as a concatenation I_0, I_1, ..., I_(k-1) of v-bit - // blocks, where k=ceiling(s/v)+ceiling(p/v), modify I by - // setting I_j=(I_j+B+1) mod 2^v for each j. - { - Bbi := new(big.Int).SetBytes(B) - Ij := new(big.Int) - - for j := 0; j < len(I)/v; j++ { - Ij.SetBytes(I[j*v : (j+1)*v]) - Ij.Add(Ij, Bbi) - Ij.Add(Ij, one) - Ijb := Ij.Bytes() - // We expect Ijb to be exactly v bytes, - // if it is longer or shorter we must - // adjust it accordingly. - if len(Ijb) > v { - Ijb = Ijb[len(Ijb)-v:] - } - if len(Ijb) < v { - if IjBuf == nil { - IjBuf = make([]byte, v) - } - bytesShort := v - len(Ijb) - for i := 0; i < bytesShort; i++ { - IjBuf[i] = 0 - } - copy(IjBuf[bytesShort:], Ijb) - Ijb = IjBuf - } - copy(I[j*v:(j+1)*v], Ijb) - } - } - } - } - // 7. Concatenate A_1, A_2, ..., A_c together to form a pseudorandom - // bit string, A. - - // 8. Use the first n bits of A as the output of this entire process. - return A[:size] - - // If the above process is being used to generate a DES key, the process - // should be used to create 64 random bits, and the key's parity bits - // should be set after the 64 bits have been produced. Similar concerns - // hold for 2-key and 3-key triple-DES keys, for CDMF keys, and for any - // similar keys with parity bits "built into them". 
-} diff --git a/vendor/golang.org/x/crypto/pkcs12/pkcs12.go b/vendor/golang.org/x/crypto/pkcs12/pkcs12.go deleted file mode 100644 index 3a89bdb..0000000 --- a/vendor/golang.org/x/crypto/pkcs12/pkcs12.go +++ /dev/null @@ -1,360 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package pkcs12 implements some of PKCS#12. -// -// This implementation is distilled from https://tools.ietf.org/html/rfc7292 -// and referenced documents. It is intended for decoding P12/PFX-stored -// certificates and keys for use with the crypto/tls package. -// -// This package is frozen. If it's missing functionality you need, consider -// an alternative like software.sslmate.com/src/go-pkcs12. -package pkcs12 - -import ( - "crypto/ecdsa" - "crypto/rsa" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "encoding/hex" - "encoding/pem" - "errors" -) - -var ( - oidDataContentType = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 7, 1}) - oidEncryptedDataContentType = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 7, 6}) - - oidFriendlyName = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 20}) - oidLocalKeyID = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 21}) - oidMicrosoftCSPName = asn1.ObjectIdentifier([]int{1, 3, 6, 1, 4, 1, 311, 17, 1}) - - errUnknownAttributeOID = errors.New("pkcs12: unknown attribute OID") -) - -type pfxPdu struct { - Version int - AuthSafe contentInfo - MacData macData `asn1:"optional"` -} - -type contentInfo struct { - ContentType asn1.ObjectIdentifier - Content asn1.RawValue `asn1:"tag:0,explicit,optional"` -} - -type encryptedData struct { - Version int - EncryptedContentInfo encryptedContentInfo -} - -type encryptedContentInfo struct { - ContentType asn1.ObjectIdentifier - ContentEncryptionAlgorithm pkix.AlgorithmIdentifier - EncryptedContent []byte `asn1:"tag:0,optional"` -} - -func (i encryptedContentInfo) Algorithm() pkix.AlgorithmIdentifier { - return i.ContentEncryptionAlgorithm -} - -func (i encryptedContentInfo) Data() []byte { return i.EncryptedContent } - -type safeBag struct { - Id asn1.ObjectIdentifier - Value asn1.RawValue `asn1:"tag:0,explicit"` - Attributes []pkcs12Attribute `asn1:"set,optional"` -} - -type pkcs12Attribute struct { - Id asn1.ObjectIdentifier - Value asn1.RawValue `asn1:"set"` -} - -type encryptedPrivateKeyInfo struct { - AlgorithmIdentifier pkix.AlgorithmIdentifier - EncryptedData []byte -} - -func (i encryptedPrivateKeyInfo) Algorithm() pkix.AlgorithmIdentifier { - return i.AlgorithmIdentifier -} - -func (i encryptedPrivateKeyInfo) Data() []byte { - return i.EncryptedData -} - -// PEM block types -const ( - certificateType = "CERTIFICATE" - privateKeyType = "PRIVATE KEY" -) - -// unmarshal calls asn1.Unmarshal, but also returns an error if there is any -// trailing data after unmarshaling. -func unmarshal(in []byte, out interface{}) error { - trailing, err := asn1.Unmarshal(in, out) - if err != nil { - return err - } - if len(trailing) != 0 { - return errors.New("pkcs12: trailing data found") - } - return nil -} - -// ToPEM converts all "safe bags" contained in pfxData to PEM blocks. -// Unknown attributes are discarded. -// -// Note that although the returned PEM blocks for private keys have type -// "PRIVATE KEY", the bytes are not encoded according to PKCS #8, but according -// to PKCS #1 for RSA keys and SEC 1 for ECDSA keys. 
-func ToPEM(pfxData []byte, password string) ([]*pem.Block, error) { - encodedPassword, err := bmpString(password) - if err != nil { - return nil, ErrIncorrectPassword - } - - bags, encodedPassword, err := getSafeContents(pfxData, encodedPassword) - - if err != nil { - return nil, err - } - - blocks := make([]*pem.Block, 0, len(bags)) - for _, bag := range bags { - block, err := convertBag(&bag, encodedPassword) - if err != nil { - return nil, err - } - blocks = append(blocks, block) - } - - return blocks, nil -} - -func convertBag(bag *safeBag, password []byte) (*pem.Block, error) { - block := &pem.Block{ - Headers: make(map[string]string), - } - - for _, attribute := range bag.Attributes { - k, v, err := convertAttribute(&attribute) - if err == errUnknownAttributeOID { - continue - } - if err != nil { - return nil, err - } - block.Headers[k] = v - } - - switch { - case bag.Id.Equal(oidCertBag): - block.Type = certificateType - certsData, err := decodeCertBag(bag.Value.Bytes) - if err != nil { - return nil, err - } - block.Bytes = certsData - case bag.Id.Equal(oidPKCS8ShroundedKeyBag): - block.Type = privateKeyType - - key, err := decodePkcs8ShroudedKeyBag(bag.Value.Bytes, password) - if err != nil { - return nil, err - } - - switch key := key.(type) { - case *rsa.PrivateKey: - block.Bytes = x509.MarshalPKCS1PrivateKey(key) - case *ecdsa.PrivateKey: - block.Bytes, err = x509.MarshalECPrivateKey(key) - if err != nil { - return nil, err - } - default: - return nil, errors.New("found unknown private key type in PKCS#8 wrapping") - } - default: - return nil, errors.New("don't know how to convert a safe bag of type " + bag.Id.String()) - } - return block, nil -} - -func convertAttribute(attribute *pkcs12Attribute) (key, value string, err error) { - isString := false - - switch { - case attribute.Id.Equal(oidFriendlyName): - key = "friendlyName" - isString = true - case attribute.Id.Equal(oidLocalKeyID): - key = "localKeyId" - case attribute.Id.Equal(oidMicrosoftCSPName): - // This key is chosen to match OpenSSL. - key = "Microsoft CSP Name" - isString = true - default: - return "", "", errUnknownAttributeOID - } - - if isString { - if err := unmarshal(attribute.Value.Bytes, &attribute.Value); err != nil { - return "", "", err - } - if value, err = decodeBMPString(attribute.Value.Bytes); err != nil { - return "", "", err - } - } else { - var id []byte - if err := unmarshal(attribute.Value.Bytes, &id); err != nil { - return "", "", err - } - value = hex.EncodeToString(id) - } - - return key, value, nil -} - -// Decode extracts a certificate and private key from pfxData. This function -// assumes that there is only one certificate and only one private key in the -// pfxData; if there are more use ToPEM instead. 
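Since this diff drops the frozen golang.org/x/crypto/pkcs12 package from the vendor tree, a hedged sketch of the migration path its own package comment recommends. This assumes software.sslmate.com/src/go-pkcs12 exposes a Decode with the same shape as the frozen package's Decode below, and the bundle path and password are illustrative; verify against that package's docs before relying on it:

```go
package main

import (
	"crypto/tls"
	"log"
	"os"

	pkcs12 "software.sslmate.com/src/go-pkcs12"
)

func main() {
	// client.p12 is an illustrative bundle holding one key and one cert.
	pfxData, err := os.ReadFile("client.p12")
	if err != nil {
		log.Fatal(err)
	}

	// Decode mirrors the frozen package: exactly one key, one certificate.
	key, cert, err := pkcs12.Decode(pfxData, "password")
	if err != nil {
		log.Fatal(err)
	}

	_ = tls.Certificate{
		Certificate: [][]byte{cert.Raw},
		PrivateKey:  key,
	}
	log.Printf("loaded certificate for %q", cert.Subject.CommonName)
}
```
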
-func Decode(pfxData []byte, password string) (privateKey interface{}, certificate *x509.Certificate, err error) { - encodedPassword, err := bmpString(password) - if err != nil { - return nil, nil, err - } - - bags, encodedPassword, err := getSafeContents(pfxData, encodedPassword) - if err != nil { - return nil, nil, err - } - - if len(bags) != 2 { - err = errors.New("pkcs12: expected exactly two safe bags in the PFX PDU") - return - } - - for _, bag := range bags { - switch { - case bag.Id.Equal(oidCertBag): - if certificate != nil { - err = errors.New("pkcs12: expected exactly one certificate bag") - } - - certsData, err := decodeCertBag(bag.Value.Bytes) - if err != nil { - return nil, nil, err - } - certs, err := x509.ParseCertificates(certsData) - if err != nil { - return nil, nil, err - } - if len(certs) != 1 { - err = errors.New("pkcs12: expected exactly one certificate in the certBag") - return nil, nil, err - } - certificate = certs[0] - - case bag.Id.Equal(oidPKCS8ShroundedKeyBag): - if privateKey != nil { - err = errors.New("pkcs12: expected exactly one key bag") - return nil, nil, err - } - - if privateKey, err = decodePkcs8ShroudedKeyBag(bag.Value.Bytes, encodedPassword); err != nil { - return nil, nil, err - } - } - } - - if certificate == nil { - return nil, nil, errors.New("pkcs12: certificate missing") - } - if privateKey == nil { - return nil, nil, errors.New("pkcs12: private key missing") - } - - return -} - -func getSafeContents(p12Data, password []byte) (bags []safeBag, updatedPassword []byte, err error) { - pfx := new(pfxPdu) - if err := unmarshal(p12Data, pfx); err != nil { - return nil, nil, errors.New("pkcs12: error reading P12 data: " + err.Error()) - } - - if pfx.Version != 3 { - return nil, nil, NotImplementedError("can only decode v3 PFX PDU's") - } - - if !pfx.AuthSafe.ContentType.Equal(oidDataContentType) { - return nil, nil, NotImplementedError("only password-protected PFX is implemented") - } - - // unmarshal the explicit bytes in the content for type 'data' - if err := unmarshal(pfx.AuthSafe.Content.Bytes, &pfx.AuthSafe.Content); err != nil { - return nil, nil, err - } - - if len(pfx.MacData.Mac.Algorithm.Algorithm) == 0 { - return nil, nil, errors.New("pkcs12: no MAC in data") - } - - if err := verifyMac(&pfx.MacData, pfx.AuthSafe.Content.Bytes, password); err != nil { - if err == ErrIncorrectPassword && len(password) == 2 && password[0] == 0 && password[1] == 0 { - // some implementations use an empty byte array - // for the empty string password try one more - // time with empty-empty password - password = nil - err = verifyMac(&pfx.MacData, pfx.AuthSafe.Content.Bytes, password) - } - if err != nil { - return nil, nil, err - } - } - - var authenticatedSafe []contentInfo - if err := unmarshal(pfx.AuthSafe.Content.Bytes, &authenticatedSafe); err != nil { - return nil, nil, err - } - - if len(authenticatedSafe) != 2 { - return nil, nil, NotImplementedError("expected exactly two items in the authenticated safe") - } - - for _, ci := range authenticatedSafe { - var data []byte - - switch { - case ci.ContentType.Equal(oidDataContentType): - if err := unmarshal(ci.Content.Bytes, &data); err != nil { - return nil, nil, err - } - case ci.ContentType.Equal(oidEncryptedDataContentType): - var encryptedData encryptedData - if err := unmarshal(ci.Content.Bytes, &encryptedData); err != nil { - return nil, nil, err - } - if encryptedData.Version != 0 { - return nil, nil, NotImplementedError("only version 0 of EncryptedData is supported") - } - if data, err = 
pbDecrypt(encryptedData.EncryptedContentInfo, password); err != nil { - return nil, nil, err - } - default: - return nil, nil, NotImplementedError("only data and encryptedData content types are supported in authenticated safe") - } - - var safeContents []safeBag - if err := unmarshal(data, &safeContents); err != nil { - return nil, nil, err - } - bags = append(bags, safeContents...) - } - - return bags, password, nil -} diff --git a/vendor/golang.org/x/crypto/pkcs12/safebags.go b/vendor/golang.org/x/crypto/pkcs12/safebags.go deleted file mode 100644 index def1f7b..0000000 --- a/vendor/golang.org/x/crypto/pkcs12/safebags.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pkcs12 - -import ( - "crypto/x509" - "encoding/asn1" - "errors" -) - -var ( - // see https://tools.ietf.org/html/rfc7292#appendix-D - oidCertTypeX509Certificate = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 22, 1}) - oidPKCS8ShroundedKeyBag = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 10, 1, 2}) - oidCertBag = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 10, 1, 3}) -) - -type certBag struct { - Id asn1.ObjectIdentifier - Data []byte `asn1:"tag:0,explicit"` -} - -func decodePkcs8ShroudedKeyBag(asn1Data, password []byte) (privateKey interface{}, err error) { - pkinfo := new(encryptedPrivateKeyInfo) - if err = unmarshal(asn1Data, pkinfo); err != nil { - return nil, errors.New("pkcs12: error decoding PKCS#8 shrouded key bag: " + err.Error()) - } - - pkData, err := pbDecrypt(pkinfo, password) - if err != nil { - return nil, errors.New("pkcs12: error decrypting PKCS#8 shrouded key bag: " + err.Error()) - } - - ret := new(asn1.RawValue) - if err = unmarshal(pkData, ret); err != nil { - return nil, errors.New("pkcs12: error unmarshaling decrypted private key: " + err.Error()) - } - - if privateKey, err = x509.ParsePKCS8PrivateKey(pkData); err != nil { - return nil, errors.New("pkcs12: error parsing PKCS#8 private key: " + err.Error()) - } - - return privateKey, nil -} - -func decodeCertBag(asn1Data []byte) (x509Certificates []byte, err error) { - bag := new(certBag) - if err := unmarshal(asn1Data, bag); err != nil { - return nil, errors.New("pkcs12: error decoding cert bag: " + err.Error()) - } - if !bag.Id.Equal(oidCertTypeX509Certificate) { - return nil, NotImplementedError("only X509 certificates are supported") - } - return bag.Data, nil -} diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go deleted file mode 100644 index 37dc0cf..0000000 --- a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ctxhttp provides helper functions for performing context-aware HTTP requests. -package ctxhttp // import "golang.org/x/net/context/ctxhttp" - -import ( - "context" - "io" - "net/http" - "net/url" - "strings" -) - -// Do sends an HTTP request with the provided http.Client and returns -// an HTTP response. -// -// If the client is nil, http.DefaultClient is used. -// -// The provided ctx must be non-nil. If it is canceled or times out, -// ctx.Err() will be returned. 
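The ctxhttp helpers being deleted here predate request-scoped contexts in net/http; a minimal sketch of the standard-library replacement, with an illustrative URL and timeout:

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// http.NewRequestWithContext (Go 1.13+) covers what ctxhttp.Get did.
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, "https://example.com", nil)
	if err != nil {
		panic(err)
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err) // wraps ctx.Err() when the context ended the request
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```
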
-func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
-	if client == nil {
-		client = http.DefaultClient
-	}
-	resp, err := client.Do(req.WithContext(ctx))
-	// If we got an error, and the context has been canceled,
-	// the context's error is probably more useful.
-	if err != nil {
-		select {
-		case <-ctx.Done():
-			err = ctx.Err()
-		default:
-		}
-	}
-	return resp, err
-}
-
-// Get issues a GET request via the Do function.
-func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
-	req, err := http.NewRequest("GET", url, nil)
-	if err != nil {
-		return nil, err
-	}
-	return Do(ctx, client, req)
-}
-
-// Head issues a HEAD request via the Do function.
-func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
-	req, err := http.NewRequest("HEAD", url, nil)
-	if err != nil {
-		return nil, err
-	}
-	return Do(ctx, client, req)
-}
-
-// Post issues a POST request via the Do function.
-func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) {
-	req, err := http.NewRequest("POST", url, body)
-	if err != nil {
-		return nil, err
-	}
-	req.Header.Set("Content-Type", bodyType)
-	return Do(ctx, client, req)
-}
-
-// PostForm issues a POST request via the Do function.
-func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {
-	return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
-}
diff --git a/vendor/golang.org/x/net/html/doc.go b/vendor/golang.org/x/net/html/doc.go
index 822ed42..7a96eae 100644
--- a/vendor/golang.org/x/net/html/doc.go
+++ b/vendor/golang.org/x/net/html/doc.go
@@ -92,6 +92,21 @@ example, to process each anchor node in depth-first order:
 The relevant specifications include:
 https://html.spec.whatwg.org/multipage/syntax.html and
 https://html.spec.whatwg.org/multipage/syntax.html#tokenization
+
+# Security Considerations
+
+Care should be taken when parsing and interpreting HTML, whether full documents
+or fragments, within the framework of the HTML specification, especially with
+regard to untrusted inputs.
+
+This package provides both a tokenizer and a parser. Only the parser constructs
+a DOM according to the HTML specification, resolving malformed and misplaced
+tags where appropriate. The tokenizer simply tokenizes the HTML presented to it,
+and as such does not resolve issues that may exist in the processed HTML,
+producing a literal interpretation of the input.
+
+If your use case requires semantically well-formed HTML, as defined by the
+WHATWG specification, the parser should be used rather than the tokenizer.
 */
 package html // import "golang.org/x/net/html"
diff --git a/vendor/golang.org/x/net/html/escape.go b/vendor/golang.org/x/net/html/escape.go
index d856139..04c6bec 100644
--- a/vendor/golang.org/x/net/html/escape.go
+++ b/vendor/golang.org/x/net/html/escape.go
@@ -193,6 +193,87 @@ func lower(b []byte) []byte {
 	return b
 }
 
+// escapeComment is like func escape but escapes its input bytes less often.
+// Per https://github.com/golang/go/issues/58246 some HTML comments are (1)
+// meaningful and (2) contain angle brackets that we'd like to avoid escaping
+// unless we have to.
+//
+// "We have to" includes the '&' byte, since that introduces other escapes.
+//
+// It also includes those bytes (not including EOF) that would otherwise end
+// the comment.
+// Per the summary table at the bottom of comment_test.go, this is
+// the '>' byte that, per above, we'd like to avoid escaping unless we have to.
+//
+// Studying the summary table (and T actions in its '>' column) closely, we
+// only need to escape in states 43, 44, 49, 51 and 52. State 43 is at the
+// start of the comment data. State 52 is after a '!'. The other three states
+// are after a '-'.
+//
+// Our algorithm is thus to escape every '&' and to escape '>' if and only if:
+//   - The '>' is after a '!' or '-' (in the unescaped data) or
+//   - The '>' is at the start of the comment data (after the opening "<!--").
+func escapeComment(w writer, s string) error {
+	if len(s) == 0 {
+		return nil
+	}
+
+	// Loop:
+	//   - Grow j such that s[i:j] does not need escaping.
+	//   - If s[j] does need escaping, output s[i:j] and an escaped s[j],
+	//     resetting i and j to point past that s[j] byte.
+	i := 0
+	for j := 0; j < len(s); j++ {
+		escaped := ""
+		switch s[j] {
+		case '&':
+			escaped = "&amp;"
+
+		case '>':
+			if j > 0 {
+				if prev := s[j-1]; (prev != '!') && (prev != '-') {
+					continue
+				}
+			}
+			escaped = "&gt;"
+
+		default:
+			continue
+		}
+
+		if i < j {
+			if _, err := w.WriteString(s[i:j]); err != nil {
+				return err
+			}
+		}
+		if _, err := w.WriteString(escaped); err != nil {
+			return err
+		}
+		i = j + 1
+	}
+
+	if i < len(s) {
+		if _, err := w.WriteString(s[i:]); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// escapeCommentString is to EscapeString as escapeComment is to escape.
+func escapeCommentString(s string) string {
+	if strings.IndexAny(s, "&>") == -1 {
+		return s
+	}
+	var buf bytes.Buffer
+	escapeComment(&buf, s)
+	return buf.String()
+}
+
 const escapedChars = "&'<>\"\r"
 
 func escape(w writer, s string) error {
diff --git a/vendor/golang.org/x/net/html/token.go b/vendor/golang.org/x/net/html/token.go
index be3c754..5c2a1f4 100644
--- a/vendor/golang.org/x/net/html/token.go
+++ b/vendor/golang.org/x/net/html/token.go
@@ -110,7 +110,7 @@ func (t Token) String() string {
 	case SelfClosingTagToken:
 		return "<" + t.tagString() + "/>"
 	case CommentToken:
-		return "<!--" + t.Data + "-->"
+		return "<!--" + escapeCommentString(t.Data) + "-->"
 	case DoctypeToken:
 		return "<!DOCTYPE " + t.Data + ">"
 	}
@@ -598,6 +598,11 @@ scriptDataDoubleEscapeEnd:
 // readComment reads the next comment token starting with "<!--". The opening
 // "<!--" has already been consumed.
 func (z *Tokenizer) readComment() {
+	// When modifying this function, consider manually increasing the
+	// suffixLen constant in func TestComments, from 6 to e.g. 9 or more.
+	// That increase should only be temporary, not committed, as it
+	// exponentially affects the test running time.
+
 	z.data.start = z.raw.end
 	defer func() {
 		if z.data.end < z.data.start {
@@ -628,19 +632,52 @@ func (z *Tokenizer) readComment() {
-	for dashCount := 2; ; {
+	var dashCount int
+	beginning := true
+	for {
 		c := z.readByte()
 		if z.err != nil {
-			// Ignore up to two dashes at EOF.
-			if dashCount > 2 {
-				dashCount = 2
-			}
-			z.data.end = z.raw.end - dashCount
+			z.data.end = z.calculateAbruptCommentDataEnd()
 			return
 		}
 		switch c {
 		case '-':
 			dashCount++
 			continue
 		case '>':
-			if dashCount >= 2 {
+			if dashCount >= 2 || beginning {
 				z.data.end = z.raw.end - len("-->")
 				return
 			}
 		case '!':
 			if dashCount >= 2 {
 				c = z.readByte()
 				if z.err != nil {
-					z.data.end = z.raw.end
+					z.data.end = z.calculateAbruptCommentDataEnd()
 					return
-				}
-				if c == '>' {
+				} else if c == '>' {
 					z.data.end = z.raw.end - len("--!>")
 					return
+				} else if c == '-' {
+					dashCount = 1
+					beginning = false
+					continue
 				}
 			}
 		}
 		dashCount = 0
+		beginning = false
 	}
 }
 
+func (z *Tokenizer) calculateAbruptCommentDataEnd() int {
+	raw := z.Raw()
+	const prefixLen = len("