Mirror of https://github.com/1Password/onepassword-operator.git, synced 2025-10-23 07:58:04 +00:00
Upgrade the operator to use Operator SDK v1.33.0 (#182)
* Move controller package inside internal directory
  Based on the go/v4 project structure, the following changed:
  - Package `controllers` is now named `controller`
  - Package `controller` now lives inside the new `internal` directory
* Move main.go into cmd directory
  Based on the new go/v4 project structure, `main.go` now lives in the `cmd` directory.
* Change package import in main.go
* Update go mod dependencies
  Update the dependencies based on the versions obtained by creating a new operator project using `kubebuilder init --domain onepassword.com --plugins=go/v4`. This follows the migration steps for going from go/v3 to go/v4 (https://book.kubebuilder.io/migration/migration_guide_gov3_to_gov4).
* Update vendor
* Adjust code for breaking changes from pkg update
  The sigs.k8s.io/controller-runtime package had breaking changes from v0.14.5 to v0.16.3. This commit makes the changes needed to achieve the same behavior with the new functionality available (a sketch of the new manager setup appears after this list).
* Adjust paths to Connect yaml files
  Since `main.go` is now in the `cmd` directory, the paths to the files for deploying Connect have to be adjusted based on the new location `main.go` is executed from.
* Update files based on new structure and scaffolding
  These changes follow the new project structure and scaffolding obtained with the go/v4 plugin, per the go/v4 migration guide (https://book.kubebuilder.io/migration/migration_guide_gov3_to_gov4).
* Update config files
  These updates use the Kustomize v4 syntax and are part of the upgrade to go/v4 (https://book.kubebuilder.io/migration/migration_guide_gov3_to_gov4).
* Update dependencies and Go version
* Update vendor
* Update Kubernetes tools versions
* Update operator version in Makefile
  The version in the Makefile now matches the version of the operator.
* Update Operator SDK version in version.go
* Adjust generated deepcopy
  The `+build` tag is no longer needed in the latest generated scaffolding, so it is removed.
* Update copyright year
* Bring back missing changes from migration
  Some Makefile customization was lost during the migration process, specifically the namespace customization for the `make deploy` command. We also push changes to kustomization.yaml to make the deploy process smoother.
* Add RBAC perms for coordination.k8s.io
  With the latest changes to Kubernetes and Kustomize, the service account needs additional RBAC so that it can access the `leases` resource (see the sketch after this list).
* Optimize Dockerfile
  The Dockerfile had a step for caching dependencies (`go mod download`), but this is already covered by the vendor directory, which we include. Removing the step makes the image build faster.
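For the controller-runtime and RBAC bullets above, here is a minimal, hypothetical sketch (not the operator's actual cmd/main.go) of how manager setup commonly looks after the v0.14 to v0.16 breaking changes: the old MetricsBindAddress and Port fields are replaced by metricsserver.Options and webhook.NewServer, and a kubebuilder RBAC marker grants access to coordination.k8s.io leases for leader election. The bind addresses and the LeaderElectionID below are placeholder values.

// Sketch only: assumed names and placeholder values, not the operator's real main.go.
package main

import (
	"os"

	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	ctrl "sigs.k8s.io/controller-runtime"
	metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
	"sigs.k8s.io/controller-runtime/pkg/webhook"
)

// Hypothetical RBAC marker for the leases resource; in a kubebuilder project it
// usually sits above the controller's Reconcile method, and `make manifests`
// turns it into rules in the generated role.
//+kubebuilder:rbac:groups=coordination.k8s.io,resources=leases,verbs=get;list;watch;create;update;patch;delete

func main() {
	scheme := runtime.NewScheme()
	_ = clientgoscheme.AddToScheme(scheme)

	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		Scheme: scheme,
		// In controller-runtime v0.16, MetricsBindAddress is replaced by a
		// metrics server Options struct.
		Metrics: metricsserver.Options{BindAddress: ":8080"},
		// The webhook port is now configured via an explicit webhook server.
		WebhookServer:          webhook.NewServer(webhook.Options{Port: 9443}),
		HealthProbeBindAddress: ":8081",
		LeaderElection:         true,
		LeaderElectionID:       "example.onepassword.com", // placeholder ID
	})
	if err != nil {
		os.Exit(1)
	}

	// Controllers would be registered with mgr here before starting it.
	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		os.Exit(1)
	}
}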
vendor/github.com/google/pprof/profile/encode.go (generated, vendored): 89 changed lines

@@ -17,7 +17,6 @@ package profile
import (
"errors"
"sort"
"strings"
)

func (p *Profile) decoder() []decoder {
@@ -184,13 +183,12 @@ var profileDecoder = []decoder{
// repeated Location location = 4
func(b *buffer, m message) error {
x := new(Location)
x.Line = b.tmpLines[:0] // Use shared space temporarily
x.Line = make([]Line, 0, 8) // Pre-allocate Line buffer
pp := m.(*Profile)
pp.Location = append(pp.Location, x)
err := decodeMessage(b, x)
b.tmpLines = x.Line[:0]
// Copy to shrink size and detach from shared space.
x.Line = append([]Line(nil), x.Line...)
var tmp []Line
x.Line = append(tmp, x.Line...) // Shrink to allocated size
return err
},
// repeated Function function = 5
@@ -254,14 +252,6 @@ func (p *Profile) postDecode() error {
} else {
mappings[m.ID] = m
}

// If this a main linux kernel mapping with a relocation symbol suffix
// ("[kernel.kallsyms]_text"), extract said suffix.
// It is fairly hacky to handle at this level, but the alternatives appear even worse.
if strings.HasPrefix(m.File, "[kernel.kallsyms]") {
m.KernelRelocationSymbol = strings.ReplaceAll(m.File, "[kernel.kallsyms]", "")
}

}

functions := make(map[uint64]*Function, len(p.Function))
@@ -308,52 +298,41 @@ func (p *Profile) postDecode() error {
st.Unit, err = getString(p.stringTable, &st.unitX, err)
}

// Pre-allocate space for all locations.
numLocations := 0
for _, s := range p.Sample {
numLocations += len(s.locationIDX)
}
locBuffer := make([]*Location, numLocations)

for _, s := range p.Sample {
if len(s.labelX) > 0 {
labels := make(map[string][]string, len(s.labelX))
numLabels := make(map[string][]int64, len(s.labelX))
numUnits := make(map[string][]string, len(s.labelX))
for _, l := range s.labelX {
var key, value string
key, err = getString(p.stringTable, &l.keyX, err)
if l.strX != 0 {
value, err = getString(p.stringTable, &l.strX, err)
labels[key] = append(labels[key], value)
} else if l.numX != 0 || l.unitX != 0 {
numValues := numLabels[key]
units := numUnits[key]
if l.unitX != 0 {
var unit string
unit, err = getString(p.stringTable, &l.unitX, err)
units = padStringArray(units, len(numValues))
numUnits[key] = append(units, unit)
}
numLabels[key] = append(numLabels[key], l.numX)
labels := make(map[string][]string, len(s.labelX))
numLabels := make(map[string][]int64, len(s.labelX))
numUnits := make(map[string][]string, len(s.labelX))
for _, l := range s.labelX {
var key, value string
key, err = getString(p.stringTable, &l.keyX, err)
if l.strX != 0 {
value, err = getString(p.stringTable, &l.strX, err)
labels[key] = append(labels[key], value)
} else if l.numX != 0 || l.unitX != 0 {
numValues := numLabels[key]
units := numUnits[key]
if l.unitX != 0 {
var unit string
unit, err = getString(p.stringTable, &l.unitX, err)
units = padStringArray(units, len(numValues))
numUnits[key] = append(units, unit)
}
}
if len(labels) > 0 {
s.Label = labels
}
if len(numLabels) > 0 {
s.NumLabel = numLabels
for key, units := range numUnits {
if len(units) > 0 {
numUnits[key] = padStringArray(units, len(numLabels[key]))
}
}
s.NumUnit = numUnits
numLabels[key] = append(numLabels[key], l.numX)
}
}

s.Location = locBuffer[:len(s.locationIDX)]
locBuffer = locBuffer[len(s.locationIDX):]
if len(labels) > 0 {
s.Label = labels
}
if len(numLabels) > 0 {
s.NumLabel = numLabels
for key, units := range numUnits {
if len(units) > 0 {
numUnits[key] = padStringArray(units, len(numLabels[key]))
}
}
s.NumUnit = numUnits
}
s.Location = make([]*Location, len(s.locationIDX))
for i, lid := range s.locationIDX {
if lid < uint64(len(locationIds)) {
s.Location[i] = locationIds[lid]
vendor/github.com/google/pprof/profile/filter.go (generated, vendored): 4 changed lines

@@ -22,10 +22,6 @@ import "regexp"
// samples where at least one frame matches focus but none match ignore.
// Returns true is the corresponding regexp matched at least one sample.
func (p *Profile) FilterSamplesByName(focus, ignore, hide, show *regexp.Regexp) (fm, im, hm, hnm bool) {
if focus == nil && ignore == nil && hide == nil && show == nil {
fm = true // Missing focus implies a match
return
}
focusOrIgnore := make(map[uint64]bool)
hidden := make(map[uint64]bool)
for _, l := range p.Location {
vendor/github.com/google/pprof/profile/legacy_profile.go (generated, vendored): 31 changed lines

@@ -295,12 +295,11 @@ func get64b(b []byte) (uint64, []byte) {
//
// The general format for profilez samples is a sequence of words in
// binary format. The first words are a header with the following data:
//
// 1st word -- 0
// 2nd word -- 3
// 3rd word -- 0 if a c++ application, 1 if a java application.
// 4th word -- Sampling period (in microseconds).
// 5th word -- Padding.
// 1st word -- 0
// 2nd word -- 3
// 3rd word -- 0 if a c++ application, 1 if a java application.
// 4th word -- Sampling period (in microseconds).
// 5th word -- Padding.
func parseCPU(b []byte) (*Profile, error) {
var parse func([]byte) (uint64, []byte)
var n1, n2, n3, n4, n5 uint64
@@ -404,18 +403,15 @@ func cleanupDuplicateLocations(p *Profile) {
//
// profilez samples are a repeated sequence of stack frames of the
// form:
//
// 1st word -- The number of times this stack was encountered.
// 2nd word -- The size of the stack (StackSize).
// 3rd word -- The first address on the stack.
// ...
// StackSize + 2 -- The last address on the stack
//
// 1st word -- The number of times this stack was encountered.
// 2nd word -- The size of the stack (StackSize).
// 3rd word -- The first address on the stack.
// ...
// StackSize + 2 -- The last address on the stack
// The last stack trace is of the form:
//
// 1st word -- 0
// 2nd word -- 1
// 3rd word -- 0
// 1st word -- 0
// 2nd word -- 1
// 3rd word -- 0
//
// Addresses from stack traces may point to the next instruction after
// each call. Optionally adjust by -1 to land somewhere on the actual
@@ -865,6 +861,7 @@ func parseThread(b []byte) (*Profile, error) {
// Recognize each thread and populate profile samples.
for !isMemoryMapSentinel(line) {
if strings.HasPrefix(line, "---- no stack trace for") {
line = ""
break
}
if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 {
vendor/github.com/google/pprof/profile/merge.go (generated, vendored): 278 changed lines

@@ -15,7 +15,6 @@
package profile

import (
"encoding/binary"
"fmt"
"sort"
"strconv"
@@ -59,7 +58,7 @@ func Merge(srcs []*Profile) (*Profile, error) {

for _, src := range srcs {
// Clear the profile-specific hash tables
pm.locationsByID = makeLocationIDMap(len(src.Location))
pm.locationsByID = make(map[uint64]*Location, len(src.Location))
pm.functionsByID = make(map[uint64]*Function, len(src.Function))
pm.mappingsByID = make(map[uint64]mapInfo, len(src.Mapping))

@@ -137,7 +136,7 @@ type profileMerger struct {
p *Profile

// Memoization tables within a profile.
locationsByID locationIDMap
locationsByID map[uint64]*Location
functionsByID map[uint64]*Function
mappingsByID map[uint64]mapInfo

@@ -154,16 +153,6 @@ type mapInfo struct {
}

func (pm *profileMerger) mapSample(src *Sample) *Sample {
// Check memoization table
k := pm.sampleKey(src)
if ss, ok := pm.samples[k]; ok {
for i, v := range src.Value {
ss.Value[i] += v
}
return ss
}

// Make new sample.
s := &Sample{
Location: make([]*Location, len(src.Location)),
Value: make([]int64, len(src.Value)),
@@ -188,98 +177,52 @@ func (pm *profileMerger) mapSample(src *Sample) *Sample {
s.NumLabel[k] = vv
s.NumUnit[k] = uu
}
// Check memoization table. Must be done on the remapped location to
// account for the remapped mapping. Add current values to the
// existing sample.
k := s.key()
if ss, ok := pm.samples[k]; ok {
for i, v := range src.Value {
ss.Value[i] += v
}
return ss
}
copy(s.Value, src.Value)
pm.samples[k] = s
pm.p.Sample = append(pm.p.Sample, s)
return s
}

func (pm *profileMerger) sampleKey(sample *Sample) sampleKey {
// Accumulate contents into a string.
var buf strings.Builder
buf.Grow(64) // Heuristic to avoid extra allocs

// encode a number
putNumber := func(v uint64) {
var num [binary.MaxVarintLen64]byte
n := binary.PutUvarint(num[:], v)
buf.Write(num[:n])
// key generates sampleKey to be used as a key for maps.
func (sample *Sample) key() sampleKey {
ids := make([]string, len(sample.Location))
for i, l := range sample.Location {
ids[i] = strconv.FormatUint(l.ID, 16)
}

// encode a string prefixed with its length.
putDelimitedString := func(s string) {
putNumber(uint64(len(s)))
buf.WriteString(s)
labels := make([]string, 0, len(sample.Label))
for k, v := range sample.Label {
labels = append(labels, fmt.Sprintf("%q%q", k, v))
}
sort.Strings(labels)

for _, l := range sample.Location {
// Get the location in the merged profile, which may have a different ID.
if loc := pm.mapLocation(l); loc != nil {
putNumber(loc.ID)
}
numlabels := make([]string, 0, len(sample.NumLabel))
for k, v := range sample.NumLabel {
numlabels = append(numlabels, fmt.Sprintf("%q%x%x", k, v, sample.NumUnit[k]))
}
putNumber(0) // Delimiter
sort.Strings(numlabels)

for _, l := range sortedKeys1(sample.Label) {
putDelimitedString(l)
values := sample.Label[l]
putNumber(uint64(len(values)))
for _, v := range values {
putDelimitedString(v)
}
return sampleKey{
strings.Join(ids, "|"),
strings.Join(labels, ""),
strings.Join(numlabels, ""),
}

for _, l := range sortedKeys2(sample.NumLabel) {
putDelimitedString(l)
values := sample.NumLabel[l]
putNumber(uint64(len(values)))
for _, v := range values {
putNumber(uint64(v))
}
units := sample.NumUnit[l]
putNumber(uint64(len(units)))
for _, v := range units {
putDelimitedString(v)
}
}

return sampleKey(buf.String())
}

type sampleKey string

// sortedKeys1 returns the sorted keys found in a string->[]string map.
//
// Note: this is currently non-generic since github pprof runs golint,
// which does not support generics. When that issue is fixed, it can
// be merged with sortedKeys2 and made into a generic function.
func sortedKeys1(m map[string][]string) []string {
if len(m) == 0 {
return nil
}
keys := make([]string, 0, len(m))
for k := range m {
keys = append(keys, k)
}
sort.Strings(keys)
return keys
}

// sortedKeys2 returns the sorted keys found in a string->[]int64 map.
//
// Note: this is currently non-generic since github pprof runs golint,
// which does not support generics. When that issue is fixed, it can
// be merged with sortedKeys1 and made into a generic function.
func sortedKeys2(m map[string][]int64) []string {
if len(m) == 0 {
return nil
}
keys := make([]string, 0, len(m))
for k := range m {
keys = append(keys, k)
}
sort.Strings(keys)
return keys
type sampleKey struct {
locations string
labels string
numlabels string
}

func (pm *profileMerger) mapLocation(src *Location) *Location {
@@ -287,7 +230,7 @@ func (pm *profileMerger) mapLocation(src *Location) *Location {
return nil
}

if l := pm.locationsByID.get(src.ID); l != nil {
if l, ok := pm.locationsByID[src.ID]; ok {
return l
}

@@ -306,10 +249,10 @@ func (pm *profileMerger) mapLocation(src *Location) *Location {
// account for the remapped mapping ID.
k := l.key()
if ll, ok := pm.locations[k]; ok {
pm.locationsByID.set(src.ID, ll)
pm.locationsByID[src.ID] = ll
return ll
}
pm.locationsByID.set(src.ID, l)
pm.locationsByID[src.ID] = l
pm.locations[k] = l
pm.p.Location = append(pm.p.Location, l)
return l
@@ -360,17 +303,16 @@ func (pm *profileMerger) mapMapping(src *Mapping) mapInfo {
return mi
}
m := &Mapping{
ID: uint64(len(pm.p.Mapping) + 1),
Start: src.Start,
Limit: src.Limit,
Offset: src.Offset,
File: src.File,
KernelRelocationSymbol: src.KernelRelocationSymbol,
BuildID: src.BuildID,
HasFunctions: src.HasFunctions,
HasFilenames: src.HasFilenames,
HasLineNumbers: src.HasLineNumbers,
HasInlineFrames: src.HasInlineFrames,
ID: uint64(len(pm.p.Mapping) + 1),
Start: src.Start,
Limit: src.Limit,
Offset: src.Offset,
File: src.File,
BuildID: src.BuildID,
HasFunctions: src.HasFunctions,
HasFilenames: src.HasFilenames,
HasLineNumbers: src.HasLineNumbers,
HasInlineFrames: src.HasInlineFrames,
}
pm.p.Mapping = append(pm.p.Mapping, m)

@@ -537,131 +479,3 @@ func (p *Profile) compatible(pb *Profile) error {
func equalValueType(st1, st2 *ValueType) bool {
return st1.Type == st2.Type && st1.Unit == st2.Unit
}

// locationIDMap is like a map[uint64]*Location, but provides efficiency for
// ids that are densely numbered, which is often the case.
type locationIDMap struct {
dense []*Location // indexed by id for id < len(dense)
sparse map[uint64]*Location // indexed by id for id >= len(dense)
}

func makeLocationIDMap(n int) locationIDMap {
return locationIDMap{
dense: make([]*Location, n),
sparse: map[uint64]*Location{},
}
}

func (lm locationIDMap) get(id uint64) *Location {
if id < uint64(len(lm.dense)) {
return lm.dense[int(id)]
}
return lm.sparse[id]
}

func (lm locationIDMap) set(id uint64, loc *Location) {
if id < uint64(len(lm.dense)) {
lm.dense[id] = loc
return
}
lm.sparse[id] = loc
}

// CompatibilizeSampleTypes makes profiles compatible to be compared/merged. It
// keeps sample types that appear in all profiles only and drops/reorders the
// sample types as necessary.
//
// In the case of sample types order is not the same for given profiles the
// order is derived from the first profile.
//
// Profiles are modified in-place.
//
// It returns an error if the sample type's intersection is empty.
func CompatibilizeSampleTypes(ps []*Profile) error {
sTypes := commonSampleTypes(ps)
if len(sTypes) == 0 {
return fmt.Errorf("profiles have empty common sample type list")
}
for _, p := range ps {
if err := compatibilizeSampleTypes(p, sTypes); err != nil {
return err
}
}
return nil
}

// commonSampleTypes returns sample types that appear in all profiles in the
// order how they ordered in the first profile.
func commonSampleTypes(ps []*Profile) []string {
if len(ps) == 0 {
return nil
}
sTypes := map[string]int{}
for _, p := range ps {
for _, st := range p.SampleType {
sTypes[st.Type]++
}
}
var res []string
for _, st := range ps[0].SampleType {
if sTypes[st.Type] == len(ps) {
res = append(res, st.Type)
}
}
return res
}

// compatibilizeSampleTypes drops sample types that are not present in sTypes
// list and reorder them if needed.
//
// It sets DefaultSampleType to sType[0] if it is not in sType list.
//
// It assumes that all sample types from the sTypes list are present in the
// given profile otherwise it returns an error.
func compatibilizeSampleTypes(p *Profile, sTypes []string) error {
if len(sTypes) == 0 {
return fmt.Errorf("sample type list is empty")
}
defaultSampleType := sTypes[0]
reMap, needToModify := make([]int, len(sTypes)), false
for i, st := range sTypes {
if st == p.DefaultSampleType {
defaultSampleType = p.DefaultSampleType
}
idx := searchValueType(p.SampleType, st)
if idx < 0 {
return fmt.Errorf("%q sample type is not found in profile", st)
}
reMap[i] = idx
if idx != i {
needToModify = true
}
}
if !needToModify && len(sTypes) == len(p.SampleType) {
return nil
}
p.DefaultSampleType = defaultSampleType
oldSampleTypes := p.SampleType
p.SampleType = make([]*ValueType, len(sTypes))
for i, idx := range reMap {
p.SampleType[i] = oldSampleTypes[idx]
}
values := make([]int64, len(sTypes))
for _, s := range p.Sample {
for i, idx := range reMap {
values[i] = s.Value[idx]
}
s.Value = s.Value[:len(values)]
copy(s.Value, values)
}
return nil
}

func searchValueType(vts []*ValueType, s string) int {
for i, vt := range vts {
if vt.Type == s {
return i
}
}
return -1
}
vendor/github.com/google/pprof/profile/profile.go (generated, vendored): 61 changed lines

@@ -21,6 +21,7 @@ import (
"compress/gzip"
"fmt"
"io"
"io/ioutil"
"math"
"path/filepath"
"regexp"
@@ -72,23 +73,9 @@ type ValueType struct {
type Sample struct {
Location []*Location
Value []int64
// Label is a per-label-key map to values for string labels.
//
// In general, having multiple values for the given label key is strongly
// discouraged - see docs for the sample label field in profile.proto. The
// main reason this unlikely state is tracked here is to make the
// decoding->encoding roundtrip not lossy. But we expect that the value
// slices present in this map are always of length 1.
Label map[string][]string
// NumLabel is a per-label-key map to values for numeric labels. See a note
// above on handling multiple values for a label.
Label map[string][]string
NumLabel map[string][]int64
// NumUnit is a per-label-key map to the unit names of corresponding numeric
// label values. The unit info may be missing even if the label is in
// NumLabel, see the docs in profile.proto for details. When the value is
// slice is present and not nil, its length must be equal to the length of
// the corresponding value slice in NumLabel.
NumUnit map[string][]string
NumUnit map[string][]string

locationIDX []uint64
labelX []label
@@ -119,15 +106,6 @@ type Mapping struct {

fileX int64
buildIDX int64

// Name of the kernel relocation symbol ("_text" or "_stext"), extracted from File.
// For linux kernel mappings generated by some tools, correct symbolization depends
// on knowing which of the two possible relocation symbols was used for `Start`.
// This is given to us as a suffix in `File` (e.g. "[kernel.kallsyms]_stext").
//
// Note, this public field is not persisted in the proto. For the purposes of
// copying / merging / hashing profiles, it is considered subsumed by `File`.
KernelRelocationSymbol string
}

// Location corresponds to Profile.Location
@@ -166,7 +144,7 @@ type Function struct {
// may be a gzip-compressed encoded protobuf or one of many legacy
// profile formats which may be unsupported in the future.
func Parse(r io.Reader) (*Profile, error) {
data, err := io.ReadAll(r)
data, err := ioutil.ReadAll(r)
if err != nil {
return nil, err
}
@@ -181,7 +159,7 @@ func ParseData(data []byte) (*Profile, error) {
if len(data) >= 2 && data[0] == 0x1f && data[1] == 0x8b {
gz, err := gzip.NewReader(bytes.NewBuffer(data))
if err == nil {
data, err = io.ReadAll(gz)
data, err = ioutil.ReadAll(gz)
}
if err != nil {
return nil, fmt.Errorf("decompressing profile: %v", err)
@@ -729,35 +707,6 @@ func (s *Sample) HasLabel(key, value string) bool {
return false
}

// SetNumLabel sets the specified key to the specified value for all samples in the
// profile. "unit" is a slice that describes the units that each corresponding member
// of "values" is measured in (e.g. bytes or seconds). If there is no relevant
// unit for a given value, that member of "unit" should be the empty string.
// "unit" must either have the same length as "value", or be nil.
func (p *Profile) SetNumLabel(key string, value []int64, unit []string) {
for _, sample := range p.Sample {
if sample.NumLabel == nil {
sample.NumLabel = map[string][]int64{key: value}
} else {
sample.NumLabel[key] = value
}
if sample.NumUnit == nil {
sample.NumUnit = map[string][]string{key: unit}
} else {
sample.NumUnit[key] = unit
}
}
}

// RemoveNumLabel removes all numerical labels associated with the specified key for all
// samples in the profile.
func (p *Profile) RemoveNumLabel(key string) {
for _, sample := range p.Sample {
delete(sample.NumLabel, key)
delete(sample.NumUnit, key)
}
}

// DiffBaseSample returns true if a sample belongs to the diff base and false
// otherwise.
func (s *Sample) DiffBaseSample() bool {
vendor/github.com/google/pprof/profile/proto.go (generated, vendored): 19 changed lines

@@ -39,12 +39,11 @@ import (
)

type buffer struct {
field int // field tag
typ int // proto wire type code for field
u64 uint64
data []byte
tmp [16]byte
tmpLines []Line // temporary storage used while decoding "repeated Line".
field int // field tag
typ int // proto wire type code for field
u64 uint64
data []byte
tmp [16]byte
}

type decoder func(*buffer, message) error
@@ -287,6 +286,7 @@ func decodeInt64s(b *buffer, x *[]int64) error {
if b.typ == 2 {
// Packed encoding
data := b.data
tmp := make([]int64, 0, len(data)) // Maximally sized
for len(data) > 0 {
var u uint64
var err error
@@ -294,8 +294,9 @@ func decodeInt64s(b *buffer, x *[]int64) error {
if u, data, err = decodeVarint(data); err != nil {
return err
}
*x = append(*x, int64(u))
tmp = append(tmp, int64(u))
}
*x = append(*x, tmp...)
return nil
}
var i int64
@@ -318,6 +319,7 @@ func decodeUint64s(b *buffer, x *[]uint64) error {
if b.typ == 2 {
data := b.data
// Packed encoding
tmp := make([]uint64, 0, len(data)) // Maximally sized
for len(data) > 0 {
var u uint64
var err error
@@ -325,8 +327,9 @@ func decodeUint64s(b *buffer, x *[]uint64) error {
if u, data, err = decodeVarint(data); err != nil {
return err
}
*x = append(*x, u)
tmp = append(tmp, u)
}
*x = append(*x, tmp...)
return nil
}
var u uint64
vendor/github.com/google/pprof/profile/prune.go (generated, vendored): 26 changed lines

@@ -62,31 +62,15 @@ func (p *Profile) Prune(dropRx, keepRx *regexp.Regexp) {
prune := make(map[uint64]bool)
pruneBeneath := make(map[uint64]bool)

// simplifyFunc can be expensive, so cache results.
// Note that the same function name can be encountered many times due
// different lines and addresses in the same function.
pruneCache := map[string]bool{} // Map from function to whether or not to prune
pruneFromHere := func(s string) bool {
if r, ok := pruneCache[s]; ok {
return r
}
funcName := simplifyFunc(s)
if dropRx.MatchString(funcName) {
if keepRx == nil || !keepRx.MatchString(funcName) {
pruneCache[s] = true
return true
}
}
pruneCache[s] = false
return false
}

for _, loc := range p.Location {
var i int
for i = len(loc.Line) - 1; i >= 0; i-- {
if fn := loc.Line[i].Function; fn != nil && fn.Name != "" {
if pruneFromHere(fn.Name) {
break
funcName := simplifyFunc(fn.Name)
if dropRx.MatchString(funcName) {
if keepRx == nil || !keepRx.MatchString(funcName) {
break
}
}
}
}