Upgrade the operator to use Operator SDK v1.33.0 (#182)

* Move controller package inside internal directory

Based on the go/v4 project structure, the following changed:
- Package `controllers` is now named `controller`
- Package `controller` now lives inside the new `internal` directory

* Move main.go into the cmd directory

Based on the new go/v4 project structure, `main.go` now lives in the `cmd` directory.

* Change package import in main.go
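
A minimal sketch of what that import change looks like, assuming the module path `github.com/1Password/onepassword-operator` (shown here only for illustration):

```go
package main

import (
	// Before (go/v3 layout):
	//   "github.com/1Password/onepassword-operator/controllers"
	//
	// After (go/v4 layout), the package is named `controller` and lives under internal/:
	_ "github.com/1Password/onepassword-operator/internal/controller"
)

func main() {}
```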

* Update go mod dependencies

Update the dependencies based on the versions obtained by creating a new operator project using `kubebuilder init --domain onepassword.com --plugins=go/v4`.

This follows the migration steps for going from go/v3 to go/v4 (https://book.kubebuilder.io/migration/migration_guide_gov3_to_gov4).

* Update vendor

* Adjust code for breaking changes from pkg update

The `sigs.k8s.io/controller-runtime` package introduced breaking changes between v0.14.5 and v0.16.3. This commit adapts the code to achieve the same behavior using the newly available functionality; see the sketch below.
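
As a hedged illustration of the kind of change involved (not the operator's exact code), controller-runtime v0.16 drops `Options.MetricsBindAddress` in favor of a `Metrics` options struct, so the manager setup in `cmd/main.go` now looks roughly like this; the flag defaults and the leader-election ID below are placeholders:

```go
package main

import (
	"flag"
	"os"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/log/zap"
	metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
)

func main() {
	var metricsAddr, probeAddr string
	var enableLeaderElection bool
	flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metrics endpoint binds to.")
	flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
	flag.BoolVar(&enableLeaderElection, "leader-elect", false, "Enable leader election for the controller manager.")
	flag.Parse()

	ctrl.SetLogger(zap.New())

	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		// v0.14: MetricsBindAddress: metricsAddr
		// v0.16: the metrics server is configured through its own options struct.
		Metrics:                metricsserver.Options{BindAddress: metricsAddr},
		HealthProbeBindAddress: probeAddr,
		LeaderElection:         enableLeaderElection,
		LeaderElectionID:       "example.onepassword.com", // placeholder ID
	})
	if err != nil {
		os.Exit(1)
	}

	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		os.Exit(1)
	}
}
```

Similar mechanical changes apply to the webhook server (now configured via `WebhookServer: webhook.NewServer(webhook.Options{...})`) and to namespace-restricted caches, which moved under `Cache.DefaultNamespaces`.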

* Adjust paths to connect yaml files

Since `main.go` now lives in the `cmd` directory, the paths to the files used for deploying Connect have to be adjusted relative to the new location `main.go` is executed from.

* Update files based on new structure and scaffolding

These changes reflect the new project structure and scaffolding produced by the go/v4 plugin.

They follow the go/v4 migration steps (https://book.kubebuilder.io/migration/migration_guide_gov3_to_gov4).

* Update config files

These updates adopt the Kustomize v4 syntax.

This is part of the upgrade to go/v4 (https://book.kubebuilder.io/migration/migration_guide_gov3_to_gov4).

* Update dependencies and Go version

* Update vendor

* Update Kubernetes tools versions

* Update operator version in Makefile

The version in the Makefile now matches the operator version.

* Update Operator SDK version in version.go

* Adjust generated deepcopy

Based on the latest generated scaffolding, the `// +build` tag is no longer needed, so it is removed (see the sketch below).
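
For reference, a sketch of the header of a generated `zz_generated.deepcopy.go` before and after (the package name here is illustrative); only the legacy `// +build` comment form disappears, the `//go:build` constraint stays:

```go
// Earlier controller-gen versions emitted both constraint forms for
// pre-Go-1.17 toolchains:
//
//	//go:build !ignore_autogenerated
//	// +build !ignore_autogenerated
//
// The current scaffolding keeps only the //go:build line.

//go:build !ignore_autogenerated

package v1
```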

* Update copyright year

* Bring back missing changes from migration

Some Makefile customizations were lost during the migration process; specifically, the namespace customization for the `make deploy` command.

We also push changes to kustomization.yaml to make the deploy process smoother.

* Add RBAC perms for coordination.k8s.io

It seems that with the latest changes to Kubernetes and Kustomize, the service account used by the operator needs additional RBAC to access the `leases` resource (see the sketch below).
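
A sketch of the permission in question: with newer controller-runtime versions, leader election uses `Lease` objects from the `coordination.k8s.io` API group, so the manager's service account needs RBAC for the `leases` resource. Expressed as a kubebuilder marker (the actual change may equally live in the generated manifests under `config/rbac/`), it would look roughly like this:

```go
package controller

// controller-gen turns this marker into the corresponding RBAC rule the next
// time `make manifests` is run.
//
//+kubebuilder:rbac:groups=coordination.k8s.io,resources=leases,verbs=get;list;watch;create;update;patch;delete
```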

* Optimize Dockerfile

The Dockerfile had a step for caching dependencies (`go mod download`). Since the dependencies are already provided by the `vendor` directory, which we include in the build context, this step can be removed to speed up image builds.
Author: Eduard Filip
Date: 2024-01-25 14:21:31 +01:00 (committed via GitHub)
Parent: 8fc852a4dd
Commit: f72e5243b0

1356 changed files with 86780 additions and 43671 deletions


@@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.49.0
+GOLANGCI_LINT_VERSION ?= v1.51.2
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
@@ -91,6 +91,8 @@ BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS))
PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS))
TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS))
+SANITIZED_DOCKER_IMAGE_TAG := $(subst +,-,$(DOCKER_IMAGE_TAG))
ifeq ($(GOHOSTARCH),amd64)
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows))
# Only supported on amd64
@@ -205,7 +207,7 @@ common-tarball: promu
.PHONY: common-docker $(BUILD_DOCKER_ARCHS)
common-docker: $(BUILD_DOCKER_ARCHS)
$(BUILD_DOCKER_ARCHS): common-docker-%:
-docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \
+docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \
-f $(DOCKERFILE_PATH) \
--build-arg ARCH="$*" \
--build-arg OS="linux" \
@@ -214,19 +216,19 @@ $(BUILD_DOCKER_ARCHS): common-docker-%:
.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS)
common-docker-publish: $(PUBLISH_DOCKER_ARCHS)
$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%:
-docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)"
+docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)"
DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION)))
.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS)
common-docker-tag-latest: $(TAG_DOCKER_ARCHS)
$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%:
-docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
-docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"
+docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
+docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"
.PHONY: common-docker-manifest
common-docker-manifest:
-DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG))
-DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)"
+DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG))
+DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)"
.PHONY: promu
promu: $(PROMU)


@@ -21,6 +21,7 @@ import (
// kernel data structures.
type FS struct {
proc fs.FS
+real bool
}
// DefaultMountPoint is the common mount point of the proc filesystem.
@@ -39,5 +40,11 @@ func NewFS(mountPoint string) (FS, error) {
if err != nil {
return FS{}, err
}
-return FS{fs}, nil
+real, err := isRealProc(mountPoint)
+if err != nil {
+	return FS{}, err
+}
+return FS{fs, real}, nil
}


@@ -0,0 +1,23 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build netbsd || openbsd || solaris || windows
// +build netbsd openbsd solaris windows
package procfs
// isRealProc returns true on architectures that don't have a Type argument
// in their Statfs_t struct
func isRealProc(mountPoint string) (bool, error) {
return true, nil
}

vendor/github.com/prometheus/procfs/fs_statfs_type.go (new file, 33 lines)

@@ -0,0 +1,33 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !netbsd && !openbsd && !solaris && !windows
// +build !netbsd,!openbsd,!solaris,!windows
package procfs
import (
"syscall"
)
// isRealProc determines whether supplied mountpoint is really a proc filesystem.
func isRealProc(mountPoint string) (bool, error) {
stat := syscall.Statfs_t{}
err := syscall.Statfs(mountPoint, &stat)
if err != nil {
return false, err
}
// 0x9fa0 is PROC_SUPER_MAGIC: https://elixir.bootlin.com/linux/v6.1/source/include/uapi/linux/magic.h#L87
return stat.Type == 0x9fa0, nil
}


@@ -64,6 +64,21 @@ func ParsePInt64s(ss []string) ([]*int64, error) {
return us, nil
}
+// Parses a uint64 from given hex in string.
+func ParseHexUint64s(ss []string) ([]*uint64, error) {
+	us := make([]*uint64, 0, len(ss))
+	for _, s := range ss {
+		u, err := strconv.ParseUint(s, 16, 64)
+		if err != nil {
+			return nil, err
+		}
+		us = append(us, &u)
+	}
+	return us, nil
+}
// ReadUintFromFile reads a file and attempts to parse a uint64 from it.
func ReadUintFromFile(path string) (uint64, error) {
data, err := os.ReadFile(path)


@@ -186,6 +186,8 @@ type NFSOperationStats struct {
CumulativeTotalResponseMilliseconds uint64
// Duration from when a request was enqueued to when it was completely handled.
CumulativeTotalRequestMilliseconds uint64
+// The average time from the point the client sends RPC requests until it receives the response.
+AverageRTTMilliseconds float64
// The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions.
Errors uint64
}
@@ -534,7 +536,6 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
ns = append(ns, n)
}
opStats := NFSOperationStats{
Operation: strings.TrimSuffix(ss[0], ":"),
Requests: ns[0],
@@ -546,6 +547,9 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
CumulativeTotalResponseMilliseconds: ns[6],
CumulativeTotalRequestMilliseconds: ns[7],
}
+if ns[0] != 0 {
+	opStats.AverageRTTMilliseconds = float64(ns[6]) / float64(ns[0])
+}
if len(ns) > 8 {
opStats.Errors = ns[8]


@@ -18,7 +18,6 @@ import (
	"bytes"
	"fmt"
	"io"
-	"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
@@ -28,9 +27,13 @@ import (
// and contains netfilter conntrack statistics at one CPU core.
type ConntrackStatEntry struct {
Entries uint64
+Searched uint64
Found uint64
+New uint64
Invalid uint64
Ignore uint64
+Delete uint64
+DeleteList uint64
Insert uint64
InsertFailed uint64
Drop uint64
@@ -81,73 +84,34 @@ func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) {
// Parses a ConntrackStatEntry from given array of fields.
func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
-if len(fields) != 17 {
-	return nil, fmt.Errorf("invalid conntrackstat entry, missing fields")
-}
-entry := &ConntrackStatEntry{}
-entries, err := parseConntrackStatField(fields[0])
+entries, err := util.ParseHexUint64s(fields)
if err != nil {
-	return nil, err
+	return nil, fmt.Errorf("invalid conntrackstat entry, couldn't parse fields: %s", err)
}
-entry.Entries = entries
-found, err := parseConntrackStatField(fields[2])
-if err != nil {
-	return nil, err
+numEntries := len(entries)
+if numEntries < 16 || numEntries > 17 {
+	return nil, fmt.Errorf("invalid conntrackstat entry, invalid number of fields: %d", numEntries)
}
-entry.Found = found
-invalid, err := parseConntrackStatField(fields[4])
-if err != nil {
-	return nil, err
+stats := &ConntrackStatEntry{
+	Entries:      *entries[0],
+	Searched:     *entries[1],
+	Found:        *entries[2],
+	New:          *entries[3],
+	Invalid:      *entries[4],
+	Ignore:       *entries[5],
+	Delete:       *entries[6],
+	DeleteList:   *entries[7],
+	Insert:       *entries[8],
+	InsertFailed: *entries[9],
+	Drop:         *entries[10],
+	EarlyDrop:    *entries[11],
}
-entry.Invalid = invalid
-ignore, err := parseConntrackStatField(fields[5])
-if err != nil {
-	return nil, err
+// Ignore missing search_restart on Linux < 2.6.35.
+if numEntries == 17 {
+	stats.SearchRestart = *entries[16]
}
-entry.Ignore = ignore
-insert, err := parseConntrackStatField(fields[8])
-if err != nil {
-	return nil, err
-}
-entry.Insert = insert
-insertFailed, err := parseConntrackStatField(fields[9])
-if err != nil {
-	return nil, err
-}
-entry.InsertFailed = insertFailed
-drop, err := parseConntrackStatField(fields[10])
-if err != nil {
-	return nil, err
-}
-entry.Drop = drop
-earlyDrop, err := parseConntrackStatField(fields[11])
-if err != nil {
-	return nil, err
-}
-entry.EarlyDrop = earlyDrop
-searchRestart, err := parseConntrackStatField(fields[16])
-if err != nil {
-	return nil, err
-}
-entry.SearchRestart = searchRestart
-return entry, nil
-}
-// Parses a uint64 from given hex in string.
-func parseConntrackStatField(field string) (uint64, error) {
-	val, err := strconv.ParseUint(field, 16, 64)
-	if err != nil {
-		return 0, fmt.Errorf("couldn't parse %q field: %w", field, err)
-	}
-	return val, err
+return stats, nil
}


@@ -76,6 +76,7 @@ func parseSoftnet(r io.Reader) ([]SoftnetStat, error) {
s := bufio.NewScanner(r)
var stats []SoftnetStat
+cpuIndex := 0
for s.Scan() {
columns := strings.Fields(s.Text())
width := len(columns)
@@ -127,9 +128,13 @@ func parseSoftnet(r io.Reader) ([]SoftnetStat, error) {
softnetStat.SoftnetBacklogLen = us[0]
softnetStat.Index = us[1]
+} else {
+	// For older kernels, create the Index based on the scan line number.
+	softnetStat.Index = uint32(cpuIndex)
}
softnetStat.Width = width
stats = append(stats, softnetStat)
+cpuIndex++
}
return stats, nil

vendor/github.com/prometheus/procfs/net_wireless.go (new file, 182 lines)

@@ -0,0 +1,182 @@
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"bytes"
"fmt"
"io"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// Wireless models the content of /proc/net/wireless.
type Wireless struct {
Name string
// Status is the current 4-digit hex value status of the interface.
Status uint64
// QualityLink is the link quality.
QualityLink int
// QualityLevel is the signal gain (dBm).
QualityLevel int
// QualityNoise is the signal noise baseline (dBm).
QualityNoise int
// DiscardedNwid is the number of discarded packets with wrong nwid/essid.
DiscardedNwid int
// DiscardedCrypt is the number of discarded packets with wrong code/decode (WEP).
DiscardedCrypt int
// DiscardedFrag is the number of discarded packets that can't perform MAC reassembly.
DiscardedFrag int
// DiscardedRetry is the number of discarded packets that reached max MAC retries.
DiscardedRetry int
// DiscardedMisc is the number of discarded packets for other reasons.
DiscardedMisc int
// MissedBeacon is the number of missed beacons/superframe.
MissedBeacon int
}
// Wireless returns kernel wireless statistics.
func (fs FS) Wireless() ([]*Wireless, error) {
b, err := util.ReadFileNoStat(fs.proc.Path("net/wireless"))
if err != nil {
return nil, err
}
m, err := parseWireless(bytes.NewReader(b))
if err != nil {
return nil, fmt.Errorf("failed to parse wireless: %w", err)
}
return m, nil
}
// parseWireless parses the contents of /proc/net/wireless.
/*
Inter-| sta-| Quality | Discarded packets | Missed | WE
face | tus | link level noise | nwid crypt frag retry misc | beacon | 22
eth1: 0000 5. -256. -10. 0 1 0 3 0 0
eth2: 0000 5. -256. -20. 0 2 0 4 0 0
*/
func parseWireless(r io.Reader) ([]*Wireless, error) {
var (
interfaces []*Wireless
scanner = bufio.NewScanner(r)
)
for n := 0; scanner.Scan(); n++ {
// Skip the 2 header lines.
if n < 2 {
continue
}
line := scanner.Text()
parts := strings.Split(line, ":")
if len(parts) != 2 {
return nil, fmt.Errorf("expected 2 parts after splitting line by ':', got %d for line %q", len(parts), line)
}
name := strings.TrimSpace(parts[0])
stats := strings.Fields(parts[1])
if len(stats) < 10 {
return nil, fmt.Errorf("invalid number of fields in line %d, expected at least 10, got %d: %q", n, len(stats), line)
}
status, err := strconv.ParseUint(stats[0], 16, 16)
if err != nil {
return nil, fmt.Errorf("invalid status in line %d: %q", n, line)
}
qlink, err := strconv.Atoi(strings.TrimSuffix(stats[1], "."))
if err != nil {
return nil, fmt.Errorf("failed to parse Quality:link as integer %q: %w", qlink, err)
}
qlevel, err := strconv.Atoi(strings.TrimSuffix(stats[2], "."))
if err != nil {
return nil, fmt.Errorf("failed to parse Quality:level as integer %q: %w", qlevel, err)
}
qnoise, err := strconv.Atoi(strings.TrimSuffix(stats[3], "."))
if err != nil {
return nil, fmt.Errorf("failed to parse Quality:noise as integer %q: %w", qnoise, err)
}
dnwid, err := strconv.Atoi(stats[4])
if err != nil {
return nil, fmt.Errorf("failed to parse Discarded:nwid as integer %q: %w", dnwid, err)
}
dcrypt, err := strconv.Atoi(stats[5])
if err != nil {
return nil, fmt.Errorf("failed to parse Discarded:crypt as integer %q: %w", dcrypt, err)
}
dfrag, err := strconv.Atoi(stats[6])
if err != nil {
return nil, fmt.Errorf("failed to parse Discarded:frag as integer %q: %w", dfrag, err)
}
dretry, err := strconv.Atoi(stats[7])
if err != nil {
return nil, fmt.Errorf("failed to parse Discarded:retry as integer %q: %w", dretry, err)
}
dmisc, err := strconv.Atoi(stats[8])
if err != nil {
return nil, fmt.Errorf("failed to parse Discarded:misc as integer %q: %w", dmisc, err)
}
mbeacon, err := strconv.Atoi(stats[9])
if err != nil {
return nil, fmt.Errorf("failed to parse Missed:beacon as integer %q: %w", mbeacon, err)
}
w := &Wireless{
Name: name,
Status: status,
QualityLink: qlink,
QualityLevel: qlevel,
QualityNoise: qnoise,
DiscardedNwid: dnwid,
DiscardedCrypt: dcrypt,
DiscardedFrag: dfrag,
DiscardedRetry: dretry,
DiscardedMisc: dmisc,
MissedBeacon: mbeacon,
}
interfaces = append(interfaces, w)
}
if err := scanner.Err(); err != nil {
return nil, fmt.Errorf("failed to scan /proc/net/wireless: %w", err)
}
return interfaces, nil
}


@@ -15,7 +15,6 @@ package procfs
import (
	"bufio"
-	"io"
"os"
"path/filepath"
"strconv"
@@ -38,12 +37,7 @@ func (fs FS) NetStat() ([]NetStat, error) {
var netStatsTotal []NetStat
for _, filePath := range statFiles {
-file, err := os.Open(filePath)
-if err != nil {
-	return nil, err
-}
-procNetstat, err := parseNetstat(file)
+procNetstat, err := parseNetstat(filePath)
if err != nil {
return nil, err
}
@@ -56,14 +50,17 @@ func (fs FS) NetStat() ([]NetStat, error) {
// parseNetstat parses the metrics from `/proc/net/stat/` file
// and returns a NetStat structure.
-func parseNetstat(r io.Reader) (NetStat, error) {
-	var (
-		scanner = bufio.NewScanner(r)
-		netStat = NetStat{
-			Stats: make(map[string][]uint64),
-		}
-	)
+func parseNetstat(filePath string) (NetStat, error) {
+	netStat := NetStat{
+		Stats: make(map[string][]uint64),
+	}
+	file, err := os.Open(filePath)
+	if err != nil {
+		return netStat, err
+	}
+	defer file.Close()
+	scanner := bufio.NewScanner(file)
scanner.Scan()
// First string is always a header for stats


@@ -21,7 +21,6 @@ import (
	"strconv"
	"strings"
-	"github.com/prometheus/procfs/internal/fs"
"github.com/prometheus/procfs/internal/util"
)
@@ -30,7 +29,7 @@ type Proc struct {
// The process ID.
PID int
-fs fs.FS
+fs FS
}
// Procs represents a list of Proc structs.
@@ -92,7 +91,7 @@ func (fs FS) Proc(pid int) (Proc, error) {
if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil {
return Proc{}, err
}
-return Proc{PID: pid, fs: fs.proc}, nil
+return Proc{PID: pid, fs: fs}, nil
}
// AllProcs returns a list of all currently available processes.
@@ -114,7 +113,7 @@ func (fs FS) AllProcs() (Procs, error) {
if err != nil {
continue
}
-p = append(p, Proc{PID: int(pid), fs: fs.proc})
+p = append(p, Proc{PID: int(pid), fs: fs})
}
return p, nil
@@ -237,6 +236,19 @@ func (p Proc) FileDescriptorTargets() ([]string, error) {
// FileDescriptorsLen returns the number of currently open file descriptors of
// a process.
func (p Proc) FileDescriptorsLen() (int, error) {
+// Use fast path if available (Linux v6.2): https://github.com/torvalds/linux/commit/f1f1f2569901
+if p.fs.real {
+	stat, err := os.Stat(p.path("fd"))
+	if err != nil {
+		return 0, err
+	}
+	size := stat.Size()
+	if size > 0 {
+		return int(size), nil
+	}
+}
fds, err := p.fileDescriptors()
if err != nil {
return 0, err
@@ -285,7 +297,7 @@ func (p Proc) fileDescriptors() ([]string, error) {
}
func (p Proc) path(pa ...string) string {
-return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
+return p.fs.proc.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
}
// FileDescriptorsInfo retrieves information about all file descriptors of


@@ -18,7 +18,6 @@ import (
	"fmt"
	"os"
-	"github.com/prometheus/procfs/internal/fs"
"github.com/prometheus/procfs/internal/util"
)
@@ -112,7 +111,7 @@ type ProcStat struct {
// Aggregated block I/O delays, measured in clock ticks (centiseconds).
DelayAcctBlkIOTicks uint64
-proc fs.FS
+proc FS
}
// NewStat returns the current status information of the process.
@@ -210,8 +209,7 @@ func (s ProcStat) ResidentMemory() int {
// StartTime returns the unix timestamp of the process in seconds.
func (s ProcStat) StartTime() (float64, error) {
-fs := FS{proc: s.proc}
-stat, err := fs.Stat()
+stat, err := s.proc.Stat()
if err != nil {
return 0, err
}


@@ -15,6 +15,7 @@ package procfs
import (
	"bytes"
+	"sort"
"strconv"
"strings"
@@ -76,6 +77,9 @@ type ProcStatus struct {
UIDs [4]string
// GIDs of the process (Real, effective, saved set, and filesystem GIDs)
GIDs [4]string
+// CpusAllowedList: List of cpu cores processes are allowed to run on.
+CpusAllowedList []uint64
}
// NewStatus returns the current status information of the process.
@@ -161,10 +165,38 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt
s.VoluntaryCtxtSwitches = vUint
case "nonvoluntary_ctxt_switches":
s.NonVoluntaryCtxtSwitches = vUint
+case "Cpus_allowed_list":
+	s.CpusAllowedList = calcCpusAllowedList(vString)
}
}
// TotalCtxtSwitches returns the total context switch.
func (s ProcStatus) TotalCtxtSwitches() uint64 {
return s.VoluntaryCtxtSwitches + s.NonVoluntaryCtxtSwitches
}
+func calcCpusAllowedList(cpuString string) []uint64 {
+	s := strings.Split(cpuString, ",")
+	var g []uint64
+	for _, cpu := range s {
+		// parse cpu ranges, example: 1-3=[1,2,3]
+		if l := strings.Split(strings.TrimSpace(cpu), "-"); len(l) > 1 {
+			startCPU, _ := strconv.ParseUint(l[0], 10, 64)
+			endCPU, _ := strconv.ParseUint(l[1], 10, 64)
+			for i := startCPU; i <= endCPU; i++ {
+				g = append(g, i)
+			}
+		} else if len(l) == 1 {
+			cpu, _ := strconv.ParseUint(l[0], 10, 64)
+			g = append(g, cpu)
+		}
+	}
+	sort.Slice(g, func(i, j int) bool { return g[i] < g[j] })
+	return g
+}


@@ -54,7 +54,8 @@ func (fs FS) AllThreads(pid int) (Procs, error) {
if err != nil {
continue
}
-t = append(t, Proc{PID: int(tid), fs: fsi.FS(taskPath)})
+t = append(t, Proc{PID: int(tid), fs: FS{fsi.FS(taskPath), fs.real}})
}
return t, nil
@@ -66,13 +67,13 @@ func (fs FS) Thread(pid, tid int) (Proc, error) {
if _, err := os.Stat(taskPath); err != nil {
return Proc{}, err
}
-return Proc{PID: tid, fs: fsi.FS(taskPath)}, nil
+return Proc{PID: tid, fs: FS{fsi.FS(taskPath), fs.real}}, nil
}
// Thread returns a process for a given TID of Proc.
func (proc Proc) Thread(tid int) (Proc, error) {
-tfs := fsi.FS(proc.path("task"))
-if _, err := os.Stat(tfs.Path(strconv.Itoa(tid))); err != nil {
+tfs := FS{fsi.FS(proc.path("task")), proc.fs.real}
+if _, err := os.Stat(tfs.proc.Path(strconv.Itoa(tid))); err != nil {
return Proc{}, err
}
return Proc{PID: tid, fs: tfs}, nil